text stringlengths 4 1.02M | meta dict |
|---|---|
import structlog
from banal import ensure_list, ensure_dict
from normality import normalize, stringify
log = structlog.get_logger(__name__)
class Lookup(object):
    """Lookups are ways of patching up broken input data from a source.

    A lookup holds a set of `Option` rules; `match` finds the option whose
    criteria match a given raw value and exposes its `Result` payload.
    """

    def __init__(self, name, config):
        self.name = name
        # Lookup-wide defaults; individual options may override these.
        self.required = config.pop("required", False)
        self.normalize = config.pop("normalize", False)
        self.lowercase = config.pop("lowercase", False)
        self.options = set()
        for option in ensure_list(config.pop("options", [])):
            self.options.add(Option(self, option))
        # Short-hand config form: a flat mapping of match -> value pairs.
        for match, value in ensure_dict(config.pop("map", {})).items():
            option = {"match": match, "value": value}
            self.options.add(Option(self, option))

    def match(self, value):
        """Return the `Result` of an option matching `value`, or None.

        Logs an error (but still returns one result) when several options
        match, and logs an error when a required lookup finds nothing.
        """
        results = [option.result for option in self.options
                   if option.matches(value)]
        if len(results) > 1:
            # Multiple rules fired; flag the configuration problem but
            # keep going with one of the results, as before.
            log.error("Ambiguous result", value=value)
        if results:
            return results[0]
        if self.required:
            log.error("Missing lookup result", value=value)
        return None

    def get_value(self, value, default=None):
        """Return the single mapped value for `value`, or `default`."""
        res = self.match(value)
        if res is not None:
            return res.value
        return default

    def get_values(self, value, default=None):
        """Return the mapped values for `value` as a list, else `default` as a list."""
        res = self.match(value)
        if res is not None:
            return res.values
        return ensure_list(default)
class Option(object):
    """One possible lookup rule that might match a value.

    An option matches either by exact equality (`match`) or by substring
    containment (`contains`), after applying the configured normalization.
    Whatever keys remain in `config` become the option's `Result` payload.
    """

    def __init__(self, lookup, config):
        self.lookup = lookup
        # Option-level settings fall back to the parent lookup's defaults.
        self.normalize = config.pop("normalize", lookup.normalize)
        self.lowercase = config.pop("lowercase", lookup.lowercase)
        contains = ensure_list(config.pop("contains", []))
        self.contains = [self.normalize_value(c) for c in contains]
        match = ensure_list(config.pop("match", []))
        self.match = [self.normalize_value(m) for m in match]
        # Remaining config keys (e.g. "value", "values") form the result.
        self.result = Result(config)

    def normalize_value(self, value):
        """Normalize a candidate or input value for comparison."""
        if self.normalize:
            # Full text normalization (transliterated to ASCII).
            value = normalize(value, ascii=True)
        else:
            value = stringify(value)
        if value is not None:
            if self.lowercase:
                value = value.lower()
            value = value.strip()
        return value

    def matches(self, value):
        """Return True when `value` triggers this option."""
        norm_value = self.normalize_value(value)
        if norm_value is not None:
            for cand in self.contains:
                if cand in norm_value:
                    return True
        # NOTE: a None norm_value can still match if None is listed in `match`.
        return norm_value in self.match

    @property
    def criteria(self):
        # Human-readable summary of all criteria; contains-rules are
        # wrapped as "c(...)" to distinguish them from exact matches.
        criteria = set(self.match)
        criteria.update((f"c({c})" for c in self.contains))
        return sorted(criteria)

    def __str__(self):
        return "|".join(self.criteria)

    def __repr__(self):
        return "<Option(%r, %r)>" % (str(self), self.result)

    # NOTE(review): __hash__ is defined without a matching __eq__, so two
    # options with identical criteria are still distinct set members
    # (default identity equality). Confirm this is intended before relying
    # on `Lookup.options` de-duplicating equivalent rules.
    def __hash__(self):
        return hash(str(self))
class Result(object):
    """Container for the output payload of a matched `Option`.

    Any keys left in the option config (typically "value" and/or "values")
    are exposed as attributes; missing attributes read as None.
    """

    def __init__(self, data):
        self._data = data

    @property
    def values(self):
        """Return the result values as a list, falling back to `value`."""
        # Bug fix: this previously used `self._data.pop("values", ...)`,
        # mutating the backing dict so a second read of the property
        # silently lost the configured "values" and fell back to the
        # single `value`. `.get` keeps the property idempotent.
        values = self._data.get("values", self.value)
        return ensure_list(values)

    def __getattr__(self, name):
        # Unknown attributes resolve against the config payload; absent
        # keys read as None rather than raising.
        try:
            return self._data[name]
        except KeyError:
            return None

    def __repr__(self):
        return "<Result(%r, %r)>" % (self.values, self._data)
| {
"content_hash": "c585f54d754912044aead4a8ab671e33",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 74,
"avg_line_length": 31.107142857142858,
"alnum_prop": 0.574052812858783,
"repo_name": "pudo/opennames",
"id": "1c88cc5db018065190b3a50f975aa4117a014683",
"size": "3484",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "opensanctions/core/lookup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5451"
},
{
"name": "Python",
"bytes": "58743"
},
{
"name": "Shell",
"bytes": "105"
}
],
"symlink_target": ""
} |
from shinytest import ShinyTestCase
class TestNpc(ShinyTestCase):
    """Exercises NPC building commands: events, permissions, and ai packs.

    Fix: the permission assertions used `is` / `is not` identity checks on
    computed ints (e.g. `npc.permissions is self.PERMS['god'] | ...`).
    That only happens to work because CPython caches small ints, and
    `... is 0` raises a SyntaxWarning on modern Python. They are now
    proper equality assertions via assertEqual/assertNotEqual.
    """

    def setUp(self):
        ShinyTestCase.setUp(self)
        from shinymud.models.player import Player
        from shinymud.models.npc import Npc
        from shinymud.models.area import Area
        from shinymud.models.room import Room
        from shinymud.modes.build_mode import BuildMode
        from shinymud.data import config
        self.PERMS = config.PERMS
        self.bob = Player(('bob', 'bar'))
        self.bob.mode = None
        self.bob.playerize({'name': 'bob'})
        self.world.player_add(self.bob)
        self.bob.mode = BuildMode(self.bob)
        self.bob.permissions = self.bob.permissions | self.PERMS['builder']
        self.area = Area.create({'name': 'foo'})
        self.room = self.area.new_room()
        self.area2 = Area.create({'name': 'SimCity'})
        self.area2_script = self.area2.new_script()

    def tearDown(self):
        del self.area
        del self.area2
        del self.bob

    def test_existance(self):
        """Test if an NPC can exist within an area properly (unspawned)"""
        self.npc = self.area.new_npc()
        self.npc.characterize({'name': 'bobert'})
        self.assertIn(self.npc, self.area.npcs.values())

    def test_build_add_remove_events(self):
        """Adding/removing NPC events via build commands, good and bad input."""
        npc = self.area.new_npc()
        fail_message = 'Type "help events" for help with this command.'
        message = npc.build_add_event('', self.bob)
        self.assertEqual(message, fail_message)
        # test for non-existent scripts
        message = npc.build_add_event('pc_enter call script 1', self.bob)
        self.assertEqual(message, "Script 1 doesn't exist.")
        message = npc.build_add_event("hears 'spam' call script 0", self.bob)
        self.assertEqual(message, "Script 0 doesn't exist.")
        message = npc.build_add_event("hears 'spam' call script 602", self.bob)
        self.assertEqual(message, "Script 602 doesn't exist.")
        script = self.area.new_script()
        # Test basic add
        message = npc.build_add_event('pc_enter call script 1', self.bob)
        self.assertEqual(message, 'Event added.')
        # Test invalid areas
        message = npc.build_add_event('pc_enter call script 1 from area AreaDontExist', self.bob)
        self.assertEqual(message, 'Area "AreaDontExist" doesn\'t exist.')
        message = npc.build_add_event('pc_enter call script 1 from area AreaDontExist 100', self.bob)
        self.assertEqual(message, 'Area "AreaDontExist" doesn\'t exist.')
        # Test invalid probabilities.
        message = npc.build_add_event('pc_enter call script 1 0', self.bob)
        self.assertEqual(message, 'Probability value must be between 1 and 100.')
        message = npc.build_add_event('pc_enter call script 1 101', self.bob)
        self.assertEqual(message, 'Probability value must be between 1 and 100.')
        message = npc.build_add_event('pc_enter call script 1 9999', self.bob)
        self.assertEqual(message, 'Probability value must be between 1 and 100.')
        # Test different forms of valid adds.
        message = npc.build_add_event('pc_enter call script 1 50', self.bob)
        self.assertEqual(message, 'Event added.')
        message = npc.build_add_event('pc_enter call script 1 from area SimCity', self.bob)
        self.assertEqual(message, 'Event added.')
        message = npc.build_add_event('pc_enter call script 1 from area SimCity 75', self.bob)
        self.assertEqual(message, 'Event added.')
        message = npc.build_add_event('pc_enter call 1 from SimCity 50', self.bob)
        self.assertEqual(message, 'Event added.')
        # Test for trigger 'hears'
        message = npc.build_add_event("hears 'food' call script 1", self.bob)
        self.assertEqual(message, 'Event added.')
        # Test for trigger 'emoted'
        message = npc.build_add_event("emoted 'slap' call script 1", self.bob)
        self.assertEqual(message, 'Event added.')
        # Technically invalid, but will be left to user responsibility for now.
        # (it shouldn't ever cause a crash)
        message = npc.build_add_event("emoted 'emotedontexist' call script 1", self.bob)
        self.assertEqual(message, 'Event added.')
        # Test for new items
        self.area.new_item()
        message = npc.build_add_event("given_item 'item 1' call script 1", self.bob)
        self.assertEqual(message, 'Event added.')
        # Technically invalid, but will be left to user responsibility for now.
        # (it shouldn't ever cause a crash)
        message = npc.build_add_event("given_item 'item 5' call script 1", self.bob)
        self.assertEqual(message, 'Event added.')
        # we should now have 5 successfully added events in pc_enter
        self.assertEqual(len(npc.events['pc_enter']), 5)
        message = npc.build_remove_event("pc_enter -1", self.bob)
        self.assertEqual(message, 'Try: "remove event <event-trigger> <event-id>" or see "help npc events".')
        self.assertEqual(len(npc.events['pc_enter']), 5)
        message = npc.build_remove_event("pc_enter 5", self.bob)
        self.assertEqual(message, "Npc 1 doesn't have the event pc_enter #5.")
        self.assertEqual(len(npc.events['pc_enter']), 5)
        message = npc.build_remove_event("pc_enter 4", self.bob)
        self.assertEqual(message, 'Event pc_enter, number 4 has been removed.')
        self.assertEqual(len(npc.events['pc_enter']), 4)
        message = npc.build_remove_event("given_item 1", self.bob)
        self.assertEqual(message, 'Event given_item, number 1 has been removed.')
        self.assertEqual(len(npc.events['pc_enter']), 4)
        self.assertEqual(len(npc.events['given_item']), 1)

    def test_build_add_remove_permissions(self):
        """Adding/removing NPC permission groups, including authorization."""
        npc = self.area.new_npc()
        # set npc permissions to nothing.
        npc.permissions = 0
        message = npc.build_add_permission("dm", self.bob)
        self.assertEqual(message, "You need to be GOD in order to edit an npc's permissions.")
        # Current permission level needed for messing with npc perms is 'god'. (for when this test was written)
        # Change as needed!
        self.bob.permissions = self.bob.permissions | self.PERMS['god']
        # Bad input tests
        message = npc.build_add_permission("", self.bob)
        self.assertEqual(message, 'Try: "add permission <permission group>". See "help permissions".')
        message = npc.build_add_permission("monkey", self.bob)
        self.assertTrue('Valid permissions are: admin, player, builder, dm, god\n' in message)
        # good input tests
        message = npc.build_add_permission("god", self.bob)
        self.assertTrue('Shiny McShinerson now has god permissions.' in message)
        self.assertEqual(npc.permissions, self.PERMS['god'])
        message = npc.build_add_permission("dm", self.bob)
        self.assertTrue('Shiny McShinerson now has dm permissions.' in message)
        self.assertEqual(npc.permissions, self.PERMS['god'] | self.PERMS['dm'])
        self.assertNotEqual(npc.permissions, self.PERMS['god'] | self.PERMS['dm'] | self.PERMS['admin'])
        message = npc.build_add_permission("admin", self.bob)
        self.assertTrue('Shiny McShinerson now has admin permissions.' in message)
        self.assertEqual(npc.permissions, self.PERMS['god'] | self.PERMS['dm'] | self.PERMS['admin'])
        # Removing Permissions
        # reset bobs permissions for next test
        self.bob.permissions = 0
        message = npc.build_remove_permission("dm", self.bob)
        self.assertEqual(message, "You need to be GOD in order to edit an npc's permissions.")
        # Current permission level needed for messing with npc perms is 'god'. (for when this test was written)
        # Change as needed!
        self.bob.permissions = self.bob.permissions | self.PERMS['god']
        # Bad input tests
        message = npc.build_remove_permission("", self.bob)
        self.assertEqual(message, 'Try: "remove permission <permission group>", or see "help permissions".')
        message = npc.build_remove_permission("monkey", self.bob)
        self.assertEqual("Shiny McShinerson doesn't have monkey permissions.", message)
        # Good input tests
        self.assertEqual(npc.permissions, self.PERMS['god'] | self.PERMS['dm'] | self.PERMS['admin'])
        message = npc.build_remove_permission("god", self.bob)
        self.assertEqual('Shiny McShinerson no longer has god permissions.', message)
        self.assertEqual(npc.permissions, self.PERMS['dm'] | self.PERMS['admin'])
        self.assertTrue(npc.permissions < self.PERMS['god'])
        message = npc.build_remove_permission("dm", self.bob)
        self.assertEqual('Shiny McShinerson no longer has dm permissions.', message)
        self.assertEqual(npc.permissions, self.PERMS['admin'])
        self.assertTrue(npc.permissions >= self.PERMS['dm'])
        message = npc.build_remove_permission("admin", self.bob)
        self.assertEqual('Shiny McShinerson no longer has admin permissions.', message)
        self.assertEqual(npc.permissions, 0)

    def test_build_add_remove_ai(self):
        """Adding/removing an ai pack and its effect on the NPC description."""
        npc = self.area.new_npc()
        # Test adding ai pack
        message = npc.build_add_ai("", self.bob)
        self.assertEqual('Try: "add ai <ai-pack-name>", or type "help ai packs".', message)
        message = npc.build_add_ai("doesnotexist", self.bob)
        self.assertEqual('"doesnotexist" is not a valid ai pack. See "help ai packs".', message)
        message = npc.build_add_ai("merchant", self.bob)
        self.assertEqual("This npc (Shiny McShinerson) is now a merchant.", message)
        message = npc.build_add_ai("merchant", self.bob)
        self.assertEqual('This npc (Shiny McShinerson) already has that ai pack.', message)
        # Test basic add behavior for ai pack
        message = str(npc)
        self.assertTrue("MERCHANT ATTRIBUTES:" in message)
        # Test removing ai pack
        message = npc.build_remove_ai("", self.bob)
        self.assertEqual('Try: "remove ai <ai-pack-name>", or type "help ai packs".', message)
        message = npc.build_remove_ai("doesnotexist", self.bob)
        self.assertEqual('This npc doesn\'t have the "doesnotexist" ai type.', message)
        message = npc.build_remove_ai("merchant", self.bob)
        self.assertEqual('Npc 1 (Shiny McShinerson) no longer has merchant ai.', message)
        message = npc.build_remove_ai("merchant", self.bob)
        self.assertEqual('This npc doesn\'t have the "merchant" ai type.', message)
| {
"content_hash": "bda7d48fc1532d2478bfc4ddd96bf215",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 110,
"avg_line_length": 53.974747474747474,
"alnum_prop": 0.6433049499391784,
"repo_name": "shinymud/ShinyMUD",
"id": "be6f999b68334f7b6e1d53d020cd0749ba6bdf72",
"size": "10687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/shinytest/models/test_npc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "545213"
}
],
"symlink_target": ""
} |
from boundaries.base_views import APIView
from django.shortcuts import get_object_or_404
from postcodes.models import Postcode
class PostcodeDetailView(APIView):
    """API detail endpoint for a single postcode."""

    model = Postcode

    def get(self, request, code):
        """Serialize the postcode as a dict, optionally limited to the
        boundary sets named in the comma-separated ``sets`` query param."""
        postcode = get_object_or_404(Postcode, code=code)
        raw_sets = request.GET.get('sets')
        sets = raw_sets.split(',') if raw_sets else None
        return postcode.as_dict(sets=sets)
| {
"content_hash": "55b18431a28abe142d2778bfec5311bf",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 57,
"avg_line_length": 27.875,
"alnum_prop": 0.6547085201793722,
"repo_name": "opennorth/represent-postcodes",
"id": "0adade216dd7d8595925de7443b71c1abc7ca64a",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "postcodes/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13641"
}
],
"symlink_target": ""
} |
import json
import os
# Celery
from celery import task
# OpenStack
import keystoneclient.v2_0.client as ksclient
from novaclient.v1_1 import client
# Cosmo
from cosmo.events import send_event, get_cosmo_properties
@task
def provision(__cloudify_id, nova_config, security_group, **kwargs):
    """Provision an OpenStack security group along with its TCP rules.

    Refuses to proceed (RuntimeError) when a group with the requested
    name already exists, then reports the group as running.
    """
    nova = _init_client(nova_config)
    name = security_group['name']
    if _get_sg_by_name(nova, name):
        raise RuntimeError("Can not provision sg with name '{0}' because sg with such name already exists"
                           .format(name))
    description = security_group.get('description', '(no description)')
    sg = nova.security_groups.create(name, description)
    for rule in security_group['rules']:
        # Each rule opens a single TCP port for the given cidr / source group.
        nova.security_group_rules.create(
            sg.id,
            ip_protocol="tcp",
            from_port=rule['port'],
            to_port=rule['port'],
            cidr=rule.get('cidr'),
            group_id=rule.get('group_id')
        )
    send_event(__cloudify_id, "cosmo_manager", "sg status", "state", "running")  # XXX
@task
def terminate(nova_config, security_group, **kwargs):
    """Delete the security group named in `security_group`.

    Fix: the previous lookup used `_get_sg_by_name`, which returns None
    when the group is absent, causing an opaque AttributeError on
    `None.id`. Using `_get_sg_by_name_or_fail` raises a descriptive
    ValueError instead.
    """
    nova_client = _init_client(nova_config)
    sg = _get_sg_by_name_or_fail(nova_client, security_group['name'])
    nova_client.security_groups.delete(sg.id)
def _init_client(nova_config):
    """Build a novaclient Client from the on-disk keystone config.

    The config path comes from $KEYSTONE_CONFIG_PATH, defaulting to
    ~/keystone_config.json. The region in `nova_config` takes precedence
    over the one in the keystone config file.
    """
    config_path = os.getenv('KEYSTONE_CONFIG_PATH', os.path.expanduser('~/keystone_config.json'))
    with open(config_path, 'r') as f:
        keystone_config = json.loads(f.read())
    # nova_config region wins; fall back to keystone config, then None.
    region = nova_config.get('region', keystone_config.get('region', None))
    return client.Client(username=keystone_config['username'],
                         api_key=keystone_config['password'],
                         project_id=keystone_config['tenant_name'],
                         auth_url=keystone_config['auth_url'],
                         region_name=region,
                         http_log_debug=False)
def _get_sg_by_name(nova_client, name):
# TODO: check whether nova_client can get sgs only named `name`
sgs = nova_client.security_groups.list()
matching_sgs = [sg for sg in sgs if sg.name == name]
if len(matching_sgs) == 0:
return None
if len(matching_sgs) == 1:
return matching_sgs[0]
raise RuntimeError("Lookup of sg by name failed. There are {0} sgs named '{1}'"
.format(len(matching_sgs), name))
def _get_sg_by_name_or_fail(nova_client, name):
    """Return the security group called `name`; raise ValueError when absent."""
    sg = _get_sg_by_name(nova_client, name)
    if not sg:
        raise ValueError("Lookup of sg by name failed. Could not find a sg with name {0}".format(name))
    return sg
if __name__ == '__main__':
    # Smoke-test helper: dump the security groups visible to the client.
    # Fixes: _init_client() was called without its required nova_config
    # argument (TypeError), and the json.dumps result was discarded
    # instead of printed. Raw SecurityGroup objects are not JSON
    # serializable, so we dump their names.
    nova_client = _init_client({})
    print(json.dumps(
        [sg.name for sg in nova_client.security_groups.list()],
        indent=4,
        sort_keys=True,
    ))
| {
"content_hash": "d54971b66c247292af61d2fb0973985a",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 122,
"avg_line_length": 36.72,
"alnum_prop": 0.6227305737109659,
"repo_name": "Fewbytes/cosmo-plugin-openstack-sg-provisioner",
"id": "ffc410c0c6daf1e13a37e921208df9c7395b1ecd",
"size": "2808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_sg_provisioner/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5497"
}
],
"symlink_target": ""
} |
"""
Tests for the blaze interface to the pipeline api.
"""
from __future__ import division
from collections import OrderedDict
from datetime import timedelta
from unittest import TestCase
import warnings
import blaze as bz
from datashape import dshape, var, Record
from nose_parameterized import parameterized
import numpy as np
from numpy.testing.utils import assert_array_almost_equal
import pandas as pd
from pandas.util.testing import assert_frame_equal
from toolz import keymap, valmap, concatv
from toolz.curried import operator as op
from zipline.pipeline import Pipeline, CustomFactor
from zipline.pipeline.data import DataSet, BoundColumn
from zipline.pipeline.engine import SimplePipelineEngine
from zipline.pipeline.loaders.blaze import (
from_blaze,
BlazeLoader,
NoDeltasWarning,
NonNumpyField,
NonPipelineField,
)
from zipline.utils.numpy_utils import repeat_last_axis
from zipline.utils.test_utils import tmp_asset_finder, make_simple_asset_info
# Convenience key functions for pulling metadata off of pipeline columns.
nameof = op.attrgetter('name')
dtypeof = op.attrgetter('dtype')

# Two asset universes, sids for 'ABC' and 'ABCD': the second exercises the
# case where the asset finder knows about more assets than the data holds.
asset_infos = (
    (make_simple_asset_info(
        tuple(map(ord, 'ABC')),
        pd.Timestamp(0),
        pd.Timestamp('2015'),
    ),),
    (make_simple_asset_info(
        tuple(map(ord, 'ABCD')),
        pd.Timestamp(0),
        pd.Timestamp('2015'),
    ),),
)
# Decorator that parameterizes a test over both asset universes.
with_extra_sid = parameterized.expand(asset_infos)
class BlazeToPipelineTestCase(TestCase):
    """Tests for `from_blaze`: converting blaze expressions into pipeline
    DataSets/BoundColumns and loading them through a BlazeLoader."""

    @classmethod
    def setUpClass(cls):
        # Three sids ('A', 'B', 'C') observed on three consecutive days.
        cls.dates = dates = pd.date_range('2014-01-01', '2014-01-03')
        dates = cls.dates.repeat(3)
        cls.sids = sids = ord('A'), ord('B'), ord('C')
        cls.df = df = pd.DataFrame({
            'sid': sids * 3,
            'value': (0, 1, 2, 1, 2, 3, 2, 3, 4),
            'asof_date': dates,
            'timestamp': dates,
        })
        cls.dshape = dshape("""
        var * {
            sid: ?int64,
            value: ?float64,
            asof_date: datetime,
            timestamp: datetime
        }
        """)
        # Macro (non-sid) variant: rows for sid 65 ('A') with sid dropped.
        cls.macro_df = df[df.sid == 65].drop('sid', axis=1)
        dshape_ = OrderedDict(cls.dshape.measure.fields)
        del dshape_['sid']
        cls.macro_dshape = var * Record(dshape_)
        # Loader used by tests that never actually run a pipeline.
        cls.garbage_loader = BlazeLoader()

    def test_tabular(self):
        """A tabular expression becomes a DataSet subclass named after it."""
        name = 'expr'
        expr = bz.Data(self.df, name=name, dshape=self.dshape)
        ds = from_blaze(
            expr,
            loader=self.garbage_loader,
            no_deltas_rule='ignore',
        )
        self.assertEqual(ds.__name__, name)
        self.assertTrue(issubclass(ds, DataSet))
        self.assertEqual(
            {c.name: c.dtype for c in ds._columns},
            {'sid': np.int64, 'value': np.float64},
        )
        # datetime fields are not valid pipeline columns.
        for field in ('timestamp', 'asof_date'):
            with self.assertRaises(AttributeError) as e:
                getattr(ds, field)
            self.assertIn("'%s'" % field, str(e.exception))
            self.assertIn("'datetime'", str(e.exception))
        # test memoization
        self.assertIs(
            from_blaze(
                expr,
                loader=self.garbage_loader,
                no_deltas_rule='ignore',
            ),
            ds,
        )

    def test_column(self):
        """A column expression becomes a BoundColumn on the parent DataSet."""
        exprname = 'expr'
        expr = bz.Data(self.df, name=exprname, dshape=self.dshape)
        value = from_blaze(
            expr.value,
            loader=self.garbage_loader,
            no_deltas_rule='ignore',
        )
        self.assertEqual(value.name, 'value')
        self.assertIsInstance(value, BoundColumn)
        self.assertEqual(value.dtype, np.float64)
        # test memoization
        self.assertIs(
            from_blaze(
                expr.value,
                loader=self.garbage_loader,
                no_deltas_rule='ignore',
            ),
            value,
        )
        self.assertIs(
            from_blaze(
                expr,
                loader=self.garbage_loader,
                no_deltas_rule='ignore',
            ).value,
            value,
        )
        # test the walk back up the tree
        self.assertIs(
            from_blaze(
                expr,
                loader=self.garbage_loader,
                no_deltas_rule='ignore',
            ),
            value.dataset,
        )
        self.assertEqual(value.dataset.__name__, exprname)

    def test_missing_asof(self):
        """An expression lacking the asof_date column is rejected."""
        expr = bz.Data(
            self.df.loc[:, ['sid', 'value', 'timestamp']],
            name='expr',
            dshape="""
            var * {
                sid: ?int64,
                value: float64,
                timestamp: datetime,
            }""",
        )
        with self.assertRaises(TypeError) as e:
            from_blaze(
                expr,
                loader=self.garbage_loader,
                no_deltas_rule='ignore',
            )
        self.assertIn("'asof_date'", str(e.exception))
        self.assertIn(repr(str(expr.dshape.measure)), str(e.exception))

    def test_auto_deltas(self):
        """A sibling `<name>_deltas` field is picked up automatically."""
        expr = bz.Data(
            {'ds': self.df,
             'ds_deltas': pd.DataFrame(columns=self.df.columns)},
            dshape=var * Record((
                ('ds', self.dshape.measure),
                ('ds_deltas', self.dshape.measure),
            )),
        )
        loader = BlazeLoader()
        ds = from_blaze(expr.ds, loader=loader)
        self.assertEqual(len(loader), 1)
        exprdata = loader[ds]
        self.assertTrue(exprdata.expr.isidentical(expr.ds))
        self.assertTrue(exprdata.deltas.isidentical(expr.ds_deltas))

    def test_auto_deltas_fail_warn(self):
        """no_deltas_rule='warn' emits a NoDeltasWarning when none found."""
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter('always')
            loader = BlazeLoader()
            expr = bz.Data(self.df, dshape=self.dshape)
            from_blaze(
                expr,
                loader=loader,
                no_deltas_rule='warn',
            )
        self.assertEqual(len(ws), 1)
        w = ws[0].message
        self.assertIsInstance(w, NoDeltasWarning)
        self.assertIn(str(expr), str(w))

    def test_auto_deltas_fail_raise(self):
        """no_deltas_rule='raise' raises ValueError when no deltas exist."""
        loader = BlazeLoader()
        expr = bz.Data(self.df, dshape=self.dshape)
        with self.assertRaises(ValueError) as e:
            from_blaze(
                expr,
                loader=loader,
                no_deltas_rule='raise',
            )
        self.assertIn(str(expr), str(e.exception))

    def test_non_numpy_field(self):
        """Fields with no numpy dtype become inaccessible NonNumpyField stubs."""
        expr = bz.Data(
            [],
            dshape="""
            var * {
                a: datetime,
                asof_date: datetime,
                timestamp: datetime,
            }""",
        )
        ds = from_blaze(
            expr,
            loader=self.garbage_loader,
            no_deltas_rule='ignore',
        )
        with self.assertRaises(AttributeError):
            ds.a
        self.assertIsInstance(object.__getattribute__(ds, 'a'), NonNumpyField)

    def test_non_pipeline_field(self):
        # NOTE: This test will fail if we ever allow string types in
        # the Pipeline API. If this happens, change the dtype of the `a` field
        # of expr to another type we don't allow.
        expr = bz.Data(
            [],
            dshape="""
            var * {
                a: string,
                asof_date: datetime,
                timestamp: datetime,
            }""",
        )
        ds = from_blaze(
            expr,
            loader=self.garbage_loader,
            no_deltas_rule='ignore',
        )
        with self.assertRaises(AttributeError):
            ds.a
        self.assertIsInstance(
            object.__getattribute__(ds, 'a'),
            NonPipelineField,
        )

    def test_complex_expr(self):
        """Only whole-table expressions may be combined with deltas."""
        expr = bz.Data(self.df, dshape=self.dshape)
        # put an Add in the table
        expr_with_add = bz.transform(expr, value=expr.value + 1)
        # Test that we can have complex expressions with no deltas
        from_blaze(
            expr_with_add,
            deltas=None,
            loader=self.garbage_loader,
        )
        with self.assertRaises(TypeError):
            from_blaze(
                expr.value + 1,  # put an Add in the column
                deltas=None,
                loader=self.garbage_loader,
            )
        deltas = bz.Data(
            pd.DataFrame(columns=self.df.columns),
            dshape=self.dshape,
        )
        with self.assertRaises(TypeError):
            from_blaze(
                expr_with_add,
                deltas=deltas,
                loader=self.garbage_loader,
            )
        with self.assertRaises(TypeError):
            from_blaze(
                expr.value + 1,
                deltas=deltas,
                loader=self.garbage_loader,
            )

    def test_id(self):
        """Loading `latest` values with no deltas reproduces the raw frame."""
        expr = bz.Data(self.df, name='expr', dshape=self.dshape)
        loader = BlazeLoader()
        ds = from_blaze(
            expr,
            loader=loader,
            no_deltas_rule='ignore',
        )
        p = Pipeline()
        p.add(ds.value.latest, 'value')
        dates = self.dates
        with tmp_asset_finder() as finder:
            result = SimplePipelineEngine(
                loader,
                dates,
                finder,
            ).run_pipeline(p, dates[0], dates[-1])
        expected = self.df.drop('asof_date', axis=1).set_index(
            ['timestamp', 'sid'],
        )
        expected.index = pd.MultiIndex.from_product((
            expected.index.levels[0],
            finder.retrieve_all(expected.index.levels[1]),
        ))
        assert_frame_equal(result, expected, check_dtype=False)

    def test_id_macro_dataset(self):
        """A macro (sid-less) dataset broadcasts its value across all assets."""
        expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
        loader = BlazeLoader()
        ds = from_blaze(
            expr,
            loader=loader,
            no_deltas_rule='ignore',
        )
        p = Pipeline()
        p.add(ds.value.latest, 'value')
        dates = self.dates
        asset_info = asset_infos[0][0]
        with tmp_asset_finder(asset_info) as finder:
            result = SimplePipelineEngine(
                loader,
                dates,
                finder,
            ).run_pipeline(p, dates[0], dates[-1])
        nassets = len(asset_info)
        expected = pd.DataFrame(
            list(concatv([0] * nassets, [1] * nassets, [2] * nassets)),
            index=pd.MultiIndex.from_product((
                self.macro_df.timestamp,
                finder.retrieve_all(asset_info.index),
            )),
            columns=('value',),
        )
        assert_frame_equal(result, expected, check_dtype=False)

    def _run_pipeline(self,
                      expr,
                      deltas,
                      expected_views,
                      expected_output,
                      finder,
                      calendar,
                      start,
                      end,
                      window_length,
                      compute_fn):
        """Run a single-factor pipeline over `expr`+`deltas`, asserting the
        windowed data seen by the factor (`expected_views`, keyed by date)
        and the final pipeline output (`expected_output`)."""
        loader = BlazeLoader()
        ds = from_blaze(
            expr,
            deltas,
            loader=loader,
            no_deltas_rule='raise',
        )
        p = Pipeline()
        # prevent unbound locals issue in the inner class
        window_length_ = window_length

        class TestFactor(CustomFactor):
            inputs = ds.value,
            window_length = window_length_

            def compute(self, today, assets, out, data):
                # Assert the exact rolling window the engine handed us.
                assert_array_almost_equal(data, expected_views[today])
                out[:] = compute_fn(data)

        p.add(TestFactor(), 'value')
        result = SimplePipelineEngine(
            loader,
            calendar,
            finder,
        ).run_pipeline(p, start, end)
        assert_frame_equal(
            result,
            expected_output,
            check_dtype=False,
        )

    @with_extra_sid
    def test_deltas(self, asset_info):
        """Deltas restate past rows (+10) once their timestamp is reached."""
        expr = bz.Data(self.df, name='expr', dshape=self.dshape)
        deltas = bz.Data(self.df, name='deltas', dshape=self.dshape)
        deltas = bz.transform(
            deltas,
            value=deltas.value + 10,
            timestamp=deltas.timestamp + timedelta(days=1),
        )
        expected_views = keymap(pd.Timestamp, {
            '2014-01-02': np.array([[10.0, 11.0, 12.0],
                                    [1.0, 2.0, 3.0]]),
            '2014-01-03': np.array([[11.0, 12.0, 13.0],
                                    [2.0, 3.0, 4.0]]),
            '2014-01-04': np.array([[12.0, 13.0, 14.0],
                                    [12.0, 13.0, 14.0]]),
        })
        nassets = len(asset_info)
        if nassets == 4:
            # The extra sid has no data and shows up as NaN columns.
            expected_views = valmap(
                lambda view: np.c_[view, [np.nan, np.nan]],
                expected_views,
            )
        with tmp_asset_finder(asset_info) as finder:
            expected_output = pd.DataFrame(
                list(concatv([12] * nassets, [13] * nassets, [14] * nassets)),
                index=pd.MultiIndex.from_product((
                    sorted(expected_views.keys()),
                    finder.retrieve_all(asset_info.index),
                )),
                columns=('value',),
            )
            dates = self.dates
            dates = dates.insert(len(dates), dates[-1] + timedelta(days=1))
            self._run_pipeline(
                expr,
                deltas,
                expected_views,
                expected_output,
                finder,
                calendar=dates,
                start=dates[1],
                end=dates[-1],
                window_length=2,
                compute_fn=np.nanmax,
            )

    def test_deltas_macro(self):
        """Deltas on a macro dataset restate the broadcast value."""
        asset_info = asset_infos[0][0]
        expr = bz.Data(self.macro_df, name='expr', dshape=self.macro_dshape)
        deltas = bz.Data(
            self.macro_df.iloc[:-1],
            name='deltas',
            dshape=self.macro_dshape,
        )
        deltas = bz.transform(
            deltas,
            value=deltas.value + 10,
            timestamp=deltas.timestamp + timedelta(days=1),
        )
        nassets = len(asset_info)
        expected_views = keymap(pd.Timestamp, {
            '2014-01-02': repeat_last_axis(np.array([10.0, 1.0]), nassets),
            '2014-01-03': repeat_last_axis(np.array([11.0, 2.0]), nassets),
        })
        with tmp_asset_finder(asset_info) as finder:
            expected_output = pd.DataFrame(
                list(concatv([10] * nassets, [11] * nassets)),
                index=pd.MultiIndex.from_product((
                    sorted(expected_views.keys()),
                    finder.retrieve_all(asset_info.index),
                )),
                columns=('value',),
            )
            dates = self.dates
            self._run_pipeline(
                expr,
                deltas,
                expected_views,
                expected_output,
                finder,
                calendar=dates,
                start=dates[1],
                end=dates[-1],
                window_length=2,
                compute_fn=np.nanmax,
            )

    @with_extra_sid
    def test_novel_deltas(self, asset_info):
        """Deltas landing over non-trading days apply on the next session."""
        base_dates = pd.DatetimeIndex([
            pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-04')
        ])
        repeated_dates = base_dates.repeat(3)
        baseline = pd.DataFrame({
            'sid': self.sids * 2,
            'value': (0, 1, 2, 1, 2, 3),
            'asof_date': repeated_dates,
            'timestamp': repeated_dates,
        })
        expr = bz.Data(baseline, name='expr', dshape=self.dshape)
        deltas = bz.Data(baseline, name='deltas', dshape=self.dshape)
        deltas = bz.transform(
            deltas,
            value=deltas.value + 10,
            timestamp=deltas.timestamp + timedelta(days=1),
        )
        expected_views = keymap(pd.Timestamp, {
            '2014-01-03': np.array([[10.0, 11.0, 12.0],
                                    [10.0, 11.0, 12.0],
                                    [10.0, 11.0, 12.0]]),
            '2014-01-06': np.array([[10.0, 11.0, 12.0],
                                    [10.0, 11.0, 12.0],
                                    [11.0, 12.0, 13.0]]),
        })
        if len(asset_info) == 4:
            # The extra sid has no data and shows up as NaN columns.
            expected_views = valmap(
                lambda view: np.c_[view, [np.nan, np.nan, np.nan]],
                expected_views,
            )
            expected_output_buffer = [10, 11, 12, np.nan, 11, 12, 13, np.nan]
        else:
            expected_output_buffer = [10, 11, 12, 11, 12, 13]
        cal = pd.DatetimeIndex([
            pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-02'),
            pd.Timestamp('2014-01-03'),
            # omitting the 4th and 5th to simulate a weekend
            pd.Timestamp('2014-01-06'),
        ])
        with tmp_asset_finder(asset_info) as finder:
            expected_output = pd.DataFrame(
                expected_output_buffer,
                index=pd.MultiIndex.from_product((
                    sorted(expected_views.keys()),
                    finder.retrieve_all(asset_info.index),
                )),
                columns=('value',),
            )
            self._run_pipeline(
                expr,
                deltas,
                expected_views,
                expected_output,
                finder,
                calendar=cal,
                start=cal[2],
                end=cal[-1],
                window_length=3,
                compute_fn=op.itemgetter(-1),
            )

    def test_novel_deltas_macro(self):
        """Macro deltas landing over non-trading days apply next session."""
        asset_info = asset_infos[0][0]
        base_dates = pd.DatetimeIndex([
            pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-04')
        ])
        baseline = pd.DataFrame({
            'value': (0, 1),
            'asof_date': base_dates,
            'timestamp': base_dates,
        })
        expr = bz.Data(baseline, name='expr', dshape=self.macro_dshape)
        deltas = bz.Data(baseline, name='deltas', dshape=self.macro_dshape)
        deltas = bz.transform(
            deltas,
            value=deltas.value + 10,
            timestamp=deltas.timestamp + timedelta(days=1),
        )
        nassets = len(asset_info)
        expected_views = keymap(pd.Timestamp, {
            '2014-01-03': repeat_last_axis(
                np.array([10.0, 10.0, 10.0]),
                nassets,
            ),
            '2014-01-06': repeat_last_axis(
                np.array([10.0, 10.0, 11.0]),
                nassets,
            ),
        })
        cal = pd.DatetimeIndex([
            pd.Timestamp('2014-01-01'),
            pd.Timestamp('2014-01-02'),
            pd.Timestamp('2014-01-03'),
            # omitting the 4th and 5th to simulate a weekend
            pd.Timestamp('2014-01-06'),
        ])
        with tmp_asset_finder(asset_info) as finder:
            expected_output = pd.DataFrame(
                list(concatv([10] * nassets, [11] * nassets)),
                index=pd.MultiIndex.from_product((
                    sorted(expected_views.keys()),
                    finder.retrieve_all(asset_info.index),
                )),
                columns=('value',),
            )
            self._run_pipeline(
                expr,
                deltas,
                expected_views,
                expected_output,
                finder,
                calendar=cal,
                start=cal[2],
                end=cal[-1],
                window_length=3,
                compute_fn=op.itemgetter(-1),
            )
| {
"content_hash": "bf44c46622023ea966cb7aa9c0523d41",
"timestamp": "",
"source": "github",
"line_count": 617,
"max_line_length": 78,
"avg_line_length": 31.905996758508913,
"alnum_prop": 0.487656202377324,
"repo_name": "jimgoo/zipline-fork",
"id": "c42f4e1490003b67ba3a006445f9059b748ff70a",
"size": "19686",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/pipeline/test_blaze.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "168399"
},
{
"name": "Python",
"bytes": "1549402"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
} |
import re
import os.path
from io import open
from setuptools import find_packages, setup

# Change the PACKAGE_NAME only to change folder and different name
PACKAGE_NAME = "azure-mgmt-marketplaceordering"
PACKAGE_PPRINT_NAME = "Market Place Ordering"

# a-b-c => a/b/c
package_folder_path = PACKAGE_NAME.replace('-', '/')
# a-b-c => a.b.c
namespace_name = PACKAGE_NAME.replace('-', '.')

# Version extraction inspired from 'requests': read VERSION from version.py
# inside the package folder, falling back to _version.py.
_version_file = os.path.join(package_folder_path, 'version.py')
if not os.path.exists(_version_file):
    _version_file = os.path.join(package_folder_path, '_version.py')
with open(_version_file, 'r') as fd:
    version = re.search(r'^VERSION\s*=\s*[\'"]([^\'"]*)[\'"]',
                        fd.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('Cannot find version information')

with open('README.md', encoding='utf-8') as f:
    readme = f.read()
with open('CHANGELOG.md', encoding='utf-8') as f:
    changelog = f.read()

setup(
    name=PACKAGE_NAME,
    version=version,
    description='Microsoft Azure {} Client Library for Python'.format(PACKAGE_PPRINT_NAME),
    long_description=readme + '\n\n' + changelog,
    long_description_content_type='text/markdown',
    license='MIT License',
    author='Microsoft Corporation',
    author_email='azpysdkhelp@microsoft.com',
    url='https://github.com/Azure/azure-sdk-for-python',
    keywords="azure, azure sdk",  # update with search keywords relevant to the azure service / product
    classifiers=[
        'Development Status :: 4 - Beta',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3 :: Only',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
        'License :: OSI Approved :: MIT License',
    ],
    zip_safe=False,
    packages=find_packages(exclude=[
        'tests',
        # Exclude packages that will be covered by PEP420 or nspkg
        'azure',
        'azure.mgmt',
    ]),
    include_package_data=True,
    package_data={
        # Bug fix: package_data keys must be package names; the previous key
        # 'pytyped' matched no package, so the PEP 561 'py.typed' marker was
        # silently dropped from built distributions.
        namespace_name: ['py.typed'],
    },
    install_requires=[
        "msrest>=0.7.1",
        "azure-common~=1.1",
        "azure-mgmt-core>=1.3.2,<2.0.0",
        "typing-extensions>=4.3.0; python_version<'3.8.0'",
    ],
    python_requires=">=3.7"
)
| {
"content_hash": "796c2d58604363152e986e16e73ab26b",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 103,
"avg_line_length": 35.042857142857144,
"alnum_prop": 0.62291072156543,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c1ab69102ad07eef37579d074173fa185b3bddb0",
"size": "2785",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/marketplaceordering/azure-mgmt-marketplaceordering/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
# Re-export the async client as this sub-package's only public name.
from ._farm_beats_client import FarmBeatsClient
__all__ = ['FarmBeatsClient']
| {
"content_hash": "16035e01f095056480803101f9dbc2dc",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 47,
"avg_line_length": 39,
"alnum_prop": 0.7435897435897436,
"repo_name": "Azure/azure-sdk-for-python",
"id": "8250e30455784bd0f727bf199e470f56e3d218e7",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/agrifood/azure-agrifood-farming/azure/agrifood/farming/aio/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import os
import re
from slack_bolt import App
from slack_bolt.adapter.socket_mode import SocketModeHandler
from sympy import sympify, SympifyError
# Bolt application authenticated with the bot token from the environment.
app = App(token=os.environ["SLACK_BOT_TOKEN"])
@app.message(re.compile(r"^([-+*/^%!().\d\s]+)$"))
def calc(message, context, say):
    """Evaluate an arithmetic-looking message with sympy and reply with the result."""
    formula = context["matches"][0]
    try:
        simplified = sympify(formula)  # sympy simplifies/evaluates the formula
    except SympifyError:
        # Matched the regex but is not a valid expression; stay silent.
        return
    # Reply as an int when exact, otherwise as a float, thousands-separated.
    caster = int if simplified.is_Integer else float
    say(f"{caster(simplified):,}")
if __name__ == "__main__":
    # Connect to Slack over Socket Mode; start() blocks until interrupted.
    app_token = os.environ["SLACK_APP_TOKEN"]
    SocketModeHandler(app, app_token).start()
| {
"content_hash": "7a89b6e6933564525968b1236fd0ba02",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 60,
"avg_line_length": 27.555555555555557,
"alnum_prop": 0.6276881720430108,
"repo_name": "takanory/slides",
"id": "c1952c183c22cd0c3cff1b566b25d43c1778343f",
"size": "744",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "slides/20220713europython/code/app-sympy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1526"
},
{
"name": "Makefile",
"bytes": "723"
},
{
"name": "Python",
"bytes": "49923"
}
],
"symlink_target": ""
} |
"""
.. module:: win_symlink
:platform: Windows
:synopsis:
Add support in Python for symlinks under Windows Vista and up, if not
already present.
.. moduleauthor:: Brandon Huber
"""
#
# Original Creation Date: 2016-05-07
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import os.path
import re
import subprocess
import sys
__version__ = '0.1a'
def get_comspec(_dbg_os_environ=os.environ):
    """Figure out what %COMSPEC% is.

    This could be harder than it sounds; there's no guarantee that keys for
    os.environ are case-insensitive in Cygwin.

    The function first checks for %COMSPEC%, then %ComSpec% (a common
    capitalization), then does a case-insensitive search through the
    environment for any match for "comspec".

    Finally, if no match was found, "cmd.exe" will be returned.

    :param _dbg_os_environ:
        The environment mapping to consult; injectable for testing.

    :return: The value for %COMSPEC%, or "cmd.exe" if %COMSPEC% is not set.
    :rtype: str
    """
    # Bug fix: the injectable mapping was accepted but ignored -- the body
    # previously read os.environ directly, defeating the _dbg_ test hook.
    if _dbg_os_environ.get('COMSPEC', ''):
        return _dbg_os_environ['COMSPEC']
    if _dbg_os_environ.get('ComSpec', ''):
        return _dbg_os_environ['ComSpec']
    for k, v in _dbg_os_environ.items():
        if k.lower() == 'comspec' and v:
            return v
    return 'cmd.exe'
def win_symlink(existing, new, is_dir=None, comspec=None,
                _dbg_subprocess=subprocess, _dbg_os_isdir=os.path.isdir
                ):
    """Create a symlink using "cmd.exe /c mklink".

    :platform: Windows

    :param str existing:
        The existing file or directory that the symlink will point to.

    :param str new:
        The pathname of the new symlink.

    :param bool is_dir:
        Whether or not ``existing`` is a file or directory.  If ``None``,
        this function will determine it automatically.

    :param str comspec:
        The command interpreter to use.  This is almost always ``cmd.exe``.
        If passed in as ``None``, then the proper value will be automatically
        determined via :func:`get_comspec`.

    :raise ValueError: When ``new`` already exists.
    :raise OSError: (errno 17) when mklink reports ``new`` already exists.
    :raise subprocess.CalledProcessError:
        When the call to %COMSPEC% /c mklink fails in an unforeseen way.
    """
    if os.path.exists(new):
        raise ValueError(
            'Specified symlink name ("{}") already exists!'.format(new)
        )
    if is_dir is None:
        is_dir = os.path.isdir(existing)
    if not comspec:
        comspec = get_comspec()
    # cmd.exe switches: /d disables AutoRun, /e:off and /f:off turn off
    # extensions/completion, /q is quiet, /v:off disables delayed expansion.
    # (Bug fix: '/e:off' was previously passed twice.)
    cmd = [
        comspec, '/c', '/d', '/e:off', '/f:off', '/q', '/v:off',
        'mklink'
    ]
    if is_dir:
        # mklink's own /d switch: create a directory symlink.
        cmd.append('/d')
    cmd.extend([ new, existing ])
    subp = _dbg_subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    out, err = subp.communicate()
    # Bug fix: the checks below previously referenced the undefined names
    # ``stdout``/``stderr``; communicate() results are ``out``/``err``.
    # Decode bytes (Python 3 pipes) so the substring checks work.
    # NOTE(review): cmd.exe may emit the OEM codepage, not UTF-8 -- confirm.
    if isinstance(out, bytes):
        out = out.decode(errors='replace')
    if isinstance(err, bytes):
        err = err.decode(errors='replace')
    ##########################################################################
    #
    # Handle ``new`` already existing, and is a file.
    #
    if (
        subp.returncode == 1 and
        'Cannot create a file when that file already exists.' in err
    ):
        #TODO: Make sure that os.strerror(17) gives us an appropriate string
        # under Windows!
        raise OSError(17, os.strerror(17))
    #
    ##########################################################################
    #
    # Handle ``new`` already existing, and is a directory.
    #
    if (
        subp.returncode == 1 and
        _dbg_os_isdir(existing) and
        'Access is denied.' in err
    ):
        #TODO: Make sure that os.strerror(17) gives us an appropriate string
        # under Windows!
        raise OSError(17, os.strerror(17))
    #
    #
    ##########################################################################
    #
    # Handle something going wrong in a way we haven't foreseen.
    #
    if subp.returncode != 0:
        sys.stdout.write(out)
        sys.stderr.write(err)
        raise subprocess.CalledProcessError(subp.returncode, cmd)
#fsutil reparsepoint query bar.txt
#Reparse Tag Value : 0xa000000c
#Tag value: Microsoft
#Tag value: Name Surrogate
#Tag value: Symbolic Link
#
#Reparse Data Length: 0x00000028
#Reparse Data:
#0000: 0e 00 0e 00 00 00 0e 00 01 00 00 00 66 00 6f 00 ............f.o.
#0010: 6f 00 2e 00 74 00 78 00 74 00 66 00 6f 00 6f 00 o...t.x.t.f.o.o.
#0020: 2e 00 74 00 78 00 74 00 ..t.x.t.
#dir /n /4 /a:l
# Volume in drive C has no label.
# Volume Serial Number is AC3B-91B2
#
# Directory of C:\Users\baz\Documents
#
#09/26/2016 11:34 PM <SYMLINK> bar.txt [foo.txt]
#09/26/2016 11:52 PM <SYMLINKD> f2 [f1]
def win_is_symlink(pathname, _dbg_subprocess=subprocess):
    """Check to see if a pathname is a Windows symlink.

    :platform: Windows

    :param str pathname:
        The pathname to examine.

    :return: True if ``pathname`` is a symlink. False otherwise.
    :rtype: bool
    """
    # Deliberate stub: the fsutil-based probe below is parked behind this
    # raise until it is finished; everything after this line is unreachable.
    raise NotImplementedError()
    subp = _dbg_subprocess.Popen(
        ( 'fsutil', 'reparsepoint', 'query', pathname ),
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    stdout, stderr = subp.communicate()
    # NOTE(review): on Python 3 ``stdout`` is bytes, so this membership test
    # would need a decode step if the code above is ever enabled.
    return 'Symbolic Link' in stdout
def win_readlink(
        pathname, _dbg_subprocess=subprocess, _dbg_get_comspec=None
        ):
    """Resolve a Windows symlink by parsing ``dir /n /4`` output.

    :platform: Windows

    :param str pathname:
        The pathname of the suspected symlink.

    :param _dbg_get_comspec:
        Callable returning the command interpreter; defaults (lazily) to
        :func:`get_comspec`.  Injectable for testing.

    :return:
        The target of the symlink, or ``None`` when ``pathname`` does not
        appear in the directory listing as a symlink.

    :raise subprocess.CalledProcessError: when the ``dir`` command fails.
    """
    # Resolve the default lazily so the module-level sibling is only needed
    # when no test double is injected.
    if _dbg_get_comspec is None:
        _dbg_get_comspec = get_comspec
    dirname, basename = os.path.split(pathname)
    # Bug fix: ``dir`` was previously run with no path argument, so it
    # listed the current directory rather than the directory that actually
    # contains ``pathname``.
    cmd = ( _dbg_get_comspec(), '/c', 'dir', '/n', '/4', dirname or '.' )
    subp = _dbg_subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    out, err = subp.communicate()
    # Decode pipe output on Python 3 so the slicing/regex below sees str.
    if isinstance(out, bytes):
        out = out.decode(errors='replace')
    if isinstance(err, bytes):
        err = err.decode(errors='replace')
    if subp.returncode != 0:
        # Bug fix: previously wrote the undefined name ``stderr``.
        sys.stderr.write(err)
        raise subprocess.CalledProcessError(subp.returncode, cmd)
    for x in out.splitlines():
        # Data lines start with a date at column 0 and put the entry name at
        # column 25 (see the sample ``dir /n /4`` output above).
        if len(x) < 25 or x[0] == ' ':
            continue
        s = x[25:]
        # Bug fix: ``(?<name>...)`` is .NET regex syntax; Python spells named
        # groups ``(?P<name>...)`` -- the old pattern raised re.error.
        m = re.match(r'<SYMLINKD?> +(?P<entity_name>.+) \[(?P<orig_name>.+)\]', s)
        if not m:
            continue
        if m.group('entity_name') != basename:
            continue
        return m.group('orig_name')
    return None
| {
"content_hash": "8388ee0343fe1234b2b8726eb930aec5",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 80,
"avg_line_length": 29.393203883495147,
"alnum_prop": 0.5846407927332783,
"repo_name": "shalesbridge/steamssdmanager",
"id": "360de4919c26b5c3030b7defa68a6193fda1b27b",
"size": "6078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "win_symlink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44074"
}
],
"symlink_target": ""
} |
import datetime
from dateutil.parser import parse
import pytz
import app_config
import logging
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
"""
Tools for managing charts
"""
class TimeTools:
    """Helpers for measuring story age and picking the check-in bucket."""

    @staticmethod
    def seconds_since(a):
        """Return seconds elapsed since aware datetime ``a``."""
        now = datetime.datetime.now(pytz.timezone(app_config.PROJECT_TIMEZONE))
        return (now - a).total_seconds()

    @staticmethod
    def hours_since(a):
        """Return whole hours elapsed since ``a`` (datetime or parseable string)."""
        # For some reason, dates with timezones tend to be returned as unicode
        if type(a) is not datetime.datetime:
            a = parse(a)
        now = datetime.datetime.now(pytz.timezone(app_config.PROJECT_TIMEZONE))
        seconds = (now - a).total_seconds()
        hours = int(seconds / 60 / 60)
        return hours

    @staticmethod
    def time_bucket(t):
        """Map publication time ``t`` to the check-in bucket that is now due.

        Returns a label such as '4 hours' or 'Two days', or False when it is
        too soon to check (or ``t`` is falsy).
        """
        if not t:
            return False

        # For some reason, dates with timezones tend to be returned as unicode
        if type(t) is not datetime.datetime:
            t = parse(t)

        seconds = TimeTools.seconds_since(t)

        # 7th message, 2nd day midnight + 10 hours
        # 8th message, 2nd day midnight + 15 hours
        second_day_midnight_after_publishing = t + datetime.timedelta(days=2)
        # Bug fix: datetime.replace() returns a NEW object; the result was
        # previously discarded, so the time-of-day was never zeroed out.
        second_day_midnight_after_publishing = \
            second_day_midnight_after_publishing.replace(
                hour=0, minute=0, second=0, microsecond=0)
        seconds_since_second_day = TimeTools.seconds_since(
            second_day_midnight_after_publishing)
        if seconds_since_second_day > 15 * 60 * 60:  # 15 hours
            return 'Two and a half days'
        if seconds_since_second_day > 10 * 60 * 60:  # 10 hours
            return 'Two days'

        # 5th message, 1st day midnight + 10 hours
        # 6th message, 1st day midnight + 15 hours
        midnight_after_publishing = t + datetime.timedelta(days=1)
        midnight_after_publishing = midnight_after_publishing.replace(
            hour=0, minute=0, second=0, microsecond=0)
        seconds_since_first_day = TimeTools.seconds_since(
            midnight_after_publishing)
        # Bug fix: both branches below previously re-tested
        # ``seconds_since_second_day > 10h``, so the computed
        # ``seconds_since_first_day`` and the 15h threshold were never used.
        if seconds_since_first_day > 15 * 60 * 60:  # 15 hours
            return 'A day and a half'
        if seconds_since_first_day > 10 * 60 * 60:  # 10 hours
            return 'A day'

        # 2nd message, tracking start + 4 hours
        # 3rd message, tracking start + 8 hours
        # 4th message, tracking start + 12 hours
        if seconds > 12 * 60 * 60:  # 12 hours
            return '12 hours'
        if seconds > 8 * 60 * 60:  # 8 hours
            return '8 hours'
        if seconds > 4 * 60 * 60:  # 4 hours
            return '4 hours'

        # Too soon to check
        return False

    @staticmethod
    def humanist_time_bucket(linger):
        """Render ``{'minutes': m, 'seconds': s}`` as e.g. '1 minute 30 seconds'."""
        time = ''
        if linger['minutes'] > 0:
            time += str(linger['minutes'])
            if linger['minutes'] == 1:
                time += ' minute'
            else:
                time += ' minutes'
        if linger['seconds'] > 0:
            if linger['minutes'] > 0:
                time += ' '
            time += str(linger['seconds'])
            if linger['seconds'] == 1:
                time += ' second'
            else:
                time += ' seconds'
        return time
| {
"content_hash": "f7e0f60c55675ec6e734057504b6387b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 101,
"avg_line_length": 30.78640776699029,
"alnum_prop": 0.575212866603595,
"repo_name": "thecarebot/carebot",
"id": "3727dce63a1badf2b675d43b7a7f73abb0d95ba7",
"size": "3171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util/time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "203"
},
{
"name": "HTML",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "126000"
},
{
"name": "Shell",
"bytes": "128"
}
],
"symlink_target": ""
} |
import base64
import logging
import re
import time
from html import unescape as html_unescape
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
import streamlink
from streamlink.exceptions import FatalPluginError
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.plugin.api.utils import itertags
from streamlink.plugin.api.validate import Schema
from streamlink.stream.dash import DASHStream
from streamlink.utils.parse import parse_json
log = logging.getLogger(__name__)
class SteamLoginFailed(Exception):
    """Raised when authenticating to Steam fails with a known error message."""
    pass
@pluginmatcher(re.compile(
    r"https?://steamcommunity\.com/broadcast/watch/(\d+)"
))
@pluginmatcher(re.compile(
    r"https?://steam\.tv/(\w+)"
))
class SteamBroadcastPlugin(Plugin):
    """Streamlink plugin for Steam broadcasts, with optional Steam login."""
    _watch_broadcast_url = "https://steamcommunity.com/broadcast/watch/"
    _get_broadcast_url = "https://steamcommunity.com/broadcast/getbroadcastmpd/"
    _user_agent = "streamlink/{}".format(streamlink.__version__)
    # Expected JSON shape of the getbroadcastmpd response.
    _broadcast_schema = Schema({
        "success": validate.any("ready", "unavailable", "waiting", "waiting_to_start", "waiting_for_start"),
        "retry": int,
        "broadcastid": validate.any(validate.text, int),
        validate.optional("url"): validate.url(),
        validate.optional("viewertoken"): validate.text
    })
    _get_rsa_key_url = "https://steamcommunity.com/login/getrsakey/"
    # getrsakey returns the RSA modulus/exponent as hex strings; the
    # transforms convert them to ints.
    _rsa_key_schema = validate.Schema({
        "publickey_exp": validate.all(validate.text, validate.transform(lambda x: int(x, 16))),
        "publickey_mod": validate.all(validate.text, validate.transform(lambda x: int(x, 16))),
        "success": True,
        "timestamp": validate.text,
        "token_gid": validate.text
    })
    _dologin_url = "https://steamcommunity.com/login/dologin/"
    # Expected JSON shape of the dologin response, including the optional
    # captcha / email-auth / two-factor follow-up fields.
    _dologin_schema = validate.Schema({
        "success": bool,
        "requires_twofactor": bool,
        validate.optional("message"): validate.text,
        validate.optional("emailauth_needed"): bool,
        validate.optional("emaildomain"): validate.text,
        validate.optional("emailsteamid"): validate.text,
        validate.optional("login_complete"): bool,
        validate.optional("captcha_needed"): bool,
        validate.optional("captcha_gid"): validate.any(validate.text, int)
    })
    _captcha_url = "https://steamcommunity.com/public/captcha.php?gid={}"
    arguments = PluginArguments(
        PluginArgument(
            "email",
            metavar="EMAIL",
            requires=["password"],
            help="""
            A Steam account email address to access friends/private streams
            """
        ),
        PluginArgument(
            "password",
            metavar="PASSWORD",
            sensitive=True,
            help="""
            A Steam account password to use with --steam-email.
            """
        ))
    def __init__(self, url):
        """Tag every HTTP request from this session with a streamlink UA."""
        super().__init__(url)
        self.session.http.headers["User-Agent"] = self._user_agent
    @property
    def donotcache(self):
        # Millisecond timestamp used as a cache-busting query parameter.
        return str(int(time.time() * 1000))
    def encrypt_password(self, email, password):
        """
        Get the RSA key for the user and encrypt the users password
        :param email: steam account
        :param password: password for account
        :return: tuple of (base64-encoded encrypted password, RSA key timestamp)
        """
        res = self.session.http.get(self._get_rsa_key_url, params=dict(username=email, donotcache=self.donotcache))
        rsadata = self.session.http.json(res, schema=self._rsa_key_schema)
        rsa = RSA.construct((rsadata["publickey_mod"], rsadata["publickey_exp"]))
        cipher = PKCS1_v1_5.new(rsa)
        return base64.b64encode(cipher.encrypt(password.encode("utf8"))), rsadata["timestamp"]
    def dologin(self, email, password, emailauth="", emailsteamid="", captchagid="-1", captcha_text="", twofactorcode=""):
        """
        Logs in to Steam.

        On a failed attempt that only needs extra verification (captcha,
        email auth code, or two-factor code), prompts the user and retries
        recursively with the collected values.

        :return: True on successful login, False when the user gives up.
        :raise SteamLoginFailed: on a hard login error.
        """
        epassword, rsatimestamp = self.encrypt_password(email, password)
        login_data = {
            'username': email,
            "password": epassword,
            "emailauth": emailauth,
            "loginfriendlyname": "Streamlink",
            "captchagid": captchagid,
            "captcha_text": captcha_text,
            "emailsteamid": emailsteamid,
            "rsatimestamp": rsatimestamp,
            "remember_login": True,
            "donotcache": self.donotcache,
            "twofactorcode": twofactorcode
        }
        res = self.session.http.post(self._dologin_url, data=login_data)
        resp = self.session.http.json(res, schema=self._dologin_schema)
        if not resp["success"]:
            if resp.get("captcha_needed"):
                # special case for captcha
                captchagid = resp["captcha_gid"]
                log.error("Captcha result required, open this URL to see the captcha: {}".format(
                    self._captcha_url.format(captchagid)))
                try:
                    captcha_text = self.input_ask("Captcha text")
                except FatalPluginError:
                    captcha_text = None
                if not captcha_text:
                    return False
            else:
                # If the user must enter the code that was emailed to them
                if resp.get("emailauth_needed"):
                    if not emailauth:
                        try:
                            emailauth = self.input_ask("Email auth code required")
                        except FatalPluginError:
                            emailauth = None
                        if not emailauth:
                            return False
                    else:
                        # An email code was supplied but still rejected.
                        raise SteamLoginFailed("Email auth key error")
                # If the user must enter a two factor auth code
                if resp.get("requires_twofactor"):
                    try:
                        twofactorcode = self.input_ask("Two factor auth code required")
                    except FatalPluginError:
                        twofactorcode = None
                    if not twofactorcode:
                        return False
                if resp.get("message"):
                    raise SteamLoginFailed(resp["message"])
            # Retry with whatever verification input was gathered above.
            return self.dologin(email, password,
                                emailauth=emailauth,
                                emailsteamid=resp.get("emailsteamid", ""),
                                captcha_text=captcha_text,
                                captchagid=captchagid,
                                twofactorcode=twofactorcode)
        elif resp.get("login_complete"):
            return True
        else:
            # NOTE(review): message typo -- "when wrong" should read "went wrong".
            log.error("Something when wrong when logging in to Steam")
            return False
    def login(self, email, password):
        """Start a login flow for ``email``; see :meth:`dologin`."""
        log.info("Attempting to login to Steam as {}".format(email))
        return self.dologin(email, password)
    def _get_broadcast_stream(self, steamid, viewertoken=0, sessionid=None):
        """Fetch and validate the getbroadcastmpd JSON for a broadcaster."""
        log.debug("Getting broadcast stream: sessionid={0}".format(sessionid))
        res = self.session.http.get(self._get_broadcast_url,
                                    params=dict(broadcastid=0,
                                                steamid=steamid,
                                                viewertoken=viewertoken,
                                                sessionid=sessionid))
        return self.session.http.json(res, schema=self._broadcast_schema)
    def _get_streams(self):
        """Resolve the broadcast to DASH streams, logging in first if asked.

        Polls getbroadcastmpd until the broadcast is "ready" (honouring the
        server-provided retry delay) or reported "unavailable".
        """
        streamdata = None
        if self.get_option("email"):
            if self.login(self.get_option("email"), self.get_option("password")):
                log.info("Logged in as {0}".format(self.get_option("email")))
                self.save_cookies(lambda c: "steamMachineAuth" in c.name)
        # Handle steam.tv URLs
        if self.matches[1] is not None:
            # extract the steam ID from the page
            res = self.session.http.get(self.url)
            for div in itertags(res.text, 'div'):
                if div.attributes.get("id") == "webui_config":
                    broadcast_data = html_unescape(div.attributes.get("data-broadcast"))
                    steamid = parse_json(broadcast_data).get("steamid")
                    self.url = self._watch_broadcast_url + steamid
        # extract the steam ID from the URL
        steamid = self.match.group(1)
        res = self.session.http.get(self.url)  # get the page to set some cookies
        sessionid = res.cookies.get('sessionid')
        while streamdata is None or streamdata["success"] in ("waiting", "waiting_for_start"):
            streamdata = self._get_broadcast_stream(steamid,
                                                    sessionid=sessionid)
            if streamdata["success"] == "ready":
                return DASHStream.parse_manifest(self.session, streamdata["url"])
            elif streamdata["success"] == "unavailable":
                log.error("This stream is currently unavailable")
                return
            else:
                # Server tells us how long to wait (milliseconds) before retrying.
                r = streamdata["retry"] / 1000.0
                log.info("Waiting for stream, will retry again in {} seconds...".format(r))
                time.sleep(r)
# Entry point streamlink looks up when loading this plugin module.
__plugin__ = SteamBroadcastPlugin
| {
"content_hash": "901224e0df946d31275acd28423a7cbd",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 122,
"avg_line_length": 40.646288209606986,
"alnum_prop": 0.5711216158143533,
"repo_name": "melmorabity/streamlink",
"id": "058461ef024fb5deec899dff20effdbdaf2434cf",
"size": "9308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/steam.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1537432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
__author__ = "Peter Shipley"
import os
from ISY.IsyEvent import ISYEvent
def main() :
    """Subscribe to an ISY controller's event stream and print each event."""
    server = ISYEvent()
    # you can subscribe to multiple devices
    # server.subscribe('10.1.1.25')
    # Connection details come from the environment, with defaults.
    server.subscribe(
        addr=os.getenv('ISY_ADDR', '10.1.1.36'),
        userl=os.getenv('ISY_USER', "admin"),
        userp=os.getenv('ISY_PASS', "admin")
    )
    server.set_process_func(ISYEvent.print_event, "")
    try:
        print('Use Control-C to exit')
        server.events_loop() #no return
    # for d in server.event_iter( ignorelist=["_0", "_11"] ):
    # server.print_event(d, "")
    except KeyboardInterrupt:
        print('Exiting')
if __name__ == '__main__' :
    # Runs until Control-C, then exits with a success status.
    main()
    exit(0)
| {
"content_hash": "8e52b45f028b12e3fd7e2dc94b8e26d9",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 65,
"avg_line_length": 21.176470588235293,
"alnum_prop": 0.5708333333333333,
"repo_name": "sjthespian/ISYlib-python",
"id": "a8ec5d84cec6b0043f94498e238c976cebf86034",
"size": "743",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bin/isy_showevents.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "1618"
},
{
"name": "Python",
"bytes": "303452"
}
],
"symlink_target": ""
} |
def detail():
    """Show one catalog item; redirect home when no item id is given.

    Returns locals() so the view sees ``item_id`` and ``item``.
    """
    # Bug fix: the fallback redirect called the undefined name ``UR``;
    # the web2py URL helper is ``URL`` (used correctly elsewhere in
    # this controller).
    item_id = request.args(0) or redirect(URL(c='default', f='index'))
    item = db(db.item.id == item_id).select()
    return locals()
def item_add_location_list():
    """List all items alphabetically so the user can pick one to add a location to."""
    # Returned via locals(), so the view sees this as ``itens``.
    itens = db(db.item.id > 0).select(orderby=db.item.item_name)
    return locals()
@auth.requires_login()
def add_location():
    """Create a new location record for an item (login required).

    Returns locals() so the view sees ``item``, ``form`` and friends.
    """
    # Bug fix: the fallback redirect called the undefined name ``UR``;
    # the web2py URL helper is ``URL`` (used correctly below).
    item_id = request.args(0) or redirect(URL(c='default', f='index'))
    item_slug = request.args(1)
    item = db(db.item.id == item_id).select()
    form = crud.create(db.item_location,
        message = T("Item location added with success."),
        next = URL(c='catalog', f='detail', args=[item_id, item_slug])
    )
    return locals()
| {
"content_hash": "aa8a2ad0682472bccc5e26aa0452d8d8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 70,
"avg_line_length": 28.782608695652176,
"alnum_prop": 0.6117824773413897,
"repo_name": "kaaete/web_app",
"id": "6c32c9621e8289b73414b75d5b00c1966843db8c",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/catalog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21909"
},
{
"name": "HTML",
"bytes": "67115"
},
{
"name": "JavaScript",
"bytes": "35596"
},
{
"name": "Python",
"bytes": "48496"
}
],
"symlink_target": ""
} |
"""Tensor utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
__all__ = [
    'assert_same_float_dtype',
    'assert_scalar',
    'assert_scalar_int',
    'convert_to_tensor_or_sparse_tensor',
    'is_tensor',
    'reduce_sum_n',
    'remove_squeezable_dimensions',
    'with_shape',
    'with_same_shape']

# Temporary aliases for backwards compatibility: these implementations now
# live in tensor_util / check_ops / sparse_tensor.
is_tensor = tensor_util.is_tensor
assert_same_float_dtype = check_ops.assert_same_float_dtype
assert_scalar = check_ops.assert_scalar

convert_to_tensor_or_sparse_tensor = (
    sparse_tensor.convert_to_tensor_or_sparse_tensor)
def reduce_sum_n(tensors, name=None):
  """Reduce tensors to a scalar sum.

  This reduces each tensor in `tensors` to a scalar via `tf.reduce_sum`, then
  adds them via `tf.add_n`.

  Args:
    tensors: List of tensors, all of the same numeric type.
    name: Tensor name, and scope for all other ops.

  Returns:
    Total loss tensor, or None if no losses have been configured.

  Raises:
    ValueError: if `tensors` is missing or empty.
  """
  if not tensors:
    raise ValueError('No tensors provided.')
  with ops.name_scope(name, 'reduce_sum_n', tensors) as name_scope:
    # Name each per-tensor sum after its source op for readable graphs.
    tensors = [
        math_ops.reduce_sum(t, name='%s/sum' % t.op.name) for t in tensors]
    if len(tensors) == 1:
      # Single input: no add_n node needed.
      return tensors[0]
    return math_ops.add_n(tensors, name=name_scope)
def remove_squeezable_dimensions(predictions, labels, name=None):
  """Squeeze last dim if ranks of `predictions` and `labels` differ by 1.

  This will use static shape if available. Otherwise, it will add graph
  operations, which could result in a performance hit.

  Args:
    predictions: Predicted values, a `Tensor` of arbitrary dimensions.
    labels: Label values, a `Tensor` whose dimensions match `predictions`.
    name: Name of the op.

  Returns:
    Tuple of `predictions` and `labels`, possibly with last dim squeezed.
  """
  with ops.name_scope(name, 'remove_squeezable_dimensions',
                      [predictions, labels]):
    predictions = ops.convert_to_tensor(predictions)
    labels = ops.convert_to_tensor(labels)
    predictions_shape = predictions.get_shape()
    predictions_rank = predictions_shape.ndims
    labels_shape = labels.get_shape()
    labels_rank = labels_shape.ndims
    if (labels_rank is not None) and (predictions_rank is not None):
      # Use static rank.
      rank_diff = predictions_rank - labels_rank
      if rank_diff == -1:
        # Labels carry one extra trailing dim; drop it.
        labels = array_ops.squeeze(labels, [-1])
      elif rank_diff == 1:
        # Predictions carry one extra trailing dim; drop it.
        predictions = array_ops.squeeze(predictions, [-1])
      return predictions, labels

    # Use dynamic rank.
    rank_diff = array_ops.rank(predictions) - array_ops.rank(labels)
    # Only add the conditional squeeze when the static shape does not rule
    # out a size-1 last dim; the actual rank test is deferred to run time.
    if (predictions_rank is None) or (
        predictions_shape.dims[-1].is_compatible_with(1)):
      predictions = control_flow_ops.cond(
          math_ops.equal(1, rank_diff),
          lambda: array_ops.squeeze(predictions, [-1]),
          lambda: predictions)
    if (labels_rank is None) or (
        labels_shape.dims[-1].is_compatible_with(1)):
      labels = control_flow_ops.cond(
          math_ops.equal(-1, rank_diff),
          lambda: array_ops.squeeze(labels, [-1]),
          lambda: labels)
    return predictions, labels
def _all_equal(tensor0, tensor1):
  """Returns a scalar bool tensor: True iff the two tensors match elementwise."""
  with ops.name_scope('all_equal', values=[tensor0, tensor1]) as scope:
    elementwise = math_ops.equal(tensor0, tensor1, name='equal')
    return math_ops.reduce_all(elementwise, name=scope)
def _is_rank(expected_rank, actual_tensor):
  """Returns whether actual_tensor's rank is expected_rank.

  Args:
    expected_rank: Integer defining the expected rank, or tensor of same.
    actual_tensor: Tensor to test.
  Returns:
    New tensor.
  """
  with ops.name_scope('is_rank', values=[actual_tensor]) as scope:
    expected_t = ops.convert_to_tensor(expected_rank, name='expected')
    actual_t = array_ops.rank(actual_tensor, name='actual')
    return math_ops.equal(expected_t, actual_t, name=scope)
def _is_shape(expected_shape, actual_tensor, actual_shape=None):
  """Returns whether actual_tensor's shape is expected_shape.

  Args:
    expected_shape: Integer list defining the expected shape, or tensor of same.
    actual_tensor: Tensor to test.
    actual_shape: Shape of actual_tensor, if we already have it.
  Returns:
    New tensor.
  """
  with ops.name_scope('is_shape', values=[actual_tensor]) as scope:
    # Ranks must match before an elementwise shape comparison is meaningful.
    is_rank = _is_rank(array_ops.size(expected_shape), actual_tensor)
    if actual_shape is None:
      actual_shape = array_ops.shape(actual_tensor, name='actual')
    shape_equal = _all_equal(
        ops.convert_to_tensor(expected_shape, name='expected'),
        actual_shape)
    return math_ops.logical_and(is_rank, shape_equal, name=scope)
def _assert_shape_op(expected_shape, actual_tensor):
  """Asserts actual_tensor's shape is expected_shape.

  Args:
    expected_shape: List of integers defining the expected shape, or tensor of
        same.
    actual_tensor: Tensor to test.
  Returns:
    New assert tensor.
  """
  with ops.name_scope('assert_shape', values=[actual_tensor]) as scope:
    actual_shape = array_ops.shape(actual_tensor, name='actual')
    is_shape = _is_shape(expected_shape, actual_tensor, actual_shape)
    # The Assert fires at run time, reporting both shapes in the message.
    return control_flow_ops.Assert(
        is_shape, [
            'Wrong shape for %s [expected] [actual].' % actual_tensor.name,
            expected_shape,
            actual_shape
        ], name=scope)
def with_same_shape(expected_tensor, tensor):
  """Assert tensors are the same shape, from the same graph.

  Args:
    expected_tensor: Tensor with expected shape.
    tensor: Tensor of actual values.
  Returns:
    Tuple of (actual_tensor, label_tensor), possibly with assert ops added.
  """
  with ops.name_scope('%s/' % tensor.op.name, values=[expected_tensor, tensor]):
    expected_static = expected_tensor.get_shape()
    if expected_static.is_fully_defined():
      # Static shape known: pass it as a plain int list.
      target_shape = expected_static.as_list()
    else:
      # Fall back to the dynamic shape op.
      target_shape = array_ops.shape(expected_tensor, name='expected_shape')
    return with_shape(target_shape, tensor)
def with_shape(expected_shape, tensor):
  """Asserts tensor has expected shape.

  If tensor shape and expected_shape, are fully defined, assert they match.
  Otherwise, add assert op that will validate the shape when tensor is
  evaluated, and set shape on tensor.

  Args:
    expected_shape: Expected shape to assert, as a 1D array of ints, or tensor
        of same.
    tensor: Tensor whose shape we're validating.
  Returns:
    tensor, perhaps with a dependent assert operation.
  Raises:
    ValueError: if tensor has an invalid shape.
  """
  if isinstance(tensor, sparse_tensor.SparseTensor):
    raise ValueError('SparseTensor not supported.')

  # Shape type must be 1D int32.
  if tensor_util.is_tensor(expected_shape):
    if expected_shape.dtype.base_dtype != dtypes.int32:
      raise ValueError(
          'Invalid dtype %s for shape %s expected of tensor %s.' % (
              expected_shape.dtype, expected_shape, tensor.name))
  if isinstance(expected_shape, (list, tuple)):
    if not expected_shape:
      expected_shape = np.asarray([], dtype=np.int32)
    else:
      np_expected_shape = np.asarray(expected_shape)
      # Downcast int64 entries to int32, which shape ops expect.
      expected_shape = (
          np.asarray(expected_shape, dtype=np.int32)
          if np_expected_shape.dtype == np.int64 else np_expected_shape)
  if isinstance(expected_shape, np.ndarray):
    if expected_shape.ndim > 1:
      raise ValueError(
          'Invalid rank %s for shape %s expected of tensor %s.' % (
              expected_shape.ndim, expected_shape, tensor.name))
    if expected_shape.dtype != np.int32:
      raise ValueError(
          'Invalid dtype %s for shape %s expected of tensor %s.' % (
              expected_shape.dtype, expected_shape, tensor.name))

  actual_shape = tensor.get_shape()

  # Dynamic case: the shape isn't fully known until run time, so attach an
  # assert op and (when possible) propagate the static shape.
  if (not actual_shape.is_fully_defined()
      or tensor_util.is_tensor(expected_shape)):
    with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
      if (not tensor_util.is_tensor(expected_shape)
          and (len(expected_shape) < 1)):
        # TODO(irving): Remove scalar special case
        return array_ops.reshape(tensor, [])
      with ops.control_dependencies([_assert_shape_op(expected_shape, tensor)]):
        result = array_ops.identity(tensor)
      if not tensor_util.is_tensor(expected_shape):
        result.set_shape(expected_shape)
      return result

  # Static case: both shapes are known now, so validate immediately.
  if (not tensor_util.is_tensor(expected_shape) and
      not actual_shape.is_compatible_with(expected_shape)):
    if (len(expected_shape) < 1) and actual_shape.is_compatible_with([1]):
      # TODO(irving): Remove scalar special case.
      with ops.name_scope('%s/' % tensor.op.name, values=[tensor]):
        return array_ops.reshape(tensor, [])
    raise ValueError('Invalid shape for tensor %s, expected %s, got %s.' % (
        tensor.name, expected_shape, actual_shape))

  return tensor
def assert_scalar_int(tensor, name=None):
    """Assert `tensor` is 0-D, of type `tf.int32` or `tf.int64`.

    Args:
      tensor: `Tensor` to test.
      name: Name of the op and of the new `Tensor` if one is created.
    Returns:
      `tensor`, for chaining.
    Raises:
      ValueError: if `tensor` is not 0-D, of integer type.
    """
    with ops.name_scope(name, 'assert_scalar_int', [tensor]) as name_scope:
        tensor = ops.convert_to_tensor(tensor)
        # Reject anything that is not an integer dtype before the scalar check.
        if not tensor.dtype.base_dtype.is_integer:
            raise ValueError('Expected integer type for %s, received type: %s.'
                             % (tensor.name, tensor.dtype))
        return check_ops.assert_scalar(tensor, name=name_scope)
| {
"content_hash": "466e29d38ddf7169d8406a59fcf18603",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 80,
"avg_line_length": 36.13978494623656,
"alnum_prop": 0.6766835267281563,
"repo_name": "unsiloai/syntaxnet-ops-hack",
"id": "8839da2947aff58447e6fd28422c29e2b666ec3d",
"size": "10773",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/framework/python/framework/tensor_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "194561"
},
{
"name": "C++",
"bytes": "29090434"
},
{
"name": "CMake",
"bytes": "619418"
},
{
"name": "Go",
"bytes": "940282"
},
{
"name": "Java",
"bytes": "380724"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37232"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "284579"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "24901060"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "353443"
}
],
"symlink_target": ""
} |
import os, os.path, sys, argparse
# Skeleton of the generated C++ translation unit. The two %s slots receive,
# in order: the per-file char-array definitions, then the initializer entries
# of the name -> EmbeddedContent lookup table.
SOURCE_TEMPLATE = """
#include "internal/Embedded.h"
#include <string>
#include <unordered_map>
namespace {
%s
const std::unordered_map<std::string, EmbeddedContent> embedded = {
%s
};
} // namespace
const EmbeddedContent* findEmbeddedContent(const std::string& name) {
const auto found = embedded.find(name);
if (found == embedded.end()) {
return nullptr;
}
return &found->second;
}\n
"""
# Number of input bytes rendered per line of each generated char array.
MAX_SLICE = 16
def as_byte(data):
    """Return one element of a byte string as an int.

    Iterating bytes yields 1-char strings on Python 2 (so ``ord`` is needed)
    and ints on Python 3 (where the element is returned unchanged).
    """
    return ord(data) if sys.version_info < (3,) else data
def parse_arguments():
    """Parse command-line options for the embedded-content generator.

    Returns:
        argparse.Namespace with ``output_file`` (str) and ``input_file``
        (list of str, one or more paths to embed).
    """
    parser = argparse.ArgumentParser(description="Embedded content generator")
    parser.add_argument('--output', '-o', action='store', dest='output_file',
                        type=str, help='Output File', required=True)
    # Bug fix: the help text for --file was copy-pasted from --output
    # ('Output File'); these are the input files to embed.
    parser.add_argument('--file', '-f', action='store', nargs='+', dest='input_file',
                        type=str, help='Input file(s) to embed', required=True)
    return parser.parse_args()
def create_file_byte(name, file_bytes):
    """Render *file_bytes* as a NUL-terminated C char array named *name*."""
    pieces = [' const char %s[%d] = {\n' % (name, len(file_bytes) + 1)]
    for offset in range(0, len(file_bytes), MAX_SLICE):
        chunk = file_bytes[offset:offset + MAX_SLICE]
        rendered = "".join("'\\x{:02x}',".format(as_byte(b)) for b in chunk)
        pieces.append(" " + rendered + "\n")
    # Trailing NUL so the embedded data is also usable as a C string.
    pieces.append(' 0x00,\n')
    pieces.append(' };\n')
    return ''.join(pieces)
def create_file_info(file_list):
    """Render the lookup-table initializer mapping "/basename" -> content.

    *file_list* holds (variable name, base filename, byte length) triples.
    """
    return ''.join(
        ' {"/%s", { %s, %d }},\n' % (base, name, length)
        for name, base, length in file_list
    )
def main():
    """Read every input file and write a C++ source embedding their contents."""
    args = parse_arguments()
    entries = []
    byte_arrays = []
    # Each file gets a sequentially numbered char-array variable.
    for counter, file_name in enumerate(args.input_file, start=1):
        with open(file_name, 'rb') as handle:
            contents = handle.read()
        var_name = "fileData%d" % counter
        entries.append((var_name, os.path.basename(file_name), len(contents)))
        byte_arrays.append(create_file_byte(var_name, contents))
    with open(args.output_file, 'w') as output_file:
        output_file.write(SOURCE_TEMPLATE % (''.join(byte_arrays),
                                             create_file_info(entries)))

if __name__ == '__main__':
    main()
| {
"content_hash": "8072a8428338c1ff0852fc7314fa1006",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 130,
"avg_line_length": 27.098765432098766,
"alnum_prop": 0.5995444191343964,
"repo_name": "mattgodbolt/seasocks",
"id": "7bdd21ba8b92f2336dc051d463aacef99adc1810",
"size": "2218",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/gen_embedded.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "112291"
},
{
"name": "C++",
"bytes": "991377"
},
{
"name": "CMake",
"bytes": "10254"
},
{
"name": "CSS",
"bytes": "743"
},
{
"name": "HTML",
"bytes": "2768"
},
{
"name": "JavaScript",
"bytes": "2031"
},
{
"name": "Makefile",
"bytes": "786"
},
{
"name": "Python",
"bytes": "6010"
},
{
"name": "Shell",
"bytes": "687"
}
],
"symlink_target": ""
} |
"""C++ compiler code generator."""
import os
import generators.cpp_templates
import scope
import spgen_parser
class Properties:
    """Well-known keys of the ``info.properties`` mapping used by this generator."""
    # Filenames of the generated C++ header and source.
    OUTPUT_CPP_HEADER = 'outputCppHeader'
    OUTPUT_CPP_SOURCE = 'outputCppSource'
    # Namespace emitted into the generated C++ code.
    OUTPUT_CPP_NAMESPACE = 'outputCppNamespace'
    # Directory (resolved against the cwd) where outputs are written.
    OUTPUT_DIRECTORY = 'outputDirectory'
    # Base module name from which default output names are derived.
    DEFAULT_MODULE_NAME = 'defaultModuleName'

# Human-readable name of this generator backend.
module_name = 'C++ code generator'
def generate_header_file(output_header_file, lexer_transition_table, rules, properties):
    """Serialize the C++ header template (TOKEN rules only) to *output_header_file*."""
    token_rules = [name for name, rule in rules.items()
                   if rule.type == spgen_parser.RuleTypes.TOKEN]
    template = generators.cpp_templates.generate_header_template(
        module_name = properties[Properties.DEFAULT_MODULE_NAME],
        rules = token_rules
    )
    serialized = scope.serialize(template)
    with open(output_header_file, 'w') as out:
        out.write(serialized)
def generate_source_file(output_source_file, header_file, lexer_transition_table, rules, properties):
    """Serialize the C++ source template (including *header_file*) to *output_source_file*."""
    template = generators.cpp_templates.generate_source_template(
        module_name = properties[Properties.DEFAULT_MODULE_NAME],
        header_file = header_file
    )
    serialized = scope.serialize(template)
    with open(output_source_file, 'w') as out:
        out.write(serialized)
def generate_code(info):
    """Fill in missing output properties, then emit the header and source files.

    Any of the output filename/namespace properties the user did not set are
    derived from the configured default module name as ``<module>Parser.*``.
    """
    module = info.properties[Properties.DEFAULT_MODULE_NAME]
    # One table instead of three copy-pasted `if X not in ...` blocks.
    defaults = {
        Properties.OUTPUT_CPP_SOURCE: '{0}Parser.cpp'.format(module),
        Properties.OUTPUT_CPP_HEADER: '{0}Parser.h'.format(module),
        Properties.OUTPUT_CPP_NAMESPACE: '{0}.Parser'.format(module),
    }
    for key, value in defaults.items():
        info.properties.setdefault(key, value)

    print('Properties:')
    for name, value in sorted(info.properties.items()):
        print(' {0}: {1}'.format(name, value))

    def _output_path(prop):
        # Outputs land under the configured output directory, relative to cwd.
        return os.path.normpath(os.path.join(
            os.getcwd(),
            info.properties[Properties.OUTPUT_DIRECTORY],
            info.properties[prop]))

    generate_header_file(
        _output_path(Properties.OUTPUT_CPP_HEADER),
        info.lexer_transition_table,
        info.rules,
        info.properties)
    generate_source_file(
        _output_path(Properties.OUTPUT_CPP_SOURCE),
        info.properties[Properties.OUTPUT_CPP_HEADER],
        info.lexer_transition_table,
        info.rules,
        info.properties)
| {
"content_hash": "3f01e9f82cf41e4a92fd61f2bb9126bc",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 121,
"avg_line_length": 32.58108108108108,
"alnum_prop": 0.7561177934467026,
"repo_name": "lrgar/spgen",
"id": "f01b817c953a0d26353934c9b79e11b838b25090",
"size": "2552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spgen/generators/cpp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "375"
},
{
"name": "Python",
"bytes": "89584"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
} |
"""
@author: Deniz Altinbuken, Emin Gun Sirer
@note: Example membership object
@copyright: See LICENSE
"""
class Membership():
    """A set of members supporting add/remove and a space-joined string form."""

    def __init__(self):
        self.members = set()

    def add(self, member):
        # set.add is already a no-op for existing members, so the previous
        # explicit membership test was redundant.
        self.members.add(member)

    def remove(self, member):
        # set.remove raises KeyError(member) for absent members, which is
        # exactly the contract the old if/else reimplemented by hand.
        self.members.remove(member)

    def __str__(self):
        return " ".join(str(m) for m in self.members)
| {
"content_hash": "1f57d0cf4ac2c2934226ca0b27d8344b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 55,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.5895953757225434,
"repo_name": "milannic/expCPython",
"id": "d66c3d5041c32b003f7f19add59e5e9bd6f09a33",
"size": "519",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "concoord-1.0.2/build/lib.linux-x86_64-2.7/concoord/object/membership.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "452139"
},
{
"name": "C++",
"bytes": "2521"
},
{
"name": "CSS",
"bytes": "1879"
},
{
"name": "JavaScript",
"bytes": "3081"
},
{
"name": "Objective-C",
"bytes": "951"
},
{
"name": "PHP",
"bytes": "141"
},
{
"name": "Perl",
"bytes": "22657"
},
{
"name": "Python",
"bytes": "1093970"
},
{
"name": "Shell",
"bytes": "8885"
}
],
"symlink_target": ""
} |
"""Factories to help in tests."""
from factory import PostGenerationMethodCall, Sequence
from factory.alchemy import SQLAlchemyModelFactory
from personal_website.database import db
from personal_website.user.models import User
class BaseFactory(SQLAlchemyModelFactory):
    """Base factory: binds all concrete factories to the app's DB session."""

    class Meta:
        """Factory configuration."""

        # abstract=True means no model is attached; subclasses set `model`.
        abstract = True
        sqlalchemy_session = db.session
class UserFactory(BaseFactory):
    """Factory producing active User rows with unique credentials."""

    # Sequence gives each generated user a distinct username/email.
    username = Sequence(lambda n: 'user{0}'.format(n))
    email = Sequence(lambda n: 'user{0}@example.com'.format(n))
    # Calls set_password('example') on the new instance after creation.
    password = PostGenerationMethodCall('set_password', 'example')
    active = True

    class Meta:
        """Factory configuration."""

        model = User
| {
"content_hash": "f3229f65a08fd6166cf50e1b7ef049c9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 66,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6887417218543046,
"repo_name": "arewellborn/Personal-Website",
"id": "f3fb97cbef997cea3248ed9a6e897a0f6b9c87c0",
"size": "779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4414"
},
{
"name": "HTML",
"bytes": "12401"
},
{
"name": "JavaScript",
"bytes": "181413"
},
{
"name": "Python",
"bytes": "37533"
}
],
"symlink_target": ""
} |
import re
def Parser(text, sub_linefeed):
    """Extract seminar-announcement fields from UTF-8 encoded *text*.

    Args:
        text: bytes containing the announcement, labelled with Chinese field
            markers (title/time/address/speaker/abstract/biography).
        sub_linefeed: callable applied to the multi-line abstract/biography
            values (e.g. to normalize line breaks).

    Returns:
        dict with keys 'title', 'time', 'address', 'speaker', 'abstract',
        'biography'; a field is '' when its pattern matches zero or several
        times (same contract as the original per-field stanzas).
    """
    text = text.decode('utf-8')

    def extract(pattern, flags=0):
        # The single stripped match, or None when not exactly one match.
        found = re.findall(re.compile(pattern, flags), text)
        if len(found) != 1:
            return None
        return found[0].strip()

    messages = {}
    # Single-line fields terminated by a newline.
    for key, pattern in (('title', u"题目[::.](.*?)\n"),
                         ('time', u"时间[::.](.*?)\n"),
                         ('address', u"地点[::.](.*?)\n"),
                         ('speaker', u"报告人[::.](.*?)\n")):
        value = extract(pattern)
        messages[key] = value if value is not None else ''

    # abstract: everything between the abstract marker and the inviter marker.
    abstract = extract(u"报告摘要[::.]([\s\S]*)邀请人", re.S)
    messages['abstract'] = sub_linefeed(abstract) if abstract is not None else ''

    # biography: prefer the span ending at the abstract marker, falling back
    # to the span ending at the inviter marker (announcements order fields
    # either way).
    biography = extract(u"报告人简介[::.]([\s\S]*)报告摘要", re.S)
    if biography is None:
        biography = extract(u"报告人简介[::.]([\s\S]*)邀请人", re.S)
    messages['biography'] = sub_linefeed(biography) if biography is not None else ''

    return messages
| {
"content_hash": "2bccba69cc17770b283e2951f46598ac",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 30.295081967213115,
"alnum_prop": 0.6352813852813853,
"repo_name": "HeadCow/ARPS",
"id": "927f58bdf321fef183accdc8b02b7008ba22b51e",
"size": "1952",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "report_crawler/report_crawler/parser/parser_001/_W/WHU001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "611954"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
} |
import schoolopy
import yaml

with open('example_config.yml', 'r') as f:
    # Security/compat fix: yaml.load without an explicit Loader can construct
    # arbitrary Python objects and requires a Loader argument in PyYAML >= 6;
    # safe_load is sufficient for a plain key/secret config file.
    cfg = yaml.safe_load(f)
sc = schoolopy.Schoology(schoolopy.Auth(cfg['key'], cfg['secret']))
sc.limit = 10  # Only retrieve 10 objects max

print('Your name is %s' % sc.get_me().name_display)

# Print author and a 40-char preview for each recent feed update.
for update in sc.get_feed():
    user = sc.get_user(update.uid)
    print('By: ' + user.name_display)
    print(update.body[:40].replace('\r\n', ' ').replace('\n', ' ') + '...')
    print('%d likes\n' % update.likes)
| {
"content_hash": "9ccc7562f7bb2745bf4ad5dd2c177c9f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 32.06666666666667,
"alnum_prop": 0.6257796257796258,
"repo_name": "ErikBoesen/schoolopy",
"id": "305cd661ff4abaff9862903351ed405a7903c025",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example-twolegged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "98762"
}
],
"symlink_target": ""
} |
"""
This part of code is the Deep Q Network (DQN) brain.
view the tensorboard picture about this DQN structure on: https://morvanzhou.github.io/tutorials/machine-learning/reinforcement-learning/4-3-DQN3/#modification
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
Tensorflow: r1.2
"""
import numpy as np
import tensorflow as tf
# Fixed seeds so experiments are reproducible across runs.
np.random.seed(1)
tf.set_random_seed(1)
# Deep Q Network off-policy
class DeepQNetwork:
    """Deep Q Network (off-policy) with separate evaluation and target nets.

    Transitions [s, a, r, s_] are stored in a fixed-size ring buffer and
    sampled uniformly for minibatch updates; the eval net's weights are
    copied into the target net every `replace_target_iter` learning steps.
    """

    def __init__(
            self,
            n_actions,
            n_features,
            learning_rate=0.01,
            reward_decay=0.9,
            e_greedy=0.9,
            replace_target_iter=300,
            memory_size=500,
            batch_size=32,
            e_greedy_increment=None,
            output_graph=False,
    ):
        self.n_actions = n_actions
        self.n_features = n_features
        self.lr = learning_rate
        self.gamma = reward_decay
        self.epsilon_max = e_greedy
        self.replace_target_iter = replace_target_iter
        self.memory_size = memory_size
        self.batch_size = batch_size
        self.epsilon_increment = e_greedy_increment
        # With annealing enabled, start fully exploratory (epsilon=0) and
        # grow toward epsilon_max; otherwise use the fixed maximum.
        self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max

        # total learning step
        self.learn_step_counter = 0

        # initialize zero memory [s, a, r, s_]
        self.memory = np.zeros((self.memory_size, n_features * 2 + 2))

        # consist of [target_net, evaluate_net]
        self._build_net()

        t_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_net')
        e_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_net')
        # Op list that copies every eval-net variable into the target net.
        self.target_replace_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)]

        self.sess = tf.Session()

        if output_graph:
            # $ tensorboard --logdir=logs
            # tf.train.SummaryWriter soon be deprecated, use following
            tf.summary.FileWriter("logs/", self.sess.graph)

        self.saver = tf.train.Saver()
        self.sess.run(tf.global_variables_initializer())
        # Per-step training loss, for plot_cost().
        self.cost_his = []

    def _build_net(self):
        """Build eval/target nets (two dense layers each), TD loss, train op."""
        # ------------------ all inputs ------------------------
        self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s')  # input State
        self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_')  # input Next State
        self.r = tf.placeholder(tf.float32, [None, ], name='r')  # input Reward
        self.a = tf.placeholder(tf.int32, [None, ], name='a')  # input Action

        w_initializer, b_initializer = tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1)

        # ------------------ build evaluate_net ------------------
        with tf.variable_scope('eval_net'):
            e1 = tf.layers.dense(self.s, 200, tf.nn.relu, kernel_initializer=w_initializer,
                                 bias_initializer=b_initializer)
            e2 = tf.layers.dense(e1, 20, tf.nn.relu, kernel_initializer=w_initializer,
                                 bias_initializer=b_initializer)
            self.q_eval = tf.layers.dense(e2, self.n_actions, kernel_initializer=w_initializer,
                                          bias_initializer=b_initializer)

        # ------------------ build target_net ------------------
        with tf.variable_scope('target_net'):
            t1 = tf.layers.dense(self.s_, 200, tf.nn.relu, kernel_initializer=w_initializer,
                                 bias_initializer=b_initializer)
            t2 = tf.layers.dense(t1, 20, tf.nn.relu, kernel_initializer=w_initializer,
                                 bias_initializer=b_initializer)
            self.q_next = tf.layers.dense(t2, self.n_actions, kernel_initializer=w_initializer,
                                          bias_initializer=b_initializer)

        with tf.variable_scope('q_target'):
            q_target = self.r + self.gamma * tf.reduce_max(self.q_next, axis=1, name='Qmax_s_')    # shape=(None, )
            # stop_gradient: the target is treated as a constant during backprop.
            self.q_target = tf.stop_gradient(q_target)
        with tf.variable_scope('q_eval'):
            # Select Q(s, a) for the action actually taken in each row.
            a_indices = tf.stack([tf.range(tf.shape(self.a)[0], dtype=tf.int32), self.a], axis=1)
            self.q_eval_wrt_a = tf.gather_nd(params=self.q_eval, indices=a_indices)    # shape=(None, )
        with tf.variable_scope('loss'):
            self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval_wrt_a, name='TD_error'))
        with tf.variable_scope('train'):
            self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss)

    def store_transition(self, s, a, r, s_):
        """Store one [s, a, r, s_] transition, overwriting oldest when full."""
        if not hasattr(self, 'memory_counter'):
            self.memory_counter = 0
        transition = np.hstack((s, [a, r], s_))
        # replace the old memory with new memory
        index = self.memory_counter % self.memory_size
        self.memory[index, :] = transition
        self.memory_counter += 1

    def choose_action(self, observation):
        """Epsilon-greedy action selection for a single observation vector."""
        # to have batch dimension when feed into tf placeholder
        observation = observation[np.newaxis, :]

        if np.random.uniform() < self.epsilon:
            # forward feed the observation and get q value for every actions
            actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation})
            action = np.argmax(actions_value)
        else:
            action = np.random.randint(0, self.n_actions)
        return action

    def learn(self):
        """Run one minibatch gradient step; periodically sync the target net."""
        # check to replace target parameters
        if self.learn_step_counter % self.replace_target_iter == 0:
            self.sess.run(self.target_replace_op)
            print('\ntarget_params_replaced\n')

        # sample batch memory from all memory
        # NOTE(review): assumes learn() is first called only after at least
        # one transition has been stored (memory_counter > 0) — confirm.
        if self.memory_counter > self.memory_size:
            sample_index = np.random.choice(self.memory_size, size=self.batch_size)
        else:
            sample_index = np.random.choice(self.memory_counter, size=self.batch_size)
        batch_memory = self.memory[sample_index, :]

        _, cost = self.sess.run(
            [self._train_op, self.loss],
            feed_dict={
                self.s: batch_memory[:, :self.n_features],
                self.a: batch_memory[:, self.n_features],
                self.r: batch_memory[:, self.n_features + 1],
                self.s_: batch_memory[:, -self.n_features:],
            })

        self.cost_his.append(cost)

        # increasing epsilon
        self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        self.learn_step_counter += 1

    def plot_cost(self):
        """Plot the recorded training loss against learning steps."""
        import matplotlib.pyplot as plt
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()

    def save_model(self, loc='model'):
        """Checkpoint all session variables under ./<loc>/model.ckpt."""
        save_path = self.saver.save(self.sess, "./" + loc + "/model.ckpt")
        print("Model saved in file: %s" % save_path)

    def restore_model(self, loc='model'):
        """Restore session variables from ./<loc>/model.ckpt."""
        print("Restored model")
        self.saver.restore(self.sess, "./" + loc + "/model.ckpt")
if __name__ == '__main__':
    # Smoke test: build a 3-action, 4-feature network and write the graph
    # to ./logs for TensorBoard inspection.
    DQN = DeepQNetwork(3,4, output_graph=True)
"content_hash": "695cee3327295bc402f2a626b57bd288",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 159,
"avg_line_length": 41.80232558139535,
"alnum_prop": 0.5826147426981919,
"repo_name": "ZhiangChen/soft_arm",
"id": "263d7489c219445273b88a73b1513e592aa3a0e4",
"size": "7190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_nets/DQN.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "6491"
},
{
"name": "CMake",
"bytes": "1048"
},
{
"name": "Jupyter Notebook",
"bytes": "16415"
},
{
"name": "OpenEdge ABL",
"bytes": "5928654"
},
{
"name": "Python",
"bytes": "167187"
}
],
"symlink_target": ""
} |
import tkinter as tk
from PIL import Image, ImageTk
import play
import functools
from video import Video
def display_list(db, cfg):
    """Show *db* in a VideoDisplay window; return the (possibly edited) config."""
    viewer = VideoDisplay(db, cfg)
    viewer.run()
    return viewer.getConfig()
class VideoDisplay:
    """Scrollable tkinter grid of video records with search, sort and playback.

    Rows are video records (dict-like objects); columns come from
    cfg['columns']. Double-clicking a row plays it with the configured
    player backend.
    """

    # Key under which a record stores the imdb id of its parent series.
    SERIES_ID='seriesID'

    def __init__(self, db, cfg):
        self.cfg = cfg
        # Collect one synthetic "series" record per distinct series id so
        # series headers can be shown alongside their episodes.
        series = {}
        for a in db:
            if a[VideoDisplay.SERIES_ID] is not None and a[VideoDisplay.SERIES_ID] not in series.keys():
                series[a[VideoDisplay.SERIES_ID]] = a.getSeries()
        self.db = db.copy()
        self.db.extend(series.values())
        self.root = tk.Tk()
        self.searchFrame = tk.Frame(self.root, background="#D9D9D9")
        self.searchFrame.pack(side="top")
        self.canvas = tk.Canvas(self.root, borderwidth=0, background="#ffffff")
        # <4>/<5> are the X11 scroll-wheel button events; <MouseWheel> covers
        # the other platforms.
        self.canvas.bind_all("<MouseWheel>", self._on_ymousewheel)
        self.canvas.bind_all("<4>", self._on_ymousewheel)
        self.canvas.bind_all("<5>", self._on_ymousewheel)
        self.canvas.bind_all("<Shift-MouseWheel>", self._on_xmousewheel)
        # Drag-to-scroll support.
        self.canvas.bind_all("<ButtonPress-1>", self.startMove)
        self.canvas.bind_all("<ButtonRelease-1>", self.stopMove)
        self.canvas.bind_all("<B1-Motion>", self.onMotion)
        self.setupScrollbars()
        self.canvas.pack(side="bottom", fill="both", expand=True)
        self.sortKey="Title"
        self.sortDir=1
        self.makeHeader()

    def getConfig(self):
        """The live config dict (reflects any preference edits)."""
        return self.cfg

    def run(self):
        """Populate the grid and enter the tk main loop (blocks until close)."""
        self.refresh()
        self.root.mainloop()

    def clear(self):
        """Throw away the current grid and start a fresh inner frame."""
        self.canvas.delete('all')
        self.frame = tk.Frame(self.canvas, background="#ffffff")
        self.canvas.create_window((4,4), window=self.frame, anchor="nw")
        self.frame.bind("<Configure>", self.onFrameConfigure)

    def makeHeader(self):
        """Build the top bar: preferences button, search entry, filter/clear."""
        tk.Button(self.searchFrame, text="Preferences", command=self.showPreferences).pack(side="left")
        tk.Label(self.searchFrame, text="Search:", bg="#D9D9D9", padx=10).pack(side="left")
        self.searchEntry = tk.Entry(self.searchFrame, width=40)
        self.searchEntry.delete(0, tk.END)
        self.searchEntry.pack(side="left", fill="x")
        tk.Button(self.searchFrame, text="Clear", command=self.clearSearch).pack(side="right")
        tk.Button(self.searchFrame, text="Filter", command=self.refresh).pack(side="right")

    def showPreferences(self):
        """Open the preferences window (search path, columns, player)."""
        self.prefWin = tk.Toplevel(self.root)
        frame = tk.Frame(self.prefWin)
        frame.pack(side='top')
        label = tk.Label(frame, text='Search Path for Videos (takes effect on restart of program): ')
        label.pack(side="left")
        self.videoSearchEntry = tk.Entry(frame, width=40)
        self.videoSearchEntry.delete(0, tk.END)
        self.videoSearchEntry.insert(0, self.cfg['search_path'])
        self.videoSearchEntry.pack(side="left", fill="x")
        tk.Button(frame, text='Update', command=self.updateSearchPath).pack(side='left')
        frame2 = tk.Frame(self.prefWin)
        frame2.pack(side='top')
        label = tk.Label(frame2, text='Columns: ')
        label.pack(side="left")
        self.colEntry = tk.Entry(frame2, width=40,)
        self.colEntry.delete(0, tk.END)
        self.colEntry.insert(0, toSimpleListStr(self.cfg['columns']))
        self.colEntry.pack(side="left", fill="x")
        tk.Button(frame2, text='Update', command=self.updateColumns).pack(side='left')
        frame2ish = tk.Frame(self.prefWin)
        frame2ish.pack(side='top')
        tk.Label(frame2ish, text="Available Columns:").pack(side='left')
        tk.Label(frame2ish, text=toSimpleListStr(getAvailableColumns(), True),
                 wraplength=350
                 ).pack(side='left')
        frame3 = tk.Frame(self.prefWin)
        frame3.pack(side='top')
        label = tk.Label(frame3, text='Video Player: ')
        label.pack(side="left")
        self.player = tk.StringVar(self.root)
        self.player.set(self.cfg['video_player'])
        # Fires changePlayer whenever the option menu selection changes.
        self.player.trace('w', self.changePlayer)
        tk.OptionMenu(frame3, self.player, *getPlayers()).pack(side='left')

    def changePlayer(self, *args):
        """Persist the newly selected player backend and redraw."""
        self.cfg['video_player'] = self.player.get()
        self.refresh()

    def updateSearchPath(self):
        """Persist the edited video search path (used on next start)."""
        self.cfg['search_path'] = self.videoSearchEntry.get()

    def updateColumns(self):
        """Persist the edited column list and redraw."""
        self.cfg['columns'] = fromSimpleListStr(self.colEntry.get())
        self.refresh()

    def clearSearch(self):
        """Empty the search box and redraw the unfiltered list."""
        self.searchEntry.delete(0, 'end')
        self.refresh()

    #@profile
    def refresh(self):
        """Rebuild the whole grid: filter, sort, then draw header + rows."""
        self.clear()
        self.canvas.yview_moveto(0)
        self.canvas.xview_moveto(0)
        if self.searchEntry.get() != "":
            self.displaying = self.performSearch()
        else:
            self.displaying = [a for a in self.db]
        # this is slow
        self.sortDisplaying()
        self.display_sort_column_heads(0)
        ro=1
        for line in self.displaying:
            self.display_video(line, ro)
            ro+=1

    def sortDisplaying(self):
        """Sort self.displaying, keeping each series' episodes grouped
        directly after their series record."""
        toSortTotal = []
        toSortSeries = {}
        for a in self.displaying:
            if a.hasSeries():
                s = a.getSeries()
                if s.getId() not in toSortSeries.keys():
                    toSortSeries[s.getId()] = []
                toSortSeries[s.getId()].append(a)
            else:
                toSortTotal.append(a)
        toSortTotal.sort(key=functools.cmp_to_key(self.videoCmp))
        self.displaying = []
        for a in toSortTotal:
            self.displaying.append(a)
            if a.getId() in toSortSeries.keys():
                toSortSeries[a.getId()].sort(key=functools.cmp_to_key(self.videoCmp))
                self.displaying.extend(toSortSeries[a.getId()])
        #self.displaying.sort(key=functools.cmp_to_key(self.videoCmp))

    def performSearch(self):
        """Videos matching the search box (general text or column=value)."""
        out = []
        gen_str, col_search = parseSearch(self.searchEntry.get())
        for vid in self.db:
            for col in self.cfg['columns']:
                vidc = no_none_get(vid, col, 'N/A')
                if col in col_search.keys() and col_search[col] in vidc:
                    out.append(vid)
                    break
                if gen_str is not None and gen_str in vidc:
                    out.append(vid)
                    break
        return out

    #@profile
    def videoCmp(self, a, b):
        """cmp-style comparator honoring sortKey/sortDir and series grouping."""
        # always put series entries before the series values
        # all the series stuff is a hack right now
        if a[VideoDisplay.SERIES_ID] == b['imdbID']:
            return 1
        elif b[VideoDisplay.SERIES_ID] == a['imdbID']:
            return -1
        # put all items in a series together
        # if we are not part of the same series (or both not part of the series) compare with series item
        if a[VideoDisplay.SERIES_ID] != b[VideoDisplay.SERIES_ID]:
            if a[VideoDisplay.SERIES_ID] is not None:
                a = a.getSeries()
            if b[VideoDisplay.SERIES_ID] is not None:
                b = b.getSeries()
        ak,bk = possiblyToIntTuples(no_none_get(a,self.sortKey,"N/A"),
                                    no_none_get(b,self.sortKey,"N/A"))
        if ak==bk:
            # Tie-break on title, always ascending regardless of sortDir.
            at = no_none_get(a,'Title',"N/A")
            bt = no_none_get(b, 'Title',"N/A")
            if at > bt:
                return 1
            elif at < bt:
                return -1
            return 0
        if ak > bk:
            return self.sortDir
        return -1*self.sortDir

    def display_sort_column_heads(self, ro):
        """Draw the clickable column headers, arrow-marking the sort column."""
        col = 0
        for column in self.cfg['columns']:
            name = column
            if name == self.sortKey:
                if self.sortDir == 1:
                    name += "↓"
                else:
                    name += "↑"
            lb = tk.Label(self.frame, text=name)
            lb.bind("<Button-1>", make_toggle_key(self, column))
            lb.grid(row=ro, column=col, sticky=tk.W+tk.E+tk.N+tk.S)
            col+=1

    def toggleKey(self, key):
        """Sort by *key*; clicking the active column flips the direction."""
        if key == self.sortKey:
            self.sortDir *= -1
        else:
            self.sortKey = key
            self.sortDir = 1
        self.refresh()

    #@profile
    def display_video(self, line, ro):
        """Draw one video row: poster image or wrapped-text cells, with
        double-click bound to playback."""
        col = 0
        playerfn = make_play(line, self.cfg, self)
        # Local aliases to cut attribute lookups inside the column loop.
        frF = self.frame
        lbO = tk.Label
        frO = tk.Frame
        for column in self.cfg['columns']:
            tmp = no_none_get(line, column, "N/A")
            if tmp != "N/A" and column == "PosterFile":
                photo = line.getPosterOfSize(100,150)
                lb = lbO(frF, image=photo, height=150, width=100, borderwidth="1", relief="solid",
                         anchor=tk.NW, justify=tk.CENTER,
                         )
                # Keep a reference; otherwise tk garbage-collects the image.
                lb.image = photo
                lb.bind("<Double-Button-1>", playerfn)
            else:
                # Highlight cells containing the raw search text.
                background = "#BBBBBB"
                if self.searchEntry.get() != "":
                    if self.searchEntry.get() in tmp:
                        background = "#FFF700"
                width = len(tmp)*10
                if width > 320:
                    width = 320
                if len(tmp) > 450:
                    tmp = tmp[:450]+"..."
                f = frO(frF, height=150, width=width)
                f.pack_propagate(0)
                lbf = lbO(f, text=tmp, borderwidth="1", relief="solid",
                          anchor=tk.N, justify=tk.LEFT, wraplength=300,
                          bg=background
                          )
                lbf.pack(fill=tk.BOTH, expand=1)
                lbf.bind("<Double-Button-1>", playerfn)
                lb = f
            lb.grid(row=ro, column=col, sticky=tk.W+tk.E+tk.N+tk.S)
            col+=1

    def startMove(self, event):
        """Record the drag origin for drag-to-scroll."""
        self.x = event.x
        self.y = event.y

    def stopMove(self, event):
        """Forget the drag origin."""
        self.x = None
        self.y = None

    def onMotion(self, event):
        """Scroll by the offset from the drag origin.
        NOTE(review): self.x/self.y are not updated here, so motion scrolls
        relative to the press point, accelerating with drag distance — confirm
        this is intended.
        """
        deltax = event.x - self.x
        deltay = event.y - self.y
        self.canvas.yview_scroll(-deltay, "units")
        self.canvas.xview_scroll(-deltax, "units")

    def onFrameConfigure(self, e):
        '''Reset the scroll region to encompass the inner frame'''
        self.canvas.configure(scrollregion=self.canvas.bbox("all"),
                              xscrollincrement=1,
                              yscrollincrement=1)

    def _on_ymousewheel(self, event):
        # event.num is 4 (up) or 5 (down) on X11 -> scroll -30/+30 units.
        self.canvas.yview_scroll(int((event.num-4.5)*30), "units")

    def _on_xmousewheel(self, event):
        self.canvas.xview_scroll(int((event.num-4.5)*30), "units")

    def setupScrollbars(self):
        """Attach vertical and horizontal scrollbars to the canvas."""
        vsb = tk.Scrollbar(self.root, orient="vertical", command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=vsb.set)
        vsb.pack(side="right", fill="y")
        xvsb = tk.Scrollbar(self.root, orient="horizontal", command=self.canvas.xview)
        self.canvas.configure(xscrollcommand=xvsb.set)
        xvsb.pack(side="bottom", fill="x")
def make_play(vid, cfg, self):
    """Build a double-click callback that plays *vid* with the configured player."""
    def handler(event):
        # Cancel any in-progress drag before launching playback.
        self.stopMove(None)
        # Resolve e.g. 'vlc' -> play.vlc_play at click time, so preference
        # changes take effect without rebinding.
        backend = getattr(play, cfg['video_player'] + '_play')
        backend(vid)
    return handler
def make_toggle_key(self, column):
    """Build a click callback that toggles sorting on *column*."""
    def handler(event):
        self.toggleKey(column)
    return handler
def getAvailableColumns():
    """All column names supported by Video records."""
    return Video.getAttrs()
def fromSimpleListStr(sts):
    """Parse a comma-separated string into a list of whitespace-trimmed items.

    Inverse of toSimpleListStr. Single-pass comprehension with str.strip()
    (equivalent to the old split -> trimAll two-pass).
    """
    return [part.strip() for part in sts.split(',')]
def trimAll(lis):
    """Return a list with surrounding whitespace stripped from each item.

    Idiom cleanup: comprehension instead of an append loop, and str.strip()
    instead of the equivalent rstrip().lstrip() chain.
    """
    return [item.strip() for item in lis]
def toSimpleListStr(cols, sort=False):
    """Join *cols* into a comma-separated string, optionally sorted."""
    items = sorted(cols) if sort else list(cols)
    return ', '.join(items)
def getPlayers():
    """Names of the available player backends: every play.<name>_play function."""
    return [attr[:attr.find('_play')] for attr in dir(play) if '_play' in attr]
def parseSearch(strs):
    """Split a search string into (free text or None, {column: value}).

    Tokens containing '=' become column filters; everything else is
    re-joined (space-separated) into the general search text.
    """
    columns = {}
    free_parts = []
    for token in strs.strip().split(' '):
        if '=' in token:
            key, _, value = token.partition('=')
            columns[key] = value
        else:
            free_parts.append(token)
    general = ' '.join(free_parts) if free_parts else None
    return general, columns
def all_same(a):
    """True if every item of iterable *a* equals the first one.

    Bug fix: the old version called next() unconditionally, raising
    StopIteration on an empty iterator; an empty input is now vacuously True.
    """
    it = iter(a)
    _sentinel = object()
    first = next(it, _sentinel)
    if first is _sentinel:
        return True
    for item in it:
        # Keep the original != comparison (not `not ==`) for exotic types.
        if first != item:
            return False
    return True
#@profile
def possiblyToIntTuples(*args):
    """Field-wise int conversion of whitespace-split args.

    When every arg splits into the same number of fields, aligned fields are
    run through possiblyToInt and re-zipped per arg; otherwise the args are
    returned unchanged.
    """
    if not all_same(len(a.split()) for a in args):
        return args
    split_args = [a.split() for a in args]
    converted = [possiblyToInt(*fields) for fields in zip(*split_args)]
    return zip(*converted)
#@profile
def possiblyToInt(*args):
    """All args converted to ints (as a list) when every one parses;
    otherwise the original args tuple, unchanged."""
    try:
        return [int(a) for a in args]
    except ValueError:
        return args
def no_none_get(a, b, c):
    """a[b], or fallback *c* when the stored value is None.

    Unlike dict.get, a missing key still raises; only a stored None
    is replaced.
    """
    value = a[b]
    return c if value is None else value
| {
"content_hash": "07d42e8fc4971c956162a54271373c95",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 99,
"avg_line_length": 26.883248730964468,
"alnum_prop": 0.6675793051359517,
"repo_name": "Ben0mega/VideoViewer",
"id": "ed4cfe578d34e6573db48f2499d895b5388b0842",
"size": "10596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "display.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25421"
}
],
"symlink_target": ""
} |
"""Test the kernels service API."""
import json
import sys
import time
from requests import HTTPError
from traitlets.config import Config
from tornado.httpclient import HTTPRequest
from tornado.ioloop import IOLoop
from tornado.websocket import websocket_connect
from unittest import SkipTest
from jupyter_client.kernelspec import NATIVE_KERNEL_NAME
from notebook.utils import url_path_join
from notebook.tests.launchnotebook import NotebookTestBase, assert_http_error
try:
from jupyter_client import AsyncMultiKernelManager
async_testing_enabled = True
except ImportError:
async_testing_enabled = False
class KernelAPI(object):
    """Wrapper for kernel REST API requests"""

    def __init__(self, request, base_url, headers):
        self.request = request
        self.base_url = base_url
        self.headers = headers

    def _req(self, verb, path, body=None):
        """Send *verb* to api/kernels/<path>; raise HTTPError on 4xx/5xx."""
        response = self.request(verb,
                                url_path_join('api/kernels', path), data=body)
        if 400 <= response.status_code < 600:
            try:
                # Surface the server-provided message as the HTTP reason.
                response.reason = response.json()['message']
            except Exception:
                # Bug fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt; body may not be JSON.
                pass
        response.raise_for_status()
        return response

    def list(self):
        """GET the list of running kernels."""
        return self._req('GET', '')

    def get(self, id):
        """GET the kernel model for *id*."""
        return self._req('GET', id)

    def start(self, name=NATIVE_KERNEL_NAME):
        """POST to start a kernel of kernelspec *name*."""
        body = json.dumps({'name': name})
        return self._req('POST', '', body)

    def shutdown(self, id):
        """DELETE (shut down) the kernel with *id*."""
        return self._req('DELETE', id)

    def interrupt(self, id):
        """POST an interrupt to the kernel with *id*."""
        return self._req('POST', url_path_join(id, 'interrupt'))

    def restart(self, id):
        """POST a restart to the kernel with *id*."""
        return self._req('POST', url_path_join(id, 'restart'))

    def websocket(self, id):
        """Open a websocket onto the kernel's channels endpoint."""
        loop = IOLoop()
        loop.make_current()
        req = HTTPRequest(
            url_path_join(self.base_url.replace('http', 'ws', 1), 'api/kernels', id, 'channels'),
            headers=self.headers,
        )
        f = websocket_connect(req)
        return loop.run_sync(lambda : f)
class KernelAPITest(NotebookTestBase):
    """Test the kernels web service API"""
    def setUp(self):
        # One REST wrapper per test, bound to this server's URL and auth headers.
        self.kern_api = KernelAPI(self.request,
                                  base_url=self.base_url(),
                                  headers=self.auth_headers(),
                                  )
    def tearDown(self):
        # Shut down every kernel the test left running so cases stay isolated.
        for k in self.kern_api.list().json():
            self.kern_api.shutdown(k['id'])
    def test_no_kernels(self):
        """Make sure there are no kernels running at the start"""
        kernels = self.kern_api.list().json()
        self.assertEqual(kernels, [])
    def test_default_kernel(self):
        # POST request with an empty body starts the default kernel.
        r = self.kern_api._req('POST', '')
        kern1 = r.json()
        # 201 Created, with Location pointing at the new kernel resource.
        self.assertEqual(r.headers['location'], url_path_join(self.url_prefix, 'api/kernels', kern1['id']))
        self.assertEqual(r.status_code, 201)
        self.assertIsInstance(kern1, dict)
        # The Content-Security-Policy header must be set on API responses too.
        report_uri = url_path_join(self.url_prefix, 'api/security/csp-report')
        expected_csp = '; '.join([
            "frame-ancestors 'self'",
            'report-uri ' + report_uri,
            "default-src 'none'"
        ])
        self.assertEqual(r.headers['Content-Security-Policy'], expected_csp)
    def test_main_kernel_handler(self):
        # POST request via the wrapper (named default kernelspec).
        r = self.kern_api.start()
        kern1 = r.json()
        self.assertEqual(r.headers['location'], url_path_join(self.url_prefix, 'api/kernels', kern1['id']))
        self.assertEqual(r.status_code, 201)
        self.assertIsInstance(kern1, dict)
        report_uri = url_path_join(self.url_prefix, 'api/security/csp-report')
        expected_csp = '; '.join([
            "frame-ancestors 'self'",
            'report-uri ' + report_uri,
            "default-src 'none'"
        ])
        self.assertEqual(r.headers['Content-Security-Policy'], expected_csp)
        # GET request: the kernel we just started must be listed.
        r = self.kern_api.list()
        self.assertEqual(r.status_code, 200)
        assert isinstance(r.json(), list)
        self.assertEqual(r.json()[0]['id'], kern1['id'])
        self.assertEqual(r.json()[0]['name'], kern1['name'])
        # create another kernel and check that they both are added to the
        # list of kernels from a GET request
        kern2 = self.kern_api.start().json()
        assert isinstance(kern2, dict)
        r = self.kern_api.list()
        kernels = r.json()
        self.assertEqual(r.status_code, 200)
        assert isinstance(kernels, list)
        self.assertEqual(len(kernels), 2)
        # Interrupt a kernel: 204 No Content, no body.
        r = self.kern_api.interrupt(kern2['id'])
        self.assertEqual(r.status_code, 204)
        # Restart a kernel: id and name are preserved across the restart.
        r = self.kern_api.restart(kern2['id'])
        rekern = r.json()
        self.assertEqual(rekern['id'], kern2['id'])
        self.assertEqual(rekern['name'], kern2['name'])
    def test_kernel_handler(self):
        # GET kernel with given id
        kid = self.kern_api.start().json()['id']
        r = self.kern_api.get(kid)
        kern1 = r.json()
        self.assertEqual(r.status_code, 200)
        assert isinstance(kern1, dict)
        self.assertIn('id', kern1)
        self.assertEqual(kern1['id'], kid)
        # Request a bad kernel id and check that a JSON
        # message is returned!
        bad_id = '111-111-111-111-111'
        with assert_http_error(404, 'Kernel does not exist: ' + bad_id):
            self.kern_api.get(bad_id)
        # DELETE kernel with id; afterwards the kernel list must be empty.
        r = self.kern_api.shutdown(kid)
        self.assertEqual(r.status_code, 204)
        kernels = self.kern_api.list().json()
        self.assertEqual(kernels, [])
        # Request to delete a non-existent kernel id
        bad_id = '111-111-111-111-111'
        with assert_http_error(404, 'Kernel does not exist: ' + bad_id):
            self.kern_api.shutdown(bad_id)
    def test_connections(self):
        # A freshly started kernel reports zero websocket connections.
        kid = self.kern_api.start().json()['id']
        model = self.kern_api.get(kid).json()
        self.assertEqual(model['connections'], 0)
        ws = self.kern_api.websocket(kid)
        model = self.kern_api.get(kid).json()
        self.assertEqual(model['connections'], 1)
        ws.close()
        # give it some time to close on the other side:
        for i in range(10):
            model = self.kern_api.get(kid).json()
            if model['connections'] > 0:
                time.sleep(0.1)
            else:
                break
        model = self.kern_api.get(kid).json()
        self.assertEqual(model['connections'], 0)
class AsyncKernelAPITest(KernelAPITest):
    """Test the kernels web service API using the AsyncMappingKernelManager"""

    @classmethod
    def setup_class(cls):
        """Skip the whole class when async kernel support is unavailable."""
        if not async_testing_enabled:  # Can be removed once jupyter_client >= 6.1 is required.
            raise SkipTest("AsyncKernelAPITest tests skipped due to down-level jupyter_client!")
        if sys.version_info < (3, 6):  # Can be removed once 3.5 is dropped.
            raise SkipTest("AsyncKernelAPITest tests skipped due to Python < 3.6!")
        super(AsyncKernelAPITest, cls).setup_class()

    @classmethod
    def get_argv(cls):
        """Extend the server argv to select the async kernel manager."""
        argv = super(AsyncKernelAPITest, cls).get_argv()
        # Only switch managers when an appropriate jupyter_client is present;
        # otherwise the sync-based tests are simply repeated.
        if async_testing_enabled:
            argv.append('--NotebookApp.kernel_manager_class='
                        'notebook.services.kernels.kernelmanager.AsyncMappingKernelManager')
        return argv
class KernelFilterTest(NotebookTestBase):
    # A special install of NotebookTestBase where only `kernel_info_request`
    # messages are allowed through the kernel's websocket channels.
    config = Config()
    config.NotebookApp.MappingKernelManager.allowed_message_types = [
        'kernel_info_request']

    def test_config(self):
        """Sanity check verifying that the configurable was properly set."""
        self.assertEqual(
            self.notebook.kernel_manager.allowed_message_types,
            ['kernel_info_request'])
class KernelCullingTest(NotebookTestBase):
    """Test kernel culling """

    @classmethod
    def get_argv(cls):
        """Start the server with aggressive idle-culling settings."""
        argv = super(KernelCullingTest, cls).get_argv()
        # Enable culling with 2s timeout and 1s intervals
        argv += [
            '--MappingKernelManager.cull_idle_timeout=2',
            '--MappingKernelManager.cull_interval=1',
            '--MappingKernelManager.cull_connected=False',
        ]
        return argv

    def setUp(self):
        """Bind a REST wrapper to this test server."""
        self.kern_api = KernelAPI(self.request,
                                  base_url=self.base_url(),
                                  headers=self.auth_headers(),
                                  )

    def tearDown(self):
        """Shut down any kernels the test left running."""
        for kernel in self.kern_api.list().json():
            self.kern_api.shutdown(kernel['id'])

    def test_culling(self):
        """A kernel is culled only once its websocket is disconnected."""
        kid = self.kern_api.start().json()['id']
        ws = self.kern_api.websocket(kid)
        model = self.kern_api.get(kid).json()
        self.assertEqual(model['connections'], 1)
        assert not self.get_cull_status(kid)  # connected, should not be culled
        ws.close()
        assert self.get_cull_status(kid)  # not connected, should be culled

    def get_cull_status(self, kid):
        """Poll for up to ~3s; return True once the kernel has been culled."""
        for _ in range(15):  # Need max of 3s to ensure culling timeout exceeded
            try:
                self.kern_api.get(kid)
            except HTTPError as e:
                # A culled kernel disappears from the API entirely.
                assert e.response.status_code == 404
                return True
            time.sleep(0.2)
        return False
| {
"content_hash": "f0d62c3194cd966cf9cbc5d2f3ab9fcd",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 107,
"avg_line_length": 34.62190812720848,
"alnum_prop": 0.5916513574198816,
"repo_name": "sserrot/champion_relationships",
"id": "36c441c2c7dc42f8faae6ac614ddd59b9030ec69",
"size": "9798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/notebook/services/kernels/tests/test_kernels_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
""" --- geometry parameters for Howorka --- """
nm = 1e-9
# @TODO maybe include tolc in parent file
tolc = 1e-14*nm # tolerance for coordinate comparisons
dim = 2
# DNA radius
rDNA = 1.1*nm
# molecule radius
rMolecule = 0.5*nm
# effective pore radius
r0 = 1*nm
# barrel outer radius
r1 = 2.5*nm
# pore length
l0 = 15.0*nm
# membrane thickness
l1 = 2.2*nm
# Radius of domain
Rz = 15.0*nm
R = 15.0*nm
Rx = R
Ry = Rz
# total number of charged DNA base pairs
ncbp = 6.0*36 # 6 scaffold strands of DNA with 36 charged base pairs
# length scales
lc = nm
lcMolecule = lc*0.1
lcCenter = lc/2
lcOuter = lc
boxfields = True
# provide default values for boundary layer around membrane/molecule
membraneblayer = None
moleculeblayer = None
| {
"content_hash": "8157167e1530792b94cfefbae2e64a7f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 69,
"avg_line_length": 17.975609756097562,
"alnum_prop": 0.7014925373134329,
"repo_name": "mitschabaude/nanopores",
"id": "d33d109f8bd2469dda5bb7bb85e3ca4c38773705",
"size": "737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nanopores/geometries/H_geo/params_geo_old.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "670019"
},
{
"name": "Python",
"bytes": "1665312"
},
{
"name": "Shell",
"bytes": "1606"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import View


class LoginRequired(View):
    """View base class that rejects anonymous users before dispatching."""

    @method_decorator(login_required)
    def dispatch(self, *args, **kwargs):
        """Delegate to the normal dispatch once authentication has passed."""
        parent = super(LoginRequired, self)
        return parent.dispatch(*args, **kwargs)
class UserObjectsOnlyMixin(object):
    """Restrict the view's queryset to objects owned by the requesting user."""

    def get_queryset(self):
        base_qs = super(UserObjectsOnlyMixin, self).get_queryset()
        return base_qs.filter(user=self.request.user)
"content_hash": "6f53d23b4f0b6250799501cab3e3d011",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 94,
"avg_line_length": 34.642857142857146,
"alnum_prop": 0.7608247422680412,
"repo_name": "andresriancho/django-plans",
"id": "68caa88dddc5752f925aaf8578ba4a4fa46036b6",
"size": "485",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "plans/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3961"
},
{
"name": "Python",
"bytes": "124157"
}
],
"symlink_target": ""
} |
"""Helpers for logging allowing more advanced logging styles to be used."""
import inspect
import logging
# mypy: allow-untyped-defs, no-check-untyped-defs
class KeywordMessage:
    """
    Represents a logging message with keyword arguments.

    Adapted from: https://stackoverflow.com/a/24683360/2267718
    """

    def __init__(self, fmt, args, kwargs):
        """Store the format template and its positional/keyword arguments."""
        self._fmt = fmt
        self._args = args
        self._kwargs = kwargs

    def __str__(self):
        """Render the message lazily, only when it is actually logged."""
        template = str(self._fmt)
        return template.format(*self._args, **self._kwargs)
class KeywordStyleAdapter(logging.LoggerAdapter):
    """Represents an adapter wrapping the logger allowing KeywordMessages."""

    def __init__(self, logger, extra=None):
        """Initialize a new StyleAdapter for the provided logger."""
        super(KeywordStyleAdapter, self).__init__(logger, extra or {})

    def log(self, level, msg, *args, **kwargs):
        """Log the message provided at the appropriate level."""
        if self.isEnabledFor(level):
            # ``process`` keeps only the kwargs that ``Logger._log`` itself
            # accepts; the full ``kwargs`` dict is still handed to
            # KeywordMessage so it can fill ``{name}``-style placeholders.
            msg, log_kwargs = self.process(msg, kwargs)
            self.logger._log(  # pylint: disable=protected-access
                level, KeywordMessage(msg, args, kwargs), (), **log_kwargs
            )

    def process(self, msg, kwargs):
        """Process the keyword args in preparation for logging."""
        # Filter kwargs down to the parameter names of Logger._log (skipping
        # ``self``), so formatting-only keywords are not passed through.
        return (
            msg,
            {
                k: kwargs[k]
                for k in inspect.getfullargspec(
                    self.logger._log  # pylint: disable=protected-access
                ).args[1:]
                if k in kwargs
            },
        )
| {
"content_hash": "1117dcb57455feaf7b47f97284575a7f",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 77,
"avg_line_length": 32.18867924528302,
"alnum_prop": 0.5908558030480656,
"repo_name": "fbradyirl/home-assistant",
"id": "e69564453fada02ac2d126471be54db327781b9b",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/logging.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1829"
},
{
"name": "Python",
"bytes": "16494727"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17784"
}
],
"symlink_target": ""
} |
from zope.interface import implements
from twisted.names import dns, common
from twisted.python import failure, log
from twisted.internet import interfaces, defer
class CacheResolver(common.ResolverBase):
    """
    A resolver that serves records from a local, memory cache.

    @ivar _reactor: A provider of L{interfaces.IReactorTime}.
    """
    implements(interfaces.IResolver)

    cache = None

    def __init__(self, cache=None, verbose=0, reactor=None):
        common.ResolverBase.__init__(self)
        self.cache = {}
        self.verbose = verbose
        # Maps query -> IDelayedCall that will expire the cache entry.
        self.cancel = {}
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor
        if cache:
            # Seed the cache; cacheResult also schedules the expiry timers.
            for query, (seconds, payload) in cache.items():
                self.cacheResult(query, payload, seconds)

    def __setstate__(self, state):
        # Restore a pickled resolver, discarding entries whose TTL has
        # already elapsed relative to the current reactor time.
        self.__dict__ = state
        now = self._reactor.seconds()
        for (k, (when, (ans, add, ns))) in self.cache.items():
            diff = now - when
            for rec in ans + add + ns:
                if rec.ttl < diff:
                    # One expired record invalidates the whole entry.
                    del self.cache[k]
                    break

    def __getstate__(self):
        # Pending expiry timers cannot be pickled; cancel them all first.
        for c in self.cancel.values():
            c.cancel()
        self.cancel.clear()
        return self.__dict__

    def _lookup(self, name, cls, type, timeout):
        # Serve a cached answer for (name, type, cls), reducing every
        # record's TTL by the time it has already spent in the cache.
        now = self._reactor.seconds()
        q = dns.Query(name, type, cls)
        try:
            when, (ans, auth, add) = self.cache[q]
        except KeyError:
            if self.verbose > 1:
                log.msg('Cache miss for ' + repr(name))
            return defer.fail(failure.Failure(dns.DomainError(name)))
        else:
            if self.verbose:
                log.msg('Cache hit for ' + repr(name))
            diff = now - when
            try:
                result = (
                    [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff,
                                  r.payload) for r in ans],
                    [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff,
                                  r.payload) for r in auth],
                    [dns.RRHeader(str(r.name), r.type, r.cls, r.ttl - diff,
                                  r.payload) for r in add])
            except ValueError:
                # RRHeader construction failed (presumably a negative
                # adjusted TTL -- confirm); treat the stale entry as a miss.
                return defer.fail(failure.Failure(dns.DomainError(name)))
            else:
                return defer.succeed(result)

    def lookupAllRecords(self, name, timeout = None):
        # ALL_RECORDS queries are never answered from the cache.
        return defer.fail(failure.Failure(dns.DomainError(name)))

    def cacheResult(self, query, payload, cacheTime=None):
        """
        Cache a DNS entry.

        @param query: a L{dns.Query} instance.

        @param payload: a 3-tuple of lists of L{dns.RRHeader} records, the
            matching result of the query (answers, authority and additional).

        @param cacheTime: The time (seconds since epoch) at which the entry is
            considered to have been added to the cache. If C{None} is given,
            the current time is used.
        """
        if self.verbose > 1:
            log.msg('Adding %r to cache' % query)

        self.cache[query] = (cacheTime or self._reactor.seconds(), payload)

        # Replace any expiry timer already scheduled for this query.
        if query in self.cancel:
            self.cancel[query].cancel()

        # Expire the entry when the smallest TTL across all sections
        # elapses (immediately if the payload is empty).
        s = list(payload[0]) + list(payload[1]) + list(payload[2])
        if s:
            m = s[0].ttl
            for r in s:
                m = min(m, r.ttl)
        else:
            m = 0

        self.cancel[query] = self._reactor.callLater(m, self.clearEntry, query)

    def clearEntry(self, query):
        # Called by the reactor once an entry's minimum TTL has elapsed.
        del self.cache[query]
        del self.cancel[query]
| {
"content_hash": "1b59c3011746a6490b160be06317b0d1",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 79,
"avg_line_length": 30.716666666666665,
"alnum_prop": 0.5371676614215952,
"repo_name": "denny820909/builder",
"id": "854050b3f64375a2ae128baffd6a638ffd077448",
"size": "3804",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/names/cache.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals, division
from armet import http
from .base import BaseResourceTest
class TestResourceDelete(BaseResourceTest):
    """Exercise HTTP DELETE against an existing resource."""

    def test_delete_existing(self, connectors):
        """Deleting an existing poll must yield 204 No Content."""
        headers = {'Content-Type': 'application/json'}
        response, _ = self.client.delete(path='/api/poll/1/', headers=headers)
        assert response.status == http.client.NO_CONTENT
| {
"content_hash": "0be37f6e3e7c577b76fcf23672007df5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 66,
"avg_line_length": 32.07692307692308,
"alnum_prop": 0.697841726618705,
"repo_name": "armet/python-armet",
"id": "037f2e841fa4ab57dbf4d57ee0ddf23b37d1baa8",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/connectors/test_delete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285382"
}
],
"symlink_target": ""
} |
'''
Created on 2012-11-10
@author: vincent
'''
from trac.env import open_environment
import aopMiscUtils
TRACADMIN = "trac-admin"
class Trac:
    """Thin wrapper around a Trac environment for ticket checks and
    changeset notifications."""

    def __init__(self, envpath):
        """Open the Trac environment located at *envpath*."""
        self.envPath = envpath
        env = open_environment(envpath)
        self.db = env.get_db_cnx()

    def getValidTicket(self, ticket):
        """Return True when *ticket* is an existing, non-closed ticket id."""
        cursor = self.db.cursor()
        # Parameterized query instead of %-interpolating the id into the SQL
        # text (avoids SQL injection; int() keeps the old %d coercion).
        cursor.execute("SELECT COUNT(id) FROM ticket WHERE "
                       "status <> 'closed' AND id = %s", (int(ticket),))
        row = cursor.fetchone()
        # bool(...) so callers always get True/False (previously None was
        # returned when fetchone yielded no row).
        return bool(row and row[0] >= 1)

    def appendChangeset(self, repos, revision):
        """Run ``trac-admin <env> changeset added <repos> <revision>``."""
        cmdPattern = '%s %s changeset added %s %d'
        # NOTE(review): the command string is executed via a shell helper;
        # confirm envPath/repos never contain shell metacharacters.
        cmd = cmdPattern % (TRACADMIN, self.envPath, repos, int(revision))
        return aopMiscUtils.execCommand(cmd)
"content_hash": "4a06781d1e865e0be0c23583384b3d84",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 74,
"avg_line_length": 30.96,
"alnum_prop": 0.6111111111111112,
"repo_name": "fleeto/svn2trac",
"id": "2dd36155c1148aa7ebe06a96b3bbc2231017c485",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aopTrac.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "11028"
}
],
"symlink_target": ""
} |
import json
import os
import subprocess
import sys
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
RUNWAY_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, ".."))
BIN_DIR = os.path.abspath(os.path.join(RUNWAY_DIR, "bin"))
VALID_FORMATS = {None, "json", "csv"}
def clean_output(output):
    """Collapse `lxc list` CSV output into one `name,ip,...` line per container.

    A line whose first whitespace-separated token contains a comma starts a
    new container record; any other non-empty line contributes an extra IP
    address to the current record. Quotes are stripped throughout.
    """
    containers = []
    current = ""
    for raw_line in output.split("\n"):
        if not raw_line:
            continue
        token = raw_line.split(" ")[0].replace('"', "")
        if "," in token:
            # Name column present: flush the previous record, start a new one.
            if current:
                containers.append(current)
            current = token
        else:
            # Continuation line: an additional address for the current record.
            current += ",{}".format(token)
    if current:
        containers.append(current)
    return "\n".join(containers)
def parse_output(output):
    """Parse cleaned `name,ip,...` lines into a list of container dicts."""
    parsed = []
    for record in output.split():
        fields = record.split(",")
        parsed.append({
            "name": fields[0],
            "ip_addresses": fields[1:],
        })
    return parsed
usage = "Usage: {} container_name [--csv|--json]".format(sys.argv[0])
try:
container_name = sys.argv[1]
except IndexError:
print(usage)
sys.exit(1)
try:
format = sys.argv[2]
if not format.startswith("--"):
print(usage)
sys.exit(1)
format = format.replace("--", "")
except IndexError:
format = None
if format not in VALID_FORMATS:
print("Error: Invalid format")
print(usage)
sys.exit(1)
cmd = (
"OPTIONALRUNWAYCNAME=1 QUIET=1 source lib/get_container_connection_options.sh "
"&& ssh -q -t ${{VAGRANTOPTIONS}} ${{RUNWAYHOST}} lxc list {} -c n4 --format "
"csv".format(container_name)
)
output = subprocess.check_output(cmd, shell=True, cwd=BIN_DIR, universal_newlines=True)
csv_data = clean_output(output)
if format is None or format == "csv":
print(csv_data)
else:
parsed_data = parse_output(csv_data)
if format == "json":
print(json.dumps(parsed_data))
else:
# We should never get here!
# We'll leave it here to make it clear where to generate other output
# formats if that's ever done.
pass
| {
"content_hash": "a0f57bc35e91e275c0037dd051c0cb66",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 87,
"avg_line_length": 27.76829268292683,
"alnum_prop": 0.6126482213438735,
"repo_name": "swiftstack/runway",
"id": "a0290392e39719a615a494ed501cb94a9e5843d1",
"size": "2387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/get_ip.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66810"
},
{
"name": "Shell",
"bytes": "22809"
}
],
"symlink_target": ""
} |
import os
import sys
import pymongo
import json
from parsing import parse_db_address
def cleanup(path):
    """Drop the 'jobs' and 'hypers' collections for the experiment at *path*.

    *path* must be a directory holding a config.json with the experiment
    name and the MongoDB address.
    """
    if not os.path.isdir(path):
        raise Exception("%s is not a valid directory" % path)
    with open(os.path.join(path, 'config.json'), 'r') as f:
        cfg = json.load(f)
    db_address = parse_db_address(cfg)
    print 'Cleaning up experiment %s in database at %s' % (cfg["experiment-name"], db_address)
    client = pymongo.MongoClient(db_address)
    db = client.spearmint
    # Remove all stored jobs and hyperparameter samples for this experiment.
    db[cfg["experiment-name"]]['jobs'].drop()
    db[cfg["experiment-name"]]['hypers'].drop()


if __name__ == '__main__':
    # Usage: python cleanup.py <experiment-directory>
    cleanup(sys.argv[1])
"content_hash": "7b0ced52868fb5f9604e3922f4d5e067",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 94,
"avg_line_length": 24.73076923076923,
"alnum_prop": 0.6438569206842923,
"repo_name": "fmaguire/BayeHem",
"id": "71660c388865babfd52482ab3bd264549a1f9b9d",
"size": "10274",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Spearmint/spearmint/utils/cleanup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "10914"
},
{
"name": "Batchfile",
"bytes": "23271"
},
{
"name": "C",
"bytes": "3837551"
},
{
"name": "C++",
"bytes": "33218177"
},
{
"name": "CSS",
"bytes": "1556"
},
{
"name": "Groff",
"bytes": "59793"
},
{
"name": "HTML",
"bytes": "365248"
},
{
"name": "IDL",
"bytes": "14"
},
{
"name": "Java",
"bytes": "13433"
},
{
"name": "Lua",
"bytes": "23713"
},
{
"name": "M4",
"bytes": "19951"
},
{
"name": "Makefile",
"bytes": "118962"
},
{
"name": "Objective-C",
"bytes": "8790"
},
{
"name": "Perl",
"bytes": "272990"
},
{
"name": "Python",
"bytes": "6200881"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "R",
"bytes": "4898"
},
{
"name": "Shell",
"bytes": "270614"
},
{
"name": "TeX",
"bytes": "8434"
},
{
"name": "XSLT",
"bytes": "759"
},
{
"name": "Yacc",
"bytes": "18910"
}
],
"symlink_target": ""
} |
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'consoles')
class ConsolesController(wsgi.Controller):
    """Server action controller exposing VNC console access."""

    def __init__(self, *args, **kwargs):
        super(ConsolesController, self).__init__(*args, **kwargs)
        self.compute_api = compute.API()

    @wsgi.action('os-getVNCConsole')
    def get_vnc_console(self, req, id, body):
        """Get text console output."""
        context = req.environ['nova.context']
        authorize(context)

        # If type is not supplied or unknown, get_vnc_console below will cope
        console_type = body['os-getVNCConsole'].get('type')

        try:
            instance = self.compute_api.get(context, id)
            connect_info = self.compute_api.get_vnc_console(
                context, instance, console_type)
        except exception.InstanceNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except exception.InstanceNotReady as e:
            raise webob.exc.HTTPConflict(explanation=e.format_message())

        return {'console': {'type': console_type,
                            'url': connect_info['url']}}

    def get_actions(self):
        """Return the actions the extension adds, as required by contract."""
        return [extensions.ActionExtension("servers", "os-getVNCConsole",
                                           self.get_vnc_console)]
class Consoles(extensions.ExtensionDescriptor):
    """Interactive Console support."""

    name = "Consoles"
    alias = "os-consoles"
    namespace = "http://docs.openstack.org/compute/ext/os-consoles/api/v2"
    updated = "2011-12-23T00:00:00+00:00"

    def get_controller_extensions(self):
        """Attach the console controller to the servers resource."""
        ext = extensions.ControllerExtension(
            self, 'servers', ConsolesController())
        return [ext]
| {
"content_hash": "c056bec69bcf58e8f649103cced9e22e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 79,
"avg_line_length": 37.01724137931034,
"alnum_prop": 0.6208663251047974,
"repo_name": "paulmathews/nova",
"id": "7750a767785b30ee17b8f6cbe7d5436aea13e177",
"size": "2798",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/folsom",
"path": "nova/api/openstack/compute/contrib/consoles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7293434"
},
{
"name": "Shell",
"bytes": "16910"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from django.views.generic import ListView
from djangoautoconf.ajax_select_utils.channel_creator_for_model import create_channels_for_related_fields_in_model
# from towel.modelview import ModelView
from djangoautoconf.django_rest_framework_utils.serializer_generator import get_detail_api_class
from djangoautoconf.model_utils.url_for_models import add_all_urls
from models import UfsObj, Description
from tagging.models import Tag
from api import UfsObjResource
from add_tag_template_view import AddTagTemplateView
from add_tag_template_view_local import AddTagTemplateViewLocal
# from obj_sys.admin import obj_sys_admin_site
from obj_sys.views import get_parent
from ufs_obj_in_tree_view import ItemTreeView
from rss import LatestEntriesFeed
import models
# Tastypie REST resource for UfsObj.
ufs_obj_resource = UfsObjResource()
# ufs_obj_in_tree_resource = UfsObjInTreeResource()
# tag_resource = TagResource()
# obj_views = ModelView(UfsObj)
# resource_views_ajax = ModelView(BookableResource, base_template="modal.html")
# resource_booking_req_views_ajax = ModelView(BookingRequest, base_template="modal.html")
################################
# The following codes can not be put in admin, otherwise there will be template error:
# Reverse for 'obj_sys_description_add' with arguments '()' and keyword arguments '{}' not found. 0 pattern(s) tried: []
# Don't know why
# register_channel(UfsObj, ["ufs_url", "uuid", "full_path", "descriptions__content"])
# register_channel(Description, ["content", ])
# Register ajax-select lookup channels for every related field on UfsObj.
create_channels_for_related_fields_in_model(UfsObj)
# URL routes for the obj_sys app: tagging views, object query/operation
# endpoints, tastypie API resources, tree views and the RSS feed.
urlpatterns = patterns('',
                       url(r'^tagging/$', login_required(AddTagTemplateView.as_view())),
                       url(r'^get_parent/$', get_parent),
                       # url(r'^filter', login_required(
                       #     DjangoAutoFilter.as_view(model_class=UfsObj,
                       #                              ajax_fields={"relations": "ufs_obj", "parent": "ufs_obj",
                       #                                           "descriptions": "description"}
                       #                              ))),
                       url(r'^tagging_local/$', login_required(AddTagTemplateViewLocal.as_view())),
                       url(r'^tagging/(?P<news_item_pk>\d+)/$', login_required(AddTagTemplateView.as_view()),
                           name="news-item"),
                       # url(r'^manager/$', 'obj_sys.views.manager'),
                       url(r'^homepage/$', 'obj_sys.views.listing_with_description'),
                       (r'^latest/feed/$', LatestEntriesFeed()),
                       url(r'^append_tags/$', 'obj_sys.obj_tagging.handle_append_tags_request'),
                       url(r'^query/$', 'obj_sys.views.query'),
                       url(r'^operations/', 'obj_sys.views.do_operation'),
                       url(r'^do_json_operation/$', 'obj_sys.views.do_json_operation'),
                       url(r'^remove_tag/$', 'obj_sys.obj_tagging.remove_tag'),
                       url(r'^add_tag/$', 'obj_sys.obj_tagging.add_tag'),
                       url(r'^get_tags/$', 'obj_sys.obj_tagging.get_tags'),
                       url(r'^tag_list/$', ListView.as_view(
                           queryset=Tag.objects.all(),
                           context_object_name='tagged_items',
                           template_name='obj_sys/pane.html')),
                       (r'^api/ufsobj/', include(ufs_obj_resource.urls)),
                       # (r'^api/ufs_obj_in_tree/', include(ufs_obj_in_tree_resource.urls)),
                       (r'^tree_raw/', ItemTreeView.as_view(template_name='obj_sys/mptt_tree.html')),
                       (r'^tree/', ItemTreeView.as_view()),
                       url(r'^ufs_obj_rest/(?P<pk>[0-9]+)/$', get_detail_api_class(UfsObj).as_view()),
                       url(r'^mptt_tree_view/', login_required(ItemTreeView.as_view(
                           default_level=2,
                           ufs_obj_type=2,
                           template_name='obj_sys/jquery_sortable_list.html'))),
                       url(r'^$', login_required(
                           ItemTreeView.as_view(item_class=UfsObj,
                                                # default_level=2,
                                                ufs_obj_type=UfsObj.TYPE_UFS_OBJ,
                                                template_name='obj_sys/mptt_item_tree.html'))),
                       # url(r'^obj_admin/', include(obj_sys_admin_site.urls)),
                       # (r'^api/tag/', include(tag_resource.urls)),
                       # url(r'^$', 'desktop.filemanager.views.index'),
                       # url(r'^.+$', 'desktop.filemanager.views.handler'),
                       # url(r'^homepage_all/$', 'obj_sys.views.homepage'),
                       # url(r'^ufs/', include(obj_views.urls)),
                       # url(r'^qrcode/$', 'thumbapp.views.gen_qr_code'),
                       # url(r'^image/$', 'thumbapp.views.image'),
                       )
# Append auto-generated CRUD/list routes for every model in obj_sys.models.
add_all_urls(urlpatterns, models)
| {
"content_hash": "3ebdac238fb242bec8d1599eb691927d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 120,
"avg_line_length": 58.146067415730336,
"alnum_prop": 0.5418357487922705,
"repo_name": "weijia/obj_sys",
"id": "acb5473f183932d6c983c3a6aa84fb58eb7e041f",
"size": "5175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "obj_sys/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2851"
},
{
"name": "HTML",
"bytes": "12274"
},
{
"name": "Makefile",
"bytes": "1269"
},
{
"name": "Python",
"bytes": "102004"
}
],
"symlink_target": ""
} |
"""empty message
Revision ID: e86a4e731a84
Revises: 9a85e02050e0
Create Date: 2017-12-19 12:25:41.564000
"""
# revision identifiers, used by Alembic.
revision = 'e86a4e731a84'
down_revision = '9a85e02050e0'
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils
def upgrade():
    """Add the nullable ``is_index`` boolean flag to ``table_columns``."""
    # ### commands auto generated by Alembic - please adjust! ###
    is_index_column = sa.Column('is_index', sa.Boolean(), nullable=True)
    op.add_column('table_columns', is_index_column)
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``is_index`` flag added by the matching upgrade."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('table_columns', 'is_index')
    # ### end Alembic commands ###
| {
"content_hash": "a1abcc1437d0bf2145f9d4571f64529c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 86,
"avg_line_length": 24.25925925925926,
"alnum_prop": 0.6885496183206107,
"repo_name": "codesmart-co/bit",
"id": "a674970b4b1447a9b52895bb3ba57303bc01be8a",
"size": "655",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bit/migrations/versions/e86a4e731a84_.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "334"
},
{
"name": "HTML",
"bytes": "4695"
},
{
"name": "JavaScript",
"bytes": "7662"
},
{
"name": "Mako",
"bytes": "436"
},
{
"name": "Python",
"bytes": "285159"
}
],
"symlink_target": ""
} |
def test_assert_false(testdir):
    """Captured stderr/stdout must not appear in the report of a failing test."""
    # create a temporary pytest test module
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def test_logging():
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert False
        """
    )

    # run pytest with no cmd args
    result = testdir.runpytest()

    # None of the captured-output sections or log lines may leak into stdout.
    forbidden = (
        "Captured stderr call",
        "test_logging : DEBUG : DEBUG!",
        "test_logging : INFO : INFO!",
        "test_logging : WARNING : WARNING!",
        "test_logging : ERROR : ERROR!",
        "test_logging : CRITICAL : CRITICAL!",
        "Captured stdout call",
    )
    for line in result.stdout.lines:
        for fragment in forbidden:
            assert fragment not in line

    # make sure that that we get a '1' exit code for the testsuite
    assert result.ret == 1
def test_assert_true(testdir):
    """Captured stderr/stdout must not appear in the report of a passing test."""
    # create a temporary pytest test module
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def test_logging():
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert True
        """
    )

    # run pytest with no cmd args
    result = testdir.runpytest()

    # None of the captured-output sections or log lines may leak into stdout.
    forbidden = (
        "Captured stderr call",
        "test_logging : DEBUG : DEBUG!",
        "test_logging : INFO : INFO!",
        "test_logging : WARNING : WARNING!",
        "test_logging : ERROR : ERROR!",
        "test_logging : CRITICAL : CRITICAL!",
        "Captured stdout call",
    )
    for line in result.stdout.lines:
        for fragment in forbidden:
            assert fragment not in line

    # make sure that that we get a '0' exit code for the testsuite
    assert result.ret == 0
def test_setup_assert_false(testdir):
    """Test pytest does not display captured stderr/stdout on module setup failure."""
    # create a temporary pytest test module whose setup_module fails
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def setup_module(module):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert False
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from setup is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr setup" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout setup" not in line
    # make sure that we get a '1' exit code for the testsuite (setup failed)
    assert result.ret == 1
def test_setup_assert_true(testdir):
    """Test pytest does not display captured stderr/stdout when module setup passes."""
    # create a temporary pytest test module whose setup_module succeeds
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def setup_module(module):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert True
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from setup is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr setup" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout setup" not in line
    # make sure that we get a '0' exit code for the testsuite (everything passes)
    assert result.ret == 0
def test_setup_function_assert_false(testdir):
    """Test pytest does not display captured stderr/stdout on setup function failure."""
    # create a temporary pytest test module whose setup_function fails
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def setup_function(function):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert False
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from setup is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr setup" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout setup" not in line
    # make sure that we get a '1' exit code for the testsuite (setup failed)
    assert result.ret == 1
def test_setup_function_assert_true(testdir):
    """Test pytest does not display captured stderr/stdout when the setup function passes."""
    # create a temporary pytest test module whose setup_function succeeds
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def setup_function(function):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert True
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from setup is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr setup" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout setup" not in line
    # make sure that we get a '0' exit code for the testsuite (everything passes)
    assert result.ret == 0
def test_teardown_assert_false(testdir):
    """Test pytest does not display captured stderr/stdout on module teardown failure."""
    # create a temporary pytest test module whose teardown_module fails
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def teardown_module(module):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert False
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from teardown is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr teardown" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout teardown" not in line
    # make sure that we get a '1' exit code for the testsuite (teardown failed)
    assert result.ret == 1
def test_teardown_assert_true(testdir):
    """Test pytest does not display captured stderr/stdout when module teardown passes."""
    # create a temporary pytest test module whose teardown_module succeeds
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def teardown_module(module):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert True
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from teardown is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr teardown" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout teardown" not in line
    # make sure that we get a '0' exit code for the testsuite (everything passes)
    assert result.ret == 0
def test_teardown_function_assert_false(testdir):
    """Test pytest does not display captured stderr/stdout on teardown function failure."""
    # create a temporary pytest test module whose teardown_function fails
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def teardown_function(function):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert False
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from teardown is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr teardown" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout teardown" not in line
    # make sure that we get a '1' exit code for the testsuite (teardown failed)
    assert result.ret == 1
def test_teardown_function_assert_true(testdir):
    """Test pytest does not display captured stderr/stdout when the teardown function passes."""
    # create a temporary pytest test module whose teardown_function succeeds
    testdir.makepyfile(
        """
        import pytest
        import sys
        import datetime
        import logging
        import logging.handlers
        log_format = '%(asctime)s : %(name)s : %(module)s : %(funcName)s : %(levelname)s : %(message)s'
        formatter = logging.Formatter(fmt=log_format)
        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.DEBUG)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        file_handler = logging.handlers.RotatingFileHandler(file_name, maxBytes=1000, backupCount=5)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        def teardown_function(function):
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert True
        def test_logging():
            assert True
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured stderr/stdout from teardown is not displayed
    for line in result.stdout.lines:
        assert "Captured stderr teardown" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout teardown" not in line
    # make sure that we get a '0' exit code for the testsuite (everything passes)
    assert result.ret == 0
def test_hide_capture_log_call(testdir):
    """Test pytest does not display captured log call on test failure."""
    # create a temporary pytest test module that logs via daiquiri to a file
    testdir.makepyfile(
        """
        import pytest
        import daiquiri
        import sys
        import datetime
        import logging
        strftime_format = '%Y%m%d-%H%M%S'
        file_name = '{0}-{1}.log'.format(__name__, datetime.datetime.utcnow().strftime(strftime_format))
        daiquiri.setup(
            level=logging.INFO,
            outputs=(daiquiri.output.File(file_name),),
        )
        logger = daiquiri.getLogger(__name__)
        def test_logging():
            print('PRINT DEBUG!')
            logger.debug('DEBUG!')
            logger.info('INFO!')
            logger.warning('WARNING!')
            logger.error('ERROR!')
            logger.critical('CRITICAL!')
            assert False
        """
    )
    # run pytest with no cmd args
    result = testdir.runpytest()
    # Assert captured log call is not displayed
    for line in result.stdout.lines:
        assert "Captured log call" not in line
        assert "test_logging : DEBUG : DEBUG!" not in line
        assert "test_logging : INFO : INFO!" not in line
        assert "test_logging : WARNING : WARNING!" not in line
        assert "test_logging : ERROR : ERROR!" not in line
        assert "test_logging : CRITICAL : CRITICAL!" not in line
        assert "Captured stdout call" not in line
        assert "Captured stderr call" not in line
    # make sure that we get a '1' exit code for the testsuite (the test failed)
    assert result.ret == 1
| {
"content_hash": "c5818a34ad15505ceb6d7bd64e391fa3",
"timestamp": "",
"source": "github",
"line_count": 672,
"max_line_length": 104,
"avg_line_length": 34.098214285714285,
"alnum_prop": 0.6179191760495767,
"repo_name": "hamzasheikh/pytest-hidecaptured",
"id": "3499e6d7e8762436096be6febbe978dee753e996",
"size": "22940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_hidecaptured.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23491"
}
],
"symlink_target": ""
} |
"""
Keras LSTM Sequence to Sequence Model for Translation
=================================
**Author**: `Siju Samuel <https://siju-samuel.github.io/>`_
This script demonstrates how to implement a basic character-level sequence-to-sequence model.
We apply it to translating short English sentences into short French sentences,
character-by-character.
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
    It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
This script loads the s2s.h5 model saved in repository
https://github.com/dmlc/web-data/raw/master/keras/models/s2s_translate/lstm_seq2seq.py
and generates sequences from it. It assumes that no changes have been made (for example:
latent_dim is unchanged, and the input data and model architecture are unchanged).
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
See lstm_seq2seq.py for more details on the model architecture and how it is trained.
"""
from keras.models import Model, load_model
from keras.layers import Input
import random
import os
import numpy as np
import keras
import tvm
import nnvm
######################################################################
# Download required files
# -----------------------
# Download the pre-trained model and the English->French sentence pairs
# from the dmlc web-data repo.
model_file = "s2s_translate.h5"
data_file = "fra-eng.txt"
# Base location for model related files.
repo_base = 'https://github.com/dmlc/web-data/raw/master/keras/models/s2s_translate/'
# NOTE(review): os.path.join on URLs relies on the separator being '/';
# posixpath.join or plain concatenation would be safer across platforms.
model_url = os.path.join(repo_base, model_file)
data_url = os.path.join(repo_base, data_file)
# Download files listed below.
from tvm.contrib.download import download_testdata
model_path = download_testdata(model_url, model_file, module='keras')
data_path = download_testdata(data_url, data_file, module='data')
latent_dim = 256  # Latent dimensionality of the encoding space.
test_samples = 10000  # Number of samples used for testing.
######################################################################
# Process the data file
# ---------------------
# Vectorize the data. We use the same approach as the training script.
# NOTE: the data must be identical, in order for the character -> integer
# mappings to be consistent.
# NOTE(review): input_texts/target_texts are never populated in this script.
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')
# Clamp the sample count to what the data file actually provides.
test_samples = min(test_samples, len(lines))
max_encoder_seq_length = 0
max_decoder_seq_length = 0
# Each line is "<input>\t<target>"; collect the character vocabularies and
# the longest sequence lengths over the first `test_samples` lines only.
for line in lines[:test_samples]:
    input_text, target_text = line.split('\t')
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    max_encoder_seq_length = max(max_encoder_seq_length, len(input_text))
    max_decoder_seq_length = max(max_decoder_seq_length, len(target_text))
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
# char -> integer index maps used for one-hot encoding.
input_token_index = dict(
    [(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
    [(char, i) for i, char in enumerate(target_characters)])
# Reverse-lookup token index to decode sequences back to something readable.
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
######################################################################
# Load Keras Model
# ----------------
# Restore the model and construct the encoder and decoder.
model = load_model(model_path)
# Encoder: expose only the final LSTM hidden/cell states as outputs.
encoder_inputs = model.input[0]  # input_1
encoder_outputs, state_h_enc, state_c_enc = model.layers[2].output  # lstm_1
encoder_states = [state_h_enc, state_c_enc]
encoder_model = Model(encoder_inputs, encoder_states)
# Decoder: takes the previous step's hidden/cell states as extra inputs
# (input_3/input_4) so it can be run one timestep at a time.
decoder_inputs = model.input[1]  # input_2
decoder_state_input_h = Input(shape=(latent_dim,), name='input_3')
decoder_state_input_c = Input(shape=(latent_dim,), name='input_4')
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_lstm = model.layers[3]
decoder_outputs, state_h_dec, state_c_dec = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h_dec, state_c_dec]
decoder_dense = model.layers[4]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
######################################################################
# Compile both encoder and decoder model on NNVM
# ----------------------------------------------
# Creates NNVM graph definition from keras model file.
from tvm.contrib import graph_runtime
target = 'llvm'
ctx = tvm.cpu(0)
# Parse Encoder model
sym, params = nnvm.frontend.from_keras(encoder_model)
# Encoder consumes one full (padded) one-hot sentence at a time.
inp_enc_shape = (1, max_encoder_seq_length, num_encoder_tokens)
shape_dict = {'input_1': inp_enc_shape}
# Build Encoder model
with nnvm.compiler.build_config(opt_level=2):
    enc_graph, enc_lib, enc_params = nnvm.compiler.build(sym, target, shape_dict, params=params)
print("Encoder build ok.")
# Create graph runtime for encoder model
tvm_enc = graph_runtime.create(enc_graph, enc_lib, ctx)
tvm_enc.set_input(**enc_params)
# Parse Decoder model
# The decoder is invoked one timestep at a time, hence sequence length 1.
inp_dec_shape = (1, 1, num_decoder_tokens)
shape_dict = {'input_2': inp_dec_shape,
              'input_3': (1, latent_dim),
              'input_4': (1, latent_dim)}
# Build Decoder model
sym, params = nnvm.frontend.from_keras(decoder_model)
with nnvm.compiler.build_config(opt_level=2):
    dec_graph, dec_lib, dec_params = nnvm.compiler.build(sym, target, shape_dict, params=params)
print("Decoder build ok.")
# Create graph runtime for decoder model
tvm_dec = graph_runtime.create(dec_graph, dec_lib, ctx)
tvm_dec.set_input(**dec_params)
# Decodes an input sequence.
def decode_sequence(input_seq):
    """Greedily decode one one-hot encoded input sequence to a string.

    Runs the compiled encoder once to get the initial LSTM states, then
    feeds the decoder one character per step (starting from the '\\t'
    start token) until it emits '\\n' or the sentence grows past
    max_decoder_seq_length.
    """
    # Set the input for encoder model.
    tvm_enc.set_input('input_1', input_seq)
    # Run encoder model
    tvm_enc.run()
    # Get hidden/cell states from encoder network; these seed the decoder.
    h = tvm_enc.get_output(0).asnumpy()
    c = tvm_enc.get_output(1).asnumpy()
    # Populate the first character of target sequence with the start character.
    sampled_token_index = target_token_index['\t']
    # Sampling loop: one decoder step per output character.
    decoded_sentence = ''
    while True:
        # Generate empty target sequence of length 1.
        target_seq = np.zeros((1, 1, num_decoder_tokens), dtype='float32')
        # One-hot encode the previously sampled token.
        target_seq[0, 0, sampled_token_index] = 1.
        # Set the input and states for decoder model.
        tvm_dec.set_input('input_2', target_seq)
        tvm_dec.set_input('input_3', h)
        tvm_dec.set_input('input_4', c)
        # Run decoder model
        tvm_dec.run()
        output_tokens = tvm_dec.get_output(0).asnumpy()
        # Carry the updated states into the next step.
        h = tvm_dec.get_output(1).asnumpy()
        c = tvm_dec.get_output(2).asnumpy()
        # Greedy sampling: pick the most probable token.
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        # Exit condition 1: stop character found (not appended to the output).
        if sampled_char == '\n':
            break
        # Update the sentence
        decoded_sentence += sampled_char
        # Exit condition 2: maximum sentence length exceeded.
        if len(decoded_sentence) > max_decoder_seq_length:
            break
    return decoded_sentence
def generate_input_seq(input_text):
    """One-hot encode *input_text* into a (1, max_len, num_tokens) array."""
    encoded = np.zeros((1, max_encoder_seq_length, num_encoder_tokens), dtype='float32')
    for position, character in enumerate(input_text):
        encoded[0, position, input_token_index[character]] = 1.0
    return encoded
######################################################################
# Run the model
# -------------
# Randomly pick sentences from the test samples and translate them.
for seq_index in range(100):
    # Only lines[:test_samples] were used to build the character
    # vocabularies above; sampling an index outside that range could hit
    # an unseen character (KeyError) or run past the end of the list
    # (IndexError).  randrange(test_samples) yields 0..test_samples-1,
    # unlike the previous randint(1, test_samples) which was off by one
    # at both ends.
    index = random.randrange(test_samples)
    input_text, _ = lines[index].split('\t')
    input_seq = generate_input_seq(input_text)
    decoded_sentence = decode_sequence(input_seq)
    print((seq_index + 1), ": ", input_text, "==>", decoded_sentence)
| {
"content_hash": "66e05b887938bb39ce096d26d39d7988",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 96,
"avg_line_length": 38.285714285714285,
"alnum_prop": 0.6658252853380158,
"repo_name": "Huyuwei/tvm",
"id": "16c737418c6f8e80f66535ed6f0aaca5f3242c86",
"size": "9897",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nnvm/tutorials/nlp/keras_s2s_translate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5569606"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6775044"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96633"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
} |
'''
Module for handling openstack glance calls.
:optdepends: - glanceclient Python adapter
:configuration: This module is not usable until the following are specified
either in a pillar or in the minion's config file::
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.insecure: False #(optional)
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
If configuration for multiple openstack accounts is required, they can be
set up as different configuration profiles:
For example::
openstack1:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.1:5000/v2.0/'
openstack2:
keystone.user: admin
keystone.password: verybadpass
keystone.tenant: admin
keystone.tenant_id: f80919baedab48ec8931f200c65a50df
keystone.auth_url: 'http://127.0.0.2:5000/v2.0/'
With this configuration in place, any of the keystone functions can make use
of a configuration profile by declaring it explicitly.
For example::
salt '*' glance.image_list profile=openstack1
'''
# Import third party libs
HAS_GLANCE = False
try:
from glanceclient import client
import glanceclient.v1.images
HAS_GLANCE = True
except ImportError:
pass
def __virtual__():
    '''
    Load this module under the name ``glance`` only when the
    glanceclient library could be imported on this minion.
    '''
    return 'glance' if HAS_GLANCE else False
__opts__ = {}  # NOTE(review): presumably replaced by Salt's loader with the minion opts — confirm
def _auth(profile=None):
    '''
    Authenticate via keystone and return a glance v1 client bound to
    the public image endpoint.
    '''
    keystone = __salt__['keystone.auth'](profile)
    token = keystone.auth_token
    endpoint = keystone.service_catalog.url_for(
        service_type='image',
        endpoint_type='publicURL',
    )
    return client.Client('1', endpoint, token=token)
def image_create(profile=None, **kwargs):
    '''
    Create an image (glance image-create)
    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_create name=f16-jeos is_public=true \\
                 disk_format=qcow2 container_format=ovf \\
                 copy_from=http://berrange.fedorapeople.org/images/2012-02-29/f16-x86_64-openstack-sda.qcow2
    For all possible values, run ``glance help image-create`` on the minion.
    '''
    nt_ks = _auth(profile)
    # Forward only the keyword arguments glance's create call accepts.
    fields = {key: value for key, value in kwargs.items()
              if key in glanceclient.v1.images.CREATE_PARAMS}
    image = nt_ks.images.create(**fields)
    newimage = image_list(str(image.id))
    return {newimage['name']: newimage}
def image_delete(id=None, name=None, profile=None):  # pylint: disable=C0103
    '''
    Delete an image (glance image-delete)
    CLI Examples:
    .. code-block:: bash
        salt '*' glance.image_delete c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_delete id=c2eb2eb0-53e1-4a80-b990-8ec887eae7df
        salt '*' glance.image_delete name=f16-jeos
    '''
    nt_ks = _auth(profile)
    if name:
        # Scan the whole listing; if several images share the name the
        # last match wins (matches the historical lookup behaviour).
        for image in nt_ks.images.list():
            if image.name == name:
                id = image.id  # pylint: disable=C0103
    if not id:
        return {'Error': 'Unable to resolve image id'}
    nt_ks.images.delete(id)
    message = 'Deleted image with ID {0}'.format(id)
    if name:
        message += ' ({0})'.format(name)
    return message
def image_show(id=None, name=None, profile=None):  # pylint: disable=C0103
    '''
    Return details about a specific image (glance image-show)
    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_get
    '''
    nt_ks = _auth(profile)
    ret = {}
    if name:
        # Resolve the name to an id; the last listing match wins.
        for image in nt_ks.images.list():
            if image.name == name:
                id = image.id  # pylint: disable=C0103
    if not id:
        return {'Error': 'Unable to resolve image id'}
    image = nt_ks.images.get(id)
    attributes = ('id', 'name', 'checksum', 'container_format', 'created_at',
                  'deleted', 'disk_format', 'is_public', 'min_disk',
                  'min_ram', 'owner', 'protected', 'size', 'status',
                  'updated_at')
    ret[image.name] = {attr: getattr(image, attr) for attr in attributes}
    return ret
def image_list(id=None, profile=None):  # pylint: disable=C0103
    '''
    Return a list of available images (glance image-list)
    CLI Example:
    .. code-block:: bash
        salt '*' glance.image_list
    '''
    nt_ks = _auth(profile)
    attributes = ('id', 'name', 'checksum', 'container_format', 'created_at',
                  'deleted', 'disk_format', 'is_public', 'min_disk',
                  'min_ram', 'owner', 'protected', 'size', 'status',
                  'updated_at')
    ret = {}
    for image in nt_ks.images.list():
        ret[image.name] = {attr: getattr(image, attr) for attr in attributes}
        if id == image.id:
            # A specific id was requested: return just that record.
            return ret[image.name]
    return ret
def _item_list(profile=None):
    '''
    Template for writing list functions
    Return a list of available items (glance items-list)
    CLI Example:
    .. code-block:: bash
        salt '*' glance.item_list
    '''
    nt_ks = _auth(profile)
    return [item.__dict__ for item in nt_ks.items.list()]
#The following is a list of functions that need to be incorporated in the
#glance module. This list should be updated as functions are added.
#image-download Download a specific image.
#image-update Update a specific image.
#member-create Share a specific image with a tenant.
#member-delete Remove a shared image from a tenant.
#member-list Describe sharing permissions by image or tenant.
| {
"content_hash": "2607170ccb55ebe062e6a2703a4191d7",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 108,
"avg_line_length": 28.879310344827587,
"alnum_prop": 0.585820895522388,
"repo_name": "victorywang80/Maintenance",
"id": "b4c6f48b8e100c3c3727daa5b4cfa53f7e735418",
"size": "6724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saltstack/src/salt/modules/glance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160954"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "4522522"
},
{
"name": "Scheme",
"bytes": "7488"
},
{
"name": "Shell",
"bytes": "14653"
}
],
"symlink_target": ""
} |
"""
Bitmap is a basic wrapper for image pixels. It includes some basic processing
tools: crop, find bounding box of a color and compute histogram of color values.
"""
import array
import base64
import cStringIO
import collections
import os
import struct
import subprocess
def HistogramDistance(hist1, hist2):
  """Earth mover's distance between two equally sized histograms.
  http://en.wikipedia.org/wiki/Earth_mover's_distance
  Both histograms are implicitly normalized (by cross-multiplying with the
  other histogram's total). Treating them as piles of dirt, the cost of
  turning one pile into the other is accumulated bucket by bucket: the
  surplus in each bucket is carried over to the next one, so differences
  are weighted by how far they have to move."""
  if len(hist1) != len(hist2):
    raise ValueError('Trying to compare histograms '
                     'of different sizes, %s != %s' % (len(hist1), len(hist2)))
  n1 = sum(hist1)
  n2 = sum(hist2)
  if n1 == 0:
    raise ValueError('First histogram has 0 pixels in it.')
  if n2 == 0:
    raise ValueError('Second histogram has 0 pixels in it.')
  carry = 0
  total = 0
  for v1, v2 in zip(hist1, hist2):
    # Cross-multiplied difference; carrying it forward weights by distance.
    carry += v1 * n2 - v2 * n1
    total += abs(carry)
  # Totals cancel exactly over the full range, so the carry must end at 0.
  assert carry == 0, (
      '%s pixel(s) left over after computing histogram distance.'
      % abs(carry))
  return abs(float(total) / n1 / n2)
class ColorHistogram(
    collections.namedtuple('ColorHistogram', ['r', 'g', 'b', 'default_color'])):
  """Per-channel (r, g, b) 256-bucket histograms plus an optional fallback
  color used when a channel histogram is empty."""
  # pylint: disable=W0232
  # pylint: disable=E1002
  def __new__(cls, r, g, b, default_color=None):
    return super(ColorHistogram, cls).__new__(cls, r, g, b, default_color)
  def Distance(self, other):
    """Sum of the per-channel earth mover's distances to |other|."""
    total = 0
    for channel in range(3):
      prepared = []
      for histogram, source in ((self[channel], self),
                                (other[channel], other)):
        if sum(histogram) == 0:
          # Empty channel: substitute a single-pixel histogram at the
          # source's default color, if one was provided.
          if not source.default_color:
            raise ValueError('Histogram has no data and no default color.')
          histogram = [0] * 256
          histogram[source.default_color[channel]] = 1
        prepared.append(histogram)
      total += HistogramDistance(prepared[0], prepared[1])
    return total
class RgbaColor(collections.namedtuple('RgbaColor', ['r', 'g', 'b', 'a'])):
  """An RGBA color value read out of a Bitmap; alpha defaults to opaque."""
  # pylint: disable=W0232
  # pylint: disable=E1002
  def __new__(cls, r, g, b, a=255):
    return super(RgbaColor, cls).__new__(cls, r, g, b, a)
  def __int__(self):
    # Pack as 0xRRGGBB; the alpha channel is deliberately dropped.
    return (self.r << 16) | (self.g << 8) | self.b
  def IsEqual(self, expected_color, tolerance=0):
    """True when every channel (including alpha) is within |tolerance| of
    the corresponding channel of |expected_color|."""
    return all(abs(have - want) <= tolerance
               for have, want in zip(self, expected_color))
  def AssertIsRGB(self, r, g, b, tolerance=0):
    assert self.IsEqual(RgbaColor(r, g, b), tolerance)
  def AssertIsRGBA(self, r, g, b, a, tolerance=0):
    assert self.IsEqual(RgbaColor(r, g, b, a), tolerance)
# Reference colors used by callers of this module when inspecting frames.
WEB_PAGE_TEST_ORANGE = RgbaColor(222, 100, 13)
WHITE = RgbaColor(255, 255, 255)
class _BitmapTools(object):
  """Wraps a child process of bitmaptools and allows for one command."""
  # Command opcodes understood by the bitmaptools binary; each instance
  # accepts exactly one command over stdin (see _RunCommand).
  CROP_PIXELS = 0
  HISTOGRAM = 1
  BOUNDING_BOX = 2
  def __init__(self, dimensions, pixels):
    # Spawns the helper binary and streams it the header plus raw pixels.
    # NOTE(review): the binary path is relative to the current working
    # directory, not to this file — confirm callers run from the build dir.
    binary = './bitmaptools'
    assert os.path.exists(binary), 'You must build bitmaptools first!'
    self._popen = subprocess.Popen([binary],
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
    # dimensions are: bpp, width, height, boxleft, boxtop, boxwidth, boxheight
    packed_dims = struct.pack('iiiiiii', *dimensions)
    self._popen.stdin.write(packed_dims)
    # If we got a list of ints, we need to convert it into a byte buffer.
    if type(pixels) is not bytearray:
      pixels = bytearray(pixels)
    self._popen.stdin.write(pixels)
  def _RunCommand(self, *command):
    # Writes the packed command words, closes stdin (hence one command per
    # instance), then reads back a length-prefixed response.
    assert not self._popen.stdin.closed, (
        'Exactly one command allowed per instance of tools.')
    packed_command = struct.pack('i' * len(command), *command)
    self._popen.stdin.write(packed_command)
    self._popen.stdin.close()
    length_packed = self._popen.stdout.read(struct.calcsize('i'))
    if not length_packed:
      # No length prefix means the child failed; surface its stderr.
      raise Exception(self._popen.stderr.read())
    length = struct.unpack('i', length_packed)[0]
    return self._popen.stdout.read(length)
  def CropPixels(self):
    """Returns the raw cropped pixel bytes for the box given at init."""
    return self._RunCommand(_BitmapTools.CROP_PIXELS)
  def Histogram(self, ignore_color, tolerance):
    """Returns a ColorHistogram, optionally excluding |ignore_color|."""
    # -1 tells the binary not to ignore any color.
    ignore_color_int = -1 if ignore_color is None else int(ignore_color)
    response = self._RunCommand(_BitmapTools.HISTOGRAM,
                                ignore_color_int, tolerance)
    out = array.array('i')
    out.fromstring(response)
    # 768 = 3 channels x 256 buckets.
    assert len(out) == 768, (
        'The ColorHistogram has the wrong number of buckets: %s' % len(out))
    return ColorHistogram(out[:256], out[256:512], out[512:], ignore_color)
  def BoundingBox(self, color, tolerance):
    """Returns ((box), match_count); box is None when no pixel matched."""
    response = self._RunCommand(_BitmapTools.BOUNDING_BOX, int(color),
                                tolerance)
    unpacked = struct.unpack('iiiii', response)
    box, count = unpacked[:4], unpacked[-1]
    # Negative width/height is the binary's "not found" sentinel.
    if box[2] < 0 or box[3] < 0:
      box = None
    return box, count
class Bitmap(object):
  """Utilities for parsing and inspecting a bitmap."""
  def __init__(self, bpp, width, height, pixels, metadata=None):
    # bpp: bytes per pixel (3 = RGB, 4 = RGBA); pixels: flat byte sequence
    # of length bpp * width * height.
    assert bpp in [3, 4], 'Invalid bytes per pixel'
    assert width > 0, 'Invalid width'
    assert height > 0, 'Invalid height'
    assert pixels, 'Must specify pixels'
    assert bpp * width * height == len(pixels), 'Dimensions and pixels mismatch'
    self._bpp = bpp
    self._width = width
    self._height = height
    self._pixels = pixels
    self._metadata = metadata or {}
    # Pending crop as (left, top, width, height); applied lazily in |pixels|.
    self._crop_box = None
  @property
  def bpp(self):
    """Bytes per pixel."""
    return self._bpp
  @property
  def width(self):
    """Width of the bitmap."""
    return self._crop_box[2] if self._crop_box else self._width
  @property
  def height(self):
    """Height of the bitmap."""
    return self._crop_box[3] if self._crop_box else self._height
  def _PrepareTools(self):
    """Prepares an instance of _BitmapTools which allows exactly one command.
    """
    crop_box = self._crop_box or (0, 0, self._width, self._height)
    return _BitmapTools((self._bpp, self._width, self._height) + crop_box,
                        self._pixels)
  @property
  def pixels(self):
    """Flat pixel array of the bitmap."""
    if self._crop_box:
      # Materialize the pending crop via the bitmaptools child process.
      self._pixels = self._PrepareTools().CropPixels()
      _, _, self._width, self._height = self._crop_box
      self._crop_box = None
    if type(self._pixels) is not bytearray:
      self._pixels = bytearray(self._pixels)
    return self._pixels
  @property
  def metadata(self):
    # Size/alpha/bitdepth are refreshed on every access.
    self._metadata['size'] = (self.width, self.height)
    self._metadata['alpha'] = self.bpp == 4
    self._metadata['bitdepth'] = 8
    return self._metadata
  def GetPixelColor(self, x, y):
    """Returns a RgbaColor for the pixel at (x, y)."""
    pixels = self.pixels
    base = self._bpp * (y * self._width + x)
    if self._bpp == 4:
      return RgbaColor(pixels[base + 0], pixels[base + 1],
                       pixels[base + 2], pixels[base + 3])
    return RgbaColor(pixels[base + 0], pixels[base + 1],
                     pixels[base + 2])
  def IsEqual(self, other, tolerance=0):
    """Determines whether two Bitmaps are identical within a given tolerance."""
    # Dimensions must be equal
    if self.width != other.width or self.height != other.height:
      return False
    # Loop over each pixel and test for equality
    if tolerance or self.bpp != other.bpp:
      for y in range(self.height):
        for x in range(self.width):
          c0 = self.GetPixelColor(x, y)
          c1 = other.GetPixelColor(x, y)
          if not c0.IsEqual(c1, tolerance):
            return False
    else:
      # Same bpp and zero tolerance: a raw byte compare is sufficient.
      return self.pixels == other.pixels
    return True
  def Diff(self, other):
    """Returns a new Bitmap that represents the difference between this image
    and another Bitmap."""
    # Output dimensions will be the maximum of the two input dimensions
    out_width = max(self.width, other.width)
    out_height = max(self.height, other.height)
    diff = [[0 for x in xrange(out_width * 3)] for x in xrange(out_height)]
    # Loop over each pixel and write out the difference
    for y in range(out_height):
      for x in range(out_width):
        # Out-of-range pixels are treated as fully transparent black.
        if x < self.width and y < self.height:
          c0 = self.GetPixelColor(x, y)
        else:
          c0 = RgbaColor(0, 0, 0, 0)
        if x < other.width and y < other.height:
          c1 = other.GetPixelColor(x, y)
        else:
          c1 = RgbaColor(0, 0, 0, 0)
        offset = x * 3
        diff[y][offset] = abs(c0.r - c1.r)
        diff[y][offset+1] = abs(c0.g - c1.g)
        diff[y][offset+2] = abs(c0.b - c1.b)
    # This particular method can only save to a file, so the result will be
    # written into an in-memory buffer and read back into a Bitmap
    # NOTE(review): |png| is not among this module's visible imports —
    # presumably provided by the embedding project; confirm before relying
    # on Diff. Bitmap.FromPng is also defined elsewhere.
    diff_img = png.from_array(diff, mode='RGB')
    output = cStringIO.StringIO()
    try:
      diff_img.save(output)
      diff = Bitmap.FromPng(output.getvalue())
    finally:
      output.close()
    return diff
  def GetBoundingBox(self, color, tolerance=0):
    """Finds the minimum box surrounding all occurences of |color|.
    Returns: (top, left, width, height), match_count
    Ignores the alpha channel."""
    return self._PrepareTools().BoundingBox(color, tolerance)
  def Crop(self, left, top, width, height):
    """Crops the current bitmap down to the specified box."""
    # Crops compose: the new box is expressed relative to any pending crop.
    cur_box = self._crop_box or (0, 0, self._width, self._height)
    cur_left, cur_top, cur_width, cur_height = cur_box
    if (left < 0 or top < 0 or
        (left + width) > cur_width or
        (top + height) > cur_height):
      raise ValueError('Invalid dimensions')
    self._crop_box = cur_left + left, cur_top + top, width, height
    return self
  def ColorHistogram(self, ignore_color=None, tolerance=0):
    """Computes a histogram of the pixel colors in this Bitmap.
    Args:
      ignore_color: An RgbaColor to exclude from the bucket counts.
      tolerance: A tolerance for the ignore_color.
    Returns:
      A ColorHistogram namedtuple with 256 integers in each field: r, g, and b.
    """
    return self._PrepareTools().Histogram(ignore_color, tolerance)
| {
"content_hash": "1ad0bbb0e33fed83ba4ffc72af41255d",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 80,
"avg_line_length": 33.70716510903427,
"alnum_prop": 0.6324399260628466,
"repo_name": "google/AppSpeedIndex",
"id": "9e7b796d9220b7d2227d0920573fbb38c60fb151",
"size": "10983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bitmap.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "6416"
},
{
"name": "Java",
"bytes": "1532"
},
{
"name": "Python",
"bytes": "183266"
}
],
"symlink_target": ""
} |
import uuid
from django.core.urlresolvers import reverse
from django import http
from django.utils.datastructures import SortedDict
from mox import IsA
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class InstanceViewTest(test.BaseAdminViewTests):
    """Tests for the admin Instances index view.

    Each test records expected api.nova/api.keystone calls with mox stubs,
    replays them, then renders the index view and checks the result.
    """
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index(self):
        # Happy path: servers, flavors and tenants all load successfully.
        servers = self.servers.list()
        flavors = self.flavors.list()
        tenants = self.tenants.list()
        api.keystone.tenant_list(IsA(http.HttpRequest)).\
            AndReturn([tenants, False])
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True, search_opts=search_opts) \
            .AndReturn([servers, False])
        api.nova.flavor_list(IsA(http.HttpRequest)).AndReturn(flavors)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        instances = res.context['table'].data
        self.assertItemsEqual(instances, servers)
    @test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
                                   'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index_flavor_list_exception(self):
        # When flavor_list fails, the view falls back to per-server
        # flavor_get calls and still renders the table.
        servers = self.servers.list()
        tenants = self.tenants.list()
        flavors = self.flavors.list()
        full_flavors = SortedDict([(f.id, f) for f in flavors])
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True, search_opts=search_opts) \
            .AndReturn([servers, False])
        api.nova.flavor_list(IsA(http.HttpRequest)). \
            AndRaise(self.exceptions.nova)
        api.keystone.tenant_list(IsA(http.HttpRequest)).\
            AndReturn([tenants, False])
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndReturn(full_flavors[server.flavor["id"]])
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        instances = res.context['table'].data
        self.assertItemsEqual(instances, servers)
    @test.create_stubs({api.nova: ('flavor_list', 'flavor_get',
                                   'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index_flavor_get_exception(self):
        # When both flavor_list lookup and the fallback flavor_get fail,
        # the view renders with one error message per server.
        servers = self.servers.list()
        flavors = self.flavors.list()
        tenants = self.tenants.list()
        # UUIDs generated using indexes are unlikely to match
        # any of existing flavor ids and are guaranteed to be deterministic.
        for i, server in enumerate(servers):
            server.flavor['id'] = str(uuid.UUID(int=i))
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True, search_opts=search_opts) \
            .AndReturn([servers, False])
        api.nova.flavor_list(IsA(http.HttpRequest)). \
            AndReturn(flavors)
        api.keystone.tenant_list(IsA(http.HttpRequest)).\
            AndReturn([tenants, False])
        for server in servers:
            api.nova.flavor_get(IsA(http.HttpRequest), server.flavor["id"]). \
                AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:instances:index'))
        instances = res.context['table'].data
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        self.assertMessageCount(res, error=len(servers))
        self.assertItemsEqual(instances, servers)
    @test.create_stubs({api.nova: ('server_list',)})
    def test_index_server_list_exception(self):
        # A failing server_list should yield an empty table, not a crash.
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True, search_opts=search_opts) \
            .AndRaise(self.exceptions.nova)
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertTemplateUsed(res, 'admin/instances/index.html')
        self.assertEqual(len(res.context['instances_table'].data), 0)
    @test.create_stubs({api.nova: ('server_get', 'flavor_get',),
                        api.keystone: ('tenant_get',)})
    def test_ajax_loading_instances(self):
        # Single-row AJAX update: the row is rendered with full details.
        server = self.servers.first()
        flavor = self.flavors.list()[0]
        tenant = self.tenants.list()[0]
        api.nova.server_get(IsA(http.HttpRequest), server.id).AndReturn(server)
        api.nova.flavor_get(IsA(http.HttpRequest),
                            server.flavor['id']).AndReturn(flavor)
        api.keystone.tenant_get(IsA(http.HttpRequest),
                                server.tenant_id,
                                admin=True).AndReturn(tenant)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:instances:index') + \
                "?action=row_update&table=instances&obj_id=" + server.id
        res = self.client.get(url, {},
                              HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        self.assertTemplateUsed(res, "horizon/common/_data_table_row.html")
        self.assertContains(res, "test_tenant", 1, 200)
        self.assertContains(res, "instance-host", 1, 200)
        # two instances of name, other name comes from row data-display
        self.assertContains(res, "server_1", 2, 200)
        self.assertContains(res, "10.0.0.1", 1, 200)
        self.assertContains(res, "512MB RAM | 1 VCPU | 0 Disk", 1, 200)
        self.assertContains(res, "Active", 1, 200)
        self.assertContains(res, "Running", 1, 200)
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index_options_before_migrate(self):
        # Before a migration, only the Migrate action should be offered.
        api.keystone.tenant_list(IsA(http.HttpRequest)).\
            AndReturn([self.tenants.list(), False])
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True, search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.flavor_list(IsA(http.HttpRequest)).\
            AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertContains(res, "instances__migrate")
        self.assertNotContains(res, "instances__confirm")
        self.assertNotContains(res, "instances__revert")
    @test.create_stubs({api.nova: ('flavor_list', 'server_list',),
                        api.keystone: ('tenant_list',)})
    def test_index_options_after_migrate(self):
        # Servers in VERIFY_RESIZE should offer Confirm/Revert, not Migrate.
        servers = self.servers.list()
        server1 = servers[0]
        server1.status = "VERIFY_RESIZE"
        server2 = servers[2]
        server2.status = "VERIFY_RESIZE"
        api.keystone.tenant_list(IsA(http.HttpRequest)) \
            .AndReturn([self.tenants.list(), False])
        search_opts = {'marker': None, 'paginate': True}
        api.nova.server_list(IsA(http.HttpRequest),
                             all_tenants=True, search_opts=search_opts) \
            .AndReturn([self.servers.list(), False])
        api.nova.flavor_list(IsA(http.HttpRequest)).\
            AndReturn(self.flavors.list())
        self.mox.ReplayAll()
        res = self.client.get(reverse('horizon:admin:instances:index'))
        self.assertContains(res, "instances__confirm")
        self.assertContains(res, "instances__revert")
        self.assertNotContains(res, "instances__migrate")
| {
"content_hash": "5613ce3d285f58ad2f39eccc3f76a420",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 79,
"avg_line_length": 46.80681818181818,
"alnum_prop": 0.5793882010196649,
"repo_name": "fajoy/horizon-example",
"id": "8e379ef601abdc9f3e7e7db35465244e007bb09a",
"size": "8888",
"binary": false,
"copies": "2",
"ref": "refs/heads/example",
"path": "openstack_dashboard/dashboards/admin/instances/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import time
import re
import logging
from ggputils.utils import *
from ggputils.utils import _fmt
from cgi import escape
from gevent.lock import *
from gevent.queue import *
from gevent.event import *
g_logger = logging.getLogger(__name__)
#-------------------------------------------------------------------------
# Handler does the hard work
#-------------------------------------------------------------------------
class Handler(object):
    """WSGI-callable handler implementing the GGP network protocol for a
    general game player, dispatching game-master messages to callbacks."""
    #-------------------------------------
    # The GGP gdl protocol versions
    #-------------------------------------
    GGP1 = 1 # GDL I protocol
    GGP2 = 2 # GDL-II protocol
    #-------------------------------------
    # compiled regex for the GGP messages
    #-------------------------------------
    # SEARCH_* patterns cheaply classify a message type; MATCH_* patterns
    # additionally capture its components.
    SEARCH_START = r'^\s*\(\s*START'
    SEARCH_PLAY = r'\s*\(\s*PLAY'
    SEARCH_STOP = r'\s*\(\s*STOP'
    SEARCH_INFO = r'\s*\(\s*INFO'
    SEARCH_ABORT = r'\s*\(\s*ABORT'
    SEARCH_PREVIEW = r'\s*\(\s*PREVIEW'
    MATCH_START = r'^\s*\(\s*START\s+([^\s]+)\s+([^\s]+)\s+\((.*)\)\s+(\d+)\s+(\d+)\s*\)\s*$'
    MATCH_PLAY = r'^\s*\(\s*PLAY\s+([^\s]+)\s+(.*)\s*\)\s*$'
    MATCH_STOP = r'^\s*\(\s*STOP\s+([^\s]+)\s+(.*)\s*\)\s*$'
    MATCH_INFO = r'\s*\(\s*INFO\s*\)\s*$'
    MATCH_ABORT = r'\s*\(\s*ABORT\s+([^\s]+)\s*\)\s*$'
    MATCH_PREVIEW = r'\s*\(\s*PREVIEW\s+\((.*)\)\s+(\d+)\s*\)\s*$'
    # Extracts the matchid (group 2) from any START/PLAY/STOP message.
    MATCH_SPS_MATCHID = r'\s*\(\s*(START|PLAY|STOP)\s+([^\s]+)\s+.*\)\s*$'
    re_s_START = re.compile(SEARCH_START, re.IGNORECASE)
    re_s_PLAY = re.compile(SEARCH_PLAY, re.IGNORECASE)
    re_s_STOP = re.compile(SEARCH_STOP, re.IGNORECASE)
    re_s_INFO = re.compile(SEARCH_INFO, re.IGNORECASE)
    re_s_ABORT = re.compile(SEARCH_ABORT, re.IGNORECASE)
    re_s_PREVIEW = re.compile(SEARCH_PREVIEW, re.IGNORECASE)
    re_m_START = re.compile(MATCH_START, re.IGNORECASE | re.DOTALL)
    re_m_PLAY = re.compile(MATCH_PLAY, re.IGNORECASE | re.DOTALL)
    re_m_STOP = re.compile(MATCH_STOP, re.IGNORECASE | re.DOTALL)
    re_m_INFO = re.compile(MATCH_INFO, re.IGNORECASE | re.DOTALL)
    re_m_ABORT = re.compile(MATCH_ABORT, re.IGNORECASE | re.DOTALL)
    re_m_PREVIEW = re.compile(MATCH_PREVIEW, re.IGNORECASE | re.DOTALL)
    re_m_SPS_MATCHID = re.compile(MATCH_SPS_MATCHID, re.IGNORECASE | re.DOTALL)
    re_m_GDL_ROLE = re.compile("role", re.IGNORECASE)
#---------------------------------------------------------------------------------
# Constructor takes callbacks for the different GGP message types.
# INFO and PREVIEW callbacks are optional with the following default behaviours:
# - PREVIEW: does nothing except responds with "DONE"
# - INFO: if not in a game then responds with "AVAILABLE", or "BUSY" otherwise.
#---------------------------------------------------------------------------------
    def __init__(self, on_start=None,
                 on_play=None, on_stop=None,
                 on_play2=None, on_stop2=None,
                 on_abort=None,
                 on_info=None, on_preview=None,
                 protocol_version=None, test_mode=False):
        """Create a handler with callbacks for the GGP message types.

        GDL-I (GGP1) requires on_start, on_play, on_stop, on_abort;
        GDL-II (GGP2) requires on_start, on_play2, on_stop2, on_abort.
        on_info and on_preview are optional (defaults documented above).
        test_mode skips the callback-presence validation so individual
        callbacks can be unit tested in isolation.
        """
        if not protocol_version: protocol_version=Handler.GGP1
        assert protocol_version in [Handler.GGP1, Handler.GGP2],\
            "Unrecognised GDL protocol version {0}".format(protocol_version)
        # Test mode is useful for unit testing individual callback functions
        if not test_mode:
            if (protocol_version == Handler.GGP1) and \
               not (on_start and on_play and on_stop and on_abort):
                raise ValueError(("Must have valid callbacks for: on_start, "
                                  "on_play, on_stop, on_abort"))
            elif (protocol_version == Handler.GGP2) and \
                 not (on_start and on_play2 and on_stop2 and on_abort):
                raise ValueError(("Must have valid callbacks for: on_start, "
                                  "on_play2, on_stop2, on_abort"))
        g_logger.info("Running player for GDL version: {0}".format(protocol_version))
        self._protocol_version = protocol_version
        self._on_START = on_start
        self._on_PLAY = on_play
        self._on_STOP = on_stop
        self._on_PLAY2 = on_play2
        self._on_STOP2 = on_stop2
        self._on_ABORT = on_abort
        self._on_INFO = on_info
        self._on_PREVIEW = on_preview
        # Two-queue scheme (see __call__): every connection enters the "all"
        # queue; only valid GGP messages advance to the "good" queue.
        self._all_conn_queue = Queue()
        self._good_conn_queue = Queue()
        # Whether responses are upper- or lower-cased (see _set_case/_response).
        self._uppercase = True
        # Game player state related variables
        self._gdl2_turn = 0
        self._matchid = None
        self._playclock = None
        self._startclock = None
        self._roles = []
#----------------------------------------------------------------------------
# Call that adheres to the WSGI application specification. Handles
# all connections in order and tries to weed out bad
# ones. Maintains two queues: all-connections and
# good-connections. Every connection is added to the
# all-connections queue. It then decides if it is a bad connection
# in which case it doesn't go on to the good-connection queue. This
# is done immediately on the handler being called. If it is a good
# connection then it is placed on the good-connection queue to
# ensure that only one message is handled at a time and that it is
# handled in the correct order.
#
# This two queue mechanism ensures that bad message can be quickly
# filtered out while maintaining a clean orderly queue for
# legitimate messages. Of course, in normal operation we would
# expect the good queue to only ever contain the current message
# being handled, but it does mean that even if the player gets
# behind, the messages will be processed in an orderly way and
# there is the possibility of catching up.
# ----------------------------------------------------------------------------
    def __call__(self, environ, start_response):
        """WSGI entry point; see the two-queue description in the comment
        block above for how ordering and bad-message filtering work."""
        # Timestamp as early as possible
        timestamp = time.time()
        # NOTE: _get_http_post(environ) can only be called once.
        try:
            # post_message = escape(_get_http_post(environ))
            post_message = _get_http_post(environ)
        except:
            # Anything that is not a readable POST body is rejected outright.
            return self._app_bad(environ, start_response)
        # Handle one connection at a time in order by creating an event
        # adding it to the queue and then waiting for that event to be called.
        myevent = AsyncResult()
        self._all_conn_queue.put(myevent)
        # If I'm not the head of the all queue then wait till I'm called
        if self._all_conn_queue.peek() != myevent: myevent.wait()
        mygood = self._is_good_connection(environ, timestamp, post_message)
        # If I'm not bad then add myself to the good connection queue
        if mygood:
            myevent = AsyncResult()
            self._good_conn_queue.put(myevent)
        # remove myself from the all queue and call up the next one
        self._all_conn_queue.get()
        if not self._all_conn_queue.empty(): self._all_conn_queue.peek().set()
        # If I'm not good then the journey ends here
        if not mygood: return self._app_bad(environ, start_response)
        # If I'm not the head of the good queue then wait till I'm called
        if self._good_conn_queue.peek() != myevent: myevent.wait()
        result = self._app_normal(environ, start_response, timestamp, post_message)
        # remove myself from the good queue and call up the next one
        self._good_conn_queue.get()
        if not self._good_conn_queue.empty(): self._good_conn_queue.peek().set()
        # g_logger.debug(_fmt("Handled message in: {0}s", time.time() - timestamp))
        return result
#---------------------------------------------------------------------------------
# Internal functions to handle messages
# _app_normal is for normal operation.
# _app_bad is called when the handle is for bad a connection.
#---------------------------------------------------------------------------------
def _app_normal(self, environ, start_response, timestamp, post_message):
try:
response_body = self._handle_POST(timestamp, post_message)
response_headers = _get_response_headers(environ, response_body)
start_response('200 OK', response_headers)
return response_body
except HTTPErrorResponse as er:
g_logger.info(_fmt("HTTPErrorResponse: {0}", er))
response_headers = _get_response_headers(environ, "")
start_response(str(er), response_headers)
return ""
except Exception as e:
g_logger.error(_fmt("Unknown Exception: {0}", e))
response_headers = _get_response_headers(environ, "")
start_response('500 Internal Server Error', response_headers)
raise
return ""
def _app_bad(self, environ, start_response):
try:
# Return an error
response_headers = _get_response_headers(environ, "")
start_response('400 Invalid GGP message', response_headers)
return ""
except Exception as e:
g_logger.error(_fmt("Unknown Exception: {0}", e))
response_headers = _get_response_headers(environ, "")
start_response('500 Internal Server Error', response_headers)
return ""
#---------------------------------------------------------------------------------
# Internal functions to decide if the caller is a good or bad connection:
# - GGP is only interested in POST messages.
# - ABORT should always be let through.
# - If a START/PLAY/STOP message has not been responded to within its timeout.
# then we let the message through. Otherwise its not our fault so assume bad.
#
# BUG NOTE 20141224: I need to revisit this at some point. Firstly, should weed out
# non-GGP messages here. Also should probably allow mismatched matchids here but
# ensure that it checks later to make sure there are no problems.
#---------------------------------------------------------------------------------
def _is_good_connection(self, environ, timestamp, message):
# Preview messages are always ok
if Handler.re_s_PREVIEW.match(message): return True
# A START message when we are in a game could mean a number of things:
# 1) either a message has been lost (somehow),
# 2) The game master is not operating correctly (eg. crashed and restarted),
# 3) The player (i.e., the callback functions) have not been responded
# within the timeout and there may be an end/abort message that is in
# the queue waiting to be handled.
# Whatever the case the best we can do is log an error and let the
# message through.
if Handler.re_s_START.match(message):
if self._matchid is not None:
g_logger.error(("A new START message has been received before the"
"match {0} has ended.").format(self._matchid))
return True
# Non-START game messages (those with matchids) are ok only if
# they match the current matchid.
match = Handler.re_m_SPS_MATCHID.match(message)
matchid = None
if match:
matchid = match.group(2)
else:
match = Handler.re_m_ABORT.match(message)
if match:
matchid = match.group(1)
if matchid:
if matchid == self._matchid: return True
else: return False
# It is good
return True
#---------------------------------------------------------------------------------
# Internal functions to format the response message based on the
# game master using upper or lower case. Don't think it matters
# for the Dresden game master but does for Stanford.
# ---------------------------------------------------------------------------------
def _response(self, response):
if self._uppercase: return response.upper()
return response.lower()
#---------------------------------------------------------------------------------
# Internal functions - handle the different types of GGP messages
#---------------------------------------------------------------------------------
def _handle_POST(self, timestamp, message):
logstr = message
if len(logstr) > 40: logstr = logstr[:50] + "..."
g_logger.info("Game Master message: {0}".format(logstr))
if Handler.re_s_START.match(message):
return self.handle_START(timestamp, message)
elif Handler.re_s_PLAY.match(message):
return self.handle_PLAY(timestamp, message)
elif Handler.re_s_STOP.match(message):
return self.handle_STOP(timestamp, message)
elif Handler.re_s_INFO.match(message):
return self.handle_INFO(timestamp, message)
elif Handler.re_s_ABORT.match(message):
return self.handle_ABORT(timestamp, message)
elif Handler.re_s_PREVIEW.match(message):
return self.handle_PREVIEW(timestamp, message)
else:
raise HTTPErrorResponse(400, "Invalid GGP message: {0}".format(message))
#----------------------------------------------------------------------
# handle GGP START message
#----------------------------------------------------------------------
    def handle_START(self, timestamp, message):
        """Handle a GGP START message: record the match parameters, invoke
        the on_start callback, and respond READY.

        Message layout (see MATCH_START):
        (START <matchid> <role> (<gdl>) <startclock> <playclock>)
        """
        self._set_case(message, "START")
        match = Handler.re_m_START.match(message)
        if not match:
            raise HTTPErrorResponse(400, "Malformed START message {0}".format(message))
        self._matchid = match.group(1)
        role = match.group(2)
        gdl = match.group(3)
        self._startclock = int(match.group(4))
        self._playclock = int(match.group(5))
        if self._protocol_version == Handler.GGP2: self._gdl2_turn = 0
        # Hack: need to process the GDL to extract the order of roles as they appear
        # in the GDL file so that we can get around the brokeness of the PLAY/STOP
        # messages, which require a player to know the order of roles to match
        # to the correct actions.
        try:
            self._roles_in_correct_order(gdl)
        except Exception as e:
            g_logger.error(_fmt("GDL error. Will ignore this game: {0}", e))
            self._matchid = None
            # NOTE(review): returns None (no response body) on GDL failure —
            # confirm the WSGI layer above tolerates a None body.
            return
        timeout = Timeout(timestamp, self._startclock)
        self._on_START(timeout.clone(), self._matchid, role, gdl, self._playclock)
        remaining = timeout.remaining()
        if remaining <= 0:
            # remaining is <= 0 here, so the logged value is negative or zero.
            g_logger.error(_fmt("START messsage handler late response by {0}s", remaining))
        else:
            g_logger.debug(_fmt("START response with {0}s remaining", remaining))
        # Now return the READY response
        return self._response("READY")
#----------------------------------------------------------------------
# handle GGP PLAY message
#----------------------------------------------------------------------
    def handle_PLAY(self, timestamp, message):
        """Handle a GGP PLAY message: pass the previous round's actions
        (GDL-I) or observations (GDL-II) to the player callback and return
        the chosen action as the response body."""
        match = Handler.re_m_PLAY.match(message)
        if not match:
            raise HTTPErrorResponse(400, "Malformed PLAY message {0}".format(message))
        matchid = match.group(1)
        if self._matchid != matchid:
            self._on_ABORT()
            self._matchid = None
            # NOTE(review): self._matchid was just cleared, so the second
            # format argument is always None here — confirm and report the
            # expected matchid instead.
            raise HTTPErrorResponse(400, ("PLAY message has wrong matchid: "
                                          "{0} {1}").format(matchid, self._matchid))
        tmpstr = match.group(2)
        action=None
        actionstr=""
        # GGP 1 and GGP 2 are handled differently
        if self._protocol_version == Handler.GGP1:
            # GDL-I: a list of actions
            if not re.match(r'^\s*\(.*\)\s*$', tmpstr) and \
               not re.match(r'^\s*NIL\s*$', tmpstr, re.I):
                raise HTTPErrorResponse(400, "Malformed PLAY message {0}".format(message))
            actions = parse_actions_sexp(tmpstr)
            # An empty action list is legal (first round); otherwise one
            # action per role is required.
            if len(actions) != 0 and len(actions) != len(self._roles):
                raise HTTPErrorResponse(400, "Malformed PLAY message {0}".format(message))
            timeout = Timeout(timestamp, self._playclock)
            action = self._on_PLAY(timeout.clone(), dict(zip(self._roles, actions)))
        else:
            # GDL-II: a list of observations
            (turn, action, observations) = _parse_gdl2_playstop_component("PLAY", message, tmpstr)
            timeout = Timeout(timestamp, self._playclock)
            action = self._on_PLAY2(timeout.clone(), action, observations)
            # NOTE(review): unlike handle_STOP, the turn check here runs
            # after the callback has already been invoked — confirm intended.
            if turn != self._gdl2_turn:
                raise HTTPErrorResponse(400, ("PLAY message has wrong turn number: "
                                              "{0} {1}").format(turn, self._gdl2_turn))
            self._gdl2_turn += 1
        # Handle the return action
        actionstr = "{0}".format(action)
        # Make sure the action is a valid s-expression
        try:
            exp = parse_simple_sexp(actionstr.strip())
        except:
            # Recovery attempt: wrap a non-s-expression action in parens.
            actionstr = "({0})".format(actionstr)
            g_logger.critical(_fmt(("Invalid action '{0}'. Will try to recover to "
                                    "and send {1}"), action, actionstr))
        remaining = timeout.remaining()
        if remaining <= 0:
            g_logger.error(_fmt("PLAY messsage handler late response by {0}s", remaining))
        else:
            g_logger.info(_fmt("PLAY response with {0}s remaining: {1}", remaining, action))
        # Returns the action as the response
        return actionstr
#----------------------------------------------------------------------
# handle GDL STOP message
#----------------------------------------------------------------------
def handle_STOP(self, timestamp, message):
match = Handler.re_m_STOP.match(message)
if not match:
raise HTTPErrorResponse(400, "Malformed STOP message {0}".format(message))
# Make sure the matchid is correct
matchid = match.group(1)
if self._matchid != matchid:
self._on_ABORT()
self._matchid = None
raise HTTPErrorResponse(400, ("PLAY message has wrong matchid: "
"{0} {1}").format(matchid, self._matchid))
# Extract the actions and match to the correct roles
tmpstr = match.group(2)
# GGP 1 and GGP 2 are handled differently
if self._protocol_version == Handler.GGP1:
# GDL-I: a list of actions
actions = parse_actions_sexp(tmpstr)
if len(actions) != len(self._roles):
raise HTTPErrorResponse(400, "Malformed STOP message {0}".format(message))
timeout = Timeout(timestamp, self._playclock)
self._on_STOP(timeout.clone(), dict(zip(self._roles, actions)))
else:
# GDL-II: a list of observations
(turn, action, observations) = _parse_gdl2_playstop_component("STOP", message, tmpstr)
if turn != self._gdl2_turn:
raise HTTPErrorResponse(400, ("STOP message has wrong turn number: "
"{0} {1}").format(turn, self._gdl2_turn))
self._gdl2_turn += 1
timeout = Timeout(timestamp, self._playclock)
self._on_STOP2(timeout.clone(), action, observations)
remaining = timeout.remaining()
if remaining <= 0:
g_logger.error(_fmt("STOP messsage handler late response by {0}s", remaining))
else:
g_logger.debug(_fmt("STOP response with {0}s remaining", remaining))
# Now return the DONE response
return self._response("DONE")
#----------------------------------------------------------------------
# handle GGP INFO message
#----------------------------------------------------------------------
def handle_INFO(self, timestamp, message):
self._set_case(message, "INFO")
match = Handler.re_m_INFO.match(message)
if not match:
raise HTTPErrorResponse(400, "Malformed INFO message {0}".format(message))
# If no INFO callback provide a sensible default
if not self._on_INFO:
if self._matchid: return self._response("BUSY")
return self._response("AVAILABLE")
# Use the user-provided callback
response = self._on_INFO()
if not response:
raise ValueError("on_info() callback returned an empty value")
return self._response(self._on_INFO())
#----------------------------------------------------------------------
# handle GGP ABORT message
#----------------------------------------------------------------------
def handle_ABORT(self, timestamp, message):
self._set_case(message, "ABORT")
match = Handler.re_m_ABORT.match(message)
if not match:
raise HTTPErrorResponse(400, "Malformed ABORT message {0}".format(message))
matchid = match.group(1)
if self._matchid != matchid:
self._on_ABORT()
self._matchid = None
raise HTTPErrorResponse(400, ("ABORT message has wrong matchid: "
"{0} {1}").format(matchid, self._matchid))
self._matchid = None
self._on_ABORT()
# Stanford test website doesn't match the protocol description at:
# http://games.stanford.edu/index.php/communication-protocol
# Test website expects "ABORTED" while description states "DONE"
return self._response("ABORTED")
#----------------------------------------------------------------------
# handle GGP PREVIEW message
#----------------------------------------------------------------------
def handle_PREVIEW(self, timestamp, message):
self._set_case(message, "PREVIEW")
match = Handler.re_m_PREVIEW.match(message)
if not match:
raise HTTPErrorResponse(400, "Malformed PREVIEW message {0}".format(message))
gdl = match.group(1)
previewclock = int(match.group(2))
timeout = Timeout(timestamp, previewclock)
if self._on_PREVIEW: self._on_PREVIEW(timeout, gdl)
return self._response("DONE")
#---------------------------------------------------------------------------------
# Internal functions - work out the case for talking to the game server
#---------------------------------------------------------------------------------
def _set_case(self, message, command="START"):
uc = r'^\s*\(\s*{0}'.format(command.upper())
lc = r'^\s*\(\s*{0}'.format(command.lower())
if re.match(uc, message): self._uppercase = True
elif re.match(lc, message): self._uppercase = False
else:
g_logger.warning(("Cannot determine case used by game server, "
"so defaulting to uppercase responses"))
self._uppercase = True
#---------------------------------------------------------------------------------
# Maintain a list of roles in the same order as it appears in the GDL.
# _roles_in_correct_order(self, gdl)
#---------------------------------------------------------------------------------
def _roles_in_correct_order(self, gdl):
self._roles = []
exp = parse_simple_sexp("({0})".format(gdl))
for pexp in exp:
if type(pexp) == type([]) and len(pexp) == 2:
if self.re_m_GDL_ROLE.match(pexp[0]):
self._roles.append(pexp[1])
if not self._roles: raise ValueError("Invalid GDL has no roles")
#---------------------------------------------------------------------------------
# User callable functions
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
# Internal support functions and classes
#---------------------------------------------------------------------------------
class HTTPErrorResponse(Exception):
    """Raised to abort request handling and send an HTTP error reply.

    Carries the numeric HTTP status code alongside the message text.
    """
    def __init__(self, status, message):
        super(HTTPErrorResponse, self).__init__(message)
        self.status = status
        self.message = message

    def __str__(self):
        return "{0} {1}".format(self.status, self.message)
#---------------------------------------------------------------------------------
# _get_response_headers(environ_dict, response_body)
# Returns a sensible response header. Input is the original environment
# dictionary and the response_body (used for calculating the content-length).
# Output a list of tuples of (variable, value) pairs.
#---------------------------------------------------------------------------------
def _get_response_headers(environ, response_body):
newenv = []
try:
# Adjust the content type header to match the game controller
if 'CONTENT_TYPE' in environ:
newenv.append(('Content-Type', environ.get('CONTENT_TYPE')))
else:
newenv.append(('Content-Type', 'text/acl'))
# Now the other headers
newenv.append(('Content-Length', str(len(response_body))))
newenv.append(('Access-Control-Allow-Origin', '*'))
# newenv.append(('Access-Control-Allow-Method', 'POST, GET, OPTIONS'))
newenv.append(('Access-Control-Allow-Method', 'POST'))
newenv.append(('Allow-Control-Allow-Headers', 'Content-Type'))
newenv.append(('Access-Control-Allow-Age', str(86400)))
except:
pass
return newenv
#---------------------------------------------------------------------------------
# _get_http_post(environ)
# Checks that it is a valid http post message and returns the content of the message.
# NOTE: should be called only once because the 'wsgi.input' object is a stream object
# so will be empty once it has been read.
#---------------------------------------------------------------------------------
def _get_http_post(environ):
    """Validate that the request is an HTTP POST and return its content.

    Call at most once per request: 'wsgi.input' is a stream and is exhausted
    after the first read.
    """
    try:
        if environ.get('REQUEST_METHOD') != "POST":
            raise HTTPErrorResponse(405, 'Non-POST method not supported')
        nbytes = int(environ.get('CONTENT_LENGTH'))
        if nbytes <= 5:
            raise HTTPErrorResponse(400, 'Message content too short to be meaningful')
        return environ['wsgi.input'].read(nbytes)
    except HTTPErrorResponse:
        raise
    except Exception as e:
        # A missing or non-numeric CONTENT_LENGTH lands here via int()
        g_logger.warning(_fmt("HTTP POST exception: {0}", e))
        raise HTTPErrorResponse(400, 'Invalid content')
#---------------------------------------------------------------------------------
# parse part of a GDL-II play/stop message consisting of:
# "<turn> <lastmove> <observations>"
# Returns a triple of these elements.
# ---------------------------------------------------------------------------------
def _parse_gdl2_playstop_component(mtype, message, component):
    """Parse the GDL-II "<turn> <lastmove> <observations>" component of a
    PLAY/STOP message.

    Returns (turn, lastmove, observations) where lastmove is None when the
    message carried NIL and observations is a (possibly empty) list of
    s-expression strings. Raises HTTPErrorResponse(400) on malformed input.
    """
    error = "Malformed GDL-II {0} message {1}".format(mtype, message)
    # Handle the integer turn number first
    match = re.match(r'^\s*(\d+)\s+(.*)\s*$', component)
    if not match:
        raise HTTPErrorResponse(400, error)
    turn = int(match.group(1))
    tmpstr = match.group(2)
    # Parse the remaining <lastmove> <observations> as an s-expression
    exp = parse_simple_sexp("({0})".format(tmpstr))
    # A bare string means it was not a proper two-element s-expression
    if isinstance(exp, str):
        raise HTTPErrorResponse(400, error)
    if len(exp) != 2:
        raise HTTPErrorResponse(400, error)
    lastaction = exp_to_sexp(exp[0])
    if lastaction == "NIL":
        lastaction = None
    # On the very first turn there cannot have been a previous move
    if turn == 0 and lastaction:
        raise HTTPErrorResponse(400, error)
    if isinstance(exp[1], str):
        # The only legal atomic observation list is NIL (meaning "none")
        if exp[1] != "NIL":
            raise HTTPErrorResponse(400, error)
        return (turn, lastaction, [])
    observations = [exp_to_sexp(oexp) for oexp in exp[1]]
    return (turn, lastaction, observations)
#---------------------------------------------------------------------------------
# Unescape html the "<" ">" "&"
# _unescape_html(string)
#---------------------------------------------------------------------------------
def _unescape(s):
s = s.replace("<", "<")
s = s.replace(">", ">")
s = s.replace("&", "&") # must be last
return s
| {
"content_hash": "ae657698ae3891bf678a8da9725e4888",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 98,
"avg_line_length": 45.22770700636943,
"alnum_prop": 0.5314579445833186,
"repo_name": "daveraja/pyggputils",
"id": "0e14dd7a160c25ebf3678227d60efbd381b7a524",
"size": "31678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ggputils/player/ggp_http_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "63561"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import print_function
import theano.tensor as T
from collections import OrderedDict
from theano import scan
from keras.layers.core import Layer, Merge
from keras.utils.theano_utils import ndim_tensor, alloc_zeros_matrix
from ..utils import apply_layer
class Recursive(Layer):
    '''Recurrent graph container.

    Implement a NN graph with arbitrary layer connections,
    arbitrary number of inputs and arbitrary number of outputs,
    scanned step-by-step over the time axis of its input(s).

    Note: Graph can only be used as a layer
    (connect, input, get_input, get_output)
    when it has exactly one input and one output.

    inherited from Layer:
        - get_params
        - get_output_mask
        - supports_masked_input
        - get_weights
        - set_weights
    '''
    def __init__(self, truncate_gradient=-1, return_sequences=False):
        # truncate_gradient is forwarded to theano.scan (BPTT truncation);
        # return_sequences selects per-timestep outputs vs. last step only.
        self.return_sequences = return_sequences
        self.truncate_gradient = truncate_gradient
        self.namespace = set() # strings
        self.nodes = OrderedDict() # layer-like
        self.inputs = OrderedDict() # layer-like
        self.input_order = [] # strings
        self.states = OrderedDict() # theano.tensors
        self.state_order = [] # strings
        self.initial_states = []
        self.outputs = {} # layer-like
        self.output_order = [] # strings
        self.input_config = [] # dicts
        self.state_config = [] # dicts
        self.output_config = [] # dicts
        self.node_config = [] # dicts
        self.state_map = {} # state name -> name of the node producing it
        self.params = []
        self.regularizers = []
        self.constraints = []
        self.updates = []
        # NOTE(review): 'states_map' appears unused and duplicates 'state_map'
        # above -- confirm before removing.
        self.states_map = {}
    @property
    def nb_input(self):
        # Number of declared graph inputs.
        return len(self.inputs)
    @property
    def nb_output(self):
        # Number of declared graph outputs.
        return len(self.outputs)
    def set_previous(self, layer, connection_map={}):
        """Attach `layer`'s output(s) as this container's input(s).

        NOTE(review): mutable default argument for connection_map; harmless
        here only because it is never mutated.
        """
        if self.nb_input != layer.nb_output:
            raise Exception('Cannot connect layers: input count does not match output count.')
        if self.nb_input == 1:
            self.inputs[self.input_order[0]].set_previous(layer)
        else:
            if not connection_map:
                raise Exception('Cannot attach multi-input layer: no connection_map provided.')
            for k, v in connection_map.items():
                if k in self.inputs and v in layer.outputs:
                    self.inputs[k].set_previous(layer.outputs[v])
                else:
                    raise Exception('Invalid connection map.')
    def get_input(self, train=False):
        """Return the single input tensor, or a dict of name -> input tensor."""
        if len(self.inputs) == len(self.outputs) == 1:
            return self.inputs[self.input_order[0]].get_input(train)
        else:
            return dict([(k, v.get_input(train)) for k, v in self.inputs.items()])
    def get_states(self):
        """Return a plain dict copy of the recurrent state tensors."""
        return dict([(k, v) for k, v in self.states.items()])
    @property
    def input(self):
        return self.get_input()
    @property
    def state(self):
        return self.get_states()
    def get_output(self, train=False):
        """Return the scan result: a single tensor, or a name -> tensor dict.

        NOTE(review): `train` is accepted but not forwarded to _get_output --
        confirm whether train-mode behaviour is intended here.
        """
        outputs = self._get_output()
        # Keep only the per-node outputs flagged as graph outputs
        outputs = [o for o, n in zip(outputs, self.nodes.values()) if n.is_output]
        # print('::: ouputs {} | nodes {}'.format(outputs, self.nodes.values()))
        if len(self.inputs) == len(outputs) == 1:
            return outputs[0]
        else:
            return dict([(k, o) for k, o in zip(self.outputs.keys(), outputs)])
    def add_input(self, name, ndim=3, dtype='float'):
        """Declare a named graph input: a float tensor of rank `ndim`, or an
        int matrix when dtype != 'float' (ndim must then be 2)."""
        if name in self.namespace:
            raise Exception('Duplicate node identifier: ' + name)
        self.namespace.add(name)
        self.input_order.append(name)
        layer = Layer() # empty layer
        if dtype == 'float':
            layer.input = ndim_tensor(ndim)
        else:
            if ndim == 2:
                layer.input = T.imatrix()
            else:
                raise Exception('Type "int" can only be used with ndim==2 (Embedding).')
        layer.input.name = name
        self.inputs[name] = layer
        self.input_config.append({'name': name, 'ndim': ndim, 'dtype': dtype})
    def add_state(self, name, dim):
        """Declare a recurrent state of width `dim`, initialised to zeros.

        The batch size is read from the already-declared input(s), so inputs
        must be added before states.
        """
        if name in self.namespace:
            raise Exception('Duplicate node identifier: ' + name)
        self.namespace.add(name)
        self.state_order.append(name)
        inps = self.input
        if isinstance(inps, dict):
            # NOTE(review): dict.values()[0] relies on Python 2 list semantics;
            # breaks on Python 3 -- confirm target interpreter.
            batch_size = inps.values()[0].shape[0]
        else:
            batch_size = inps.shape[0]
        self.states[name] = T.unbroadcast(alloc_zeros_matrix(batch_size, dim), 1)
        self.state_config.append({'name': name, 'dim': dim})
    def add_node(self, layer, name, input=None, inputs=[], merge_mode='concat',
                 return_state=None, create_output=False):
        """Add `layer` to the graph under `name`.

        `input`/`inputs` name the feeding nodes, graph inputs or states;
        `return_state` makes this node's output the next value of that state;
        `create_output` also registers the node as a graph output.

        NOTE(review): the duplicate-name check happens after the
        initial_states bookkeeping -- confirm ordering is intentional.
        """
        # Record the node's slot in scan's outputs_info: None for stateless
        # nodes, the state's initial tensor for state-returning nodes.
        if return_state is None:
            self.initial_states.append(None)
        else:
            self.initial_states.append(self.states[return_state])
            self.state_map[return_state] = name
        layer.state_name = return_state
        if hasattr(layer, 'set_name'):
            layer.set_name(name)
        if name in self.namespace:
            raise Exception('Duplicate node identifier: ' + name)
        if input:
            # if input not in self.namespace:
            #     raise Exception('Unknown node/input identifier: ' + input)
            # if input in self.nodes:
            #     layer.set_previous(self.nodes[input])
            # elif input in self.inputs:
            #     layer.set_previous(self.inputs[input])
            # layer.input_names = [input, ]
            inputs = [input, ]
        if inputs:
            # Validate every named feed; state feeds are accepted here but
            # resolved later in _step rather than merged eagerly.
            to_merge = []
            for n in inputs:
                if n in self.nodes:
                    to_merge.append(self.nodes[n])
                elif n in self.inputs:
                    to_merge.append(self.inputs[n])
                elif n in self.states:
                    # to_merge.append(self.states[n])
                    pass
                else:
                    raise Exception('Unknown identifier: ' + n)
            # merge = Merge(to_merge, mode=merge_mode)
            # layer.set_previous(merge)
        layer.input_names = inputs
        layer.input_list = inputs if input is None else [input, ]
        layer.merge_mode = merge_mode
        self.namespace.add(name)
        self.nodes[name] = layer
        self.node_config.append({'name': name,
                                 'input': input,
                                 'inputs': inputs,
                                 'merge_mode': merge_mode})
        layer.init_updates()
        params, regularizers, constraints, updates = layer.get_params()
        self.params += params
        self.regularizers += regularizers
        self.constraints += constraints
        self.updates += updates
        if create_output:
            self.add_output(name, input=name)
            self.nodes[name].is_output = True
        else:
            self.nodes[name].is_output = False
    def get_constants(self):
        """Extra non-sequence constants passed to scan; none by default."""
        return []
    def _step(self, *args):
        """One scan timestep.

        `args` holds, in order: the current slice of every sequence input
        (input_order), then the previous value of every state (state_order),
        then the non-sequence parameters/constants (see the scan call in
        _get_output). Returns one output per node in insertion order,
        matching scan's outputs_info layout.
        """
        # print('--- {}'.format(args))
        local_outputs = OrderedDict()
        for k, node in self.nodes.items():
            # print('This is node {}'.format(k))
            local_inputs = []
            for inp in node.input_names:
                # print('>>> input {}'.format(inp))
                if inp in self.input_order:
                    # Sequence input: positional slice at the front of args
                    idx = self.input_order.index(inp)
                    local_inputs.append(args[idx])
                    # print('iii idx: {}'.format(idx))
                elif inp in local_outputs:
                    # Output of a node already computed this timestep
                    # print('??? output {}'.format(inp))
                    local_inputs.append(local_outputs[inp])
                elif inp in node.input_list: # state input
                    # Previous-step state value: offset past the sequence inputs
                    idx = len(self.input_order) + self.state_order.index(inp)
                    # print('!!! state {0}, idx {1}'.format(inp, idx))
                    local_inputs.append(args[idx])
            local_inputs = [x for x in local_inputs if x != []]
            # print(local_inputs)
            if len(local_inputs) > 1:
                if node.merge_mode == 'concat':
                    inputs = T.concatenate(local_inputs, axis=-1)
                elif node.merge_mode == 'sum':
                    inputs = sum(local_inputs)
            else:
                inputs = local_inputs[0]
            # print('After concat {}'.format(inputs))
            local_outputs[k] = apply_layer(node, inputs)
        # print('local outputs: {}'.format(local_outputs))
        # print('+++ {}'.format(local_outputs.values()))
        out_vals = []
        for k, v in local_outputs.items():
            # print('key: {}'.format(k))
            out_vals.append(v)
        # return local_outputs.values()
        return out_vals
    def _get_output(self, train=False):
        """Scan _step over time and return one output tensor per node.

        Inputs are shuffled from (batch, time, dim) to (time, batch, dim) for
        scan, and shuffled back afterwards (or sliced to the last timestep
        when return_sequences is False).
        """
        I = self.get_input()
        if isinstance(I, dict):
            X = [x.dimshuffle(1, 0, 2) for x in I.values()]
        else:
            X = I.dimshuffle(1, 0, 2)
        # print('=='*10)
        # print('*** {}'.format(self.initial_states))
        outputs, updates = scan(self._step,
                                sequences=X,
                                outputs_info=self.initial_states,
                                non_sequences=self.params + self.get_constants(),
                                truncate_gradient=self.truncate_gradient
                                )
        outputs = [x.dimshuffle(1, 0, 2) if self.return_sequences else x[-1] for x in outputs]
        return outputs
    def add_output(self, name, input=None, inputs=[], merge_mode='concat'):
        """Register node `input` (or a merge of `inputs`) as graph output `name`."""
        if name in self.output_order:
            raise Exception('Duplicate output identifier: ' + name)
        if input:
            if input not in self.namespace:
                raise Exception('Unknown node/input identifier: ' + input)
            if input in self.nodes:
                self.outputs[name] = self.nodes[input]
            elif input in self.inputs:
                self.outputs[name] = self.inputs[input]
        if inputs:
            to_merge = []
            for n in inputs:
                if n not in self.nodes:
                    raise Exception('Unknown identifier: ' + n)
                to_merge.append(self.nodes[n])
            merge = Merge(to_merge, mode=merge_mode)
            self.outputs[name] = merge
        self.output_order.append(name)
        self.output_config.append({'name': name,
                                   'input': input,
                                   'inputs': inputs,
                                   'merge_mode': merge_mode})
    def get_config(self):
        """Serialisable description of the container and all of its nodes."""
        return {"name": self.__class__.__name__,
                "input_config": self.input_config,
                "node_config": self.node_config,
                "output_config": self.output_config,
                "input_order": self.input_order,
                "output_order": self.output_order,
                "nodes": dict([(c["name"], self.nodes[c["name"]].get_config()) for c in self.node_config])}
def _dict_get(dic, key):
if dic.get(key) is None:
return list()
else:
return dic.get(key)
| {
"content_hash": "22f5cfd2f0c7a5147a1371e1e076fc5c",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 107,
"avg_line_length": 38.65172413793103,
"alnum_prop": 0.5299313052011776,
"repo_name": "berleon/seya",
"id": "54957d056f06891fd31c0fc2ed72024f5ea0bc62",
"size": "11209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seya/layers/containers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "135285"
}
],
"symlink_target": ""
} |
import os
import sys
from django.conf import settings
import django
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Make the project package importable for autodoc (one directory up from docs/).
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
#sys.path.insert(0, os.path.abspath("../.."))
# Configure Django before autodoc imports any module that touches
# django.conf.settings; empty settings are presumably sufficient for doc
# builds -- confirm no documented module needs real settings values.
settings.configure()
django.setup()
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.inheritance_diagram',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django ERP'
copyright = u'2015, Emanuele Bertoldi'
author = u'Emanuele Bertoldi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
# NOTE(review): release ('0.0.0') lags version ('0.0.1'); conventionally
# release is the full form of version -- confirm which value is current.
release = '0.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Brazilian Portuguese while the project metadata above is in
# English -- confirm this is intentional.
language = 'pt_BR'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# NOTE(review): sphinx_rtd_theme is a third-party package -- confirm it is
# listed in the documentation build requirements.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoERPdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    # 'preamble': '',
    # Latex figure (float) alignment
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'DjangoERP.tex', u'Django ERP Documentation',
     u'Emanuele Bertoldi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'djangoerp', u'Django ERP Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'DjangoERP', u'Django ERP Documentation',
     author, 'DjangoERP', 'One line description of project.',
     'Miscellaneous'),
]
# NOTE(review): 'One line description of project.' above is the
# sphinx-quickstart placeholder -- replace with a real description.
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
# Named-inventory form; the bare-URL key form ({'url': None}) is deprecated.
intersphinx_mapping = {'python': ('https://docs.python.org/', None)}
| {
"content_hash": "6b46beb64d2bad799eea2751c54d49ff",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 80,
"avg_line_length": 31.65564738292011,
"alnum_prop": 0.6988947872247846,
"repo_name": "mobb-io/django-erp",
"id": "000f9422af595dd6cb6e737c4ba918185e944ea8",
"size": "11914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11442"
},
{
"name": "HTML",
"bytes": "37278"
},
{
"name": "JavaScript",
"bytes": "1666"
},
{
"name": "Python",
"bytes": "511749"
}
],
"symlink_target": ""
} |
from slisonner import decoder, encoder
from tests import mocker
from tempfile import mkdtemp
from shutil import rmtree
def test_full_encode_decode_cycle():
    """Encode a mock slice to .slison and check all metadata survives decoding."""
    temp_out_dir = mkdtemp()
    try:
        slice_id = '2015-01-02 00:00:00'
        x_size, y_size = 10, 16
        temp_slice_path = mocker.generate_slice(x_size, y_size, 'float32')
        slice_meta_encoded, slison_filepath = encoder.encode_slice_file(
            filepath=temp_slice_path,
            slice_duration=300,
            timestamp=slice_id,
            layer_id='london',
            x_size=x_size,
            y_size=y_size,
            value_type='float32',
            out_dir=temp_out_dir)
        slice_data, slice_meta_decoded = decoder.decode_slison(slison_filepath)
        # Every encoded metadata field must round-trip unchanged
        for key, encoded_value in slice_meta_encoded.items():
            assert encoded_value == slice_meta_decoded[key]
    finally:
        # Clean up the temp dir even when an assertion above fails
        rmtree(temp_out_dir)
| {
"content_hash": "ec9291920cc3c61074e0dee2eb4f03e0",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 30.85185185185185,
"alnum_prop": 0.6626650660264105,
"repo_name": "Habidatum/slisonner",
"id": "ab5be15bbc59ecc18cf93a6170b0dd272f33cfd6",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_slison.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6387"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Fall back to the local settings module when none is set in the environment
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "homeworkpal_project.settings.local")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "6cbb2041a5dd9e5a48f3414728903636",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 89,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.7235772357723578,
"repo_name": "luiscberrocal/homeworkpal",
"id": "93d935246fcd1879760b2ce8b4e2fbb785511f02",
"size": "268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeworkpal_project/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "306"
},
{
"name": "HTML",
"bytes": "32887"
},
{
"name": "JavaScript",
"bytes": "36072"
},
{
"name": "Python",
"bytes": "233008"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('.')
import saltlint
def test_indent():
    """dataset.sls must lint cleanly.

    The lint run is performed inside the test (rather than at module level,
    as before) so importing/collecting this module has no side effects and
    a lint crash is reported against this test instead of as a collection
    error.
    """
    errors = saltlint.run('tests/states/dataset.sls')
    assert len(errors) == 0
| {
"content_hash": "1681bcc7a2bd71a920320c4c4120005a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 18.125,
"alnum_prop": 0.7103448275862069,
"repo_name": "dragon788/salt-lint",
"id": "26dd273074f778bbcc26664b9fe0964608c76bea",
"size": "168",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_dataset.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11878"
},
{
"name": "SaltStack",
"bytes": "1510"
}
],
"symlink_target": ""
} |
import unittest
import thread_cert
import mle
# Simulated node ids; used as keys in TOPOLOGY and self.nodes below.
# The suffix gives the Thread protocol version the node runs (1.1 or 1.2).
LEADER_1_2 = 1
ROUTER_1_1 = 2
REED_1_2 = 3
ROUTER_1_2 = 4
REED_1_1 = 5
MED_1_1 = 6
MED_1_2 = 7
# Topology
# (lq:2) (pp:1)
# REED_1_2 ----- ROUTER_1_2
# | \ / | \
# | \/ REED_1_1 \
# (lq:2) | / \ / `router` \
# | (lq:2) \ \
# | / / \ \
# LEADER_1_2 --- ROUTER_1_1 -- MED_1_2
# \ |
# \ |
# \ |
# MED_1_1
#
# 1) Bring up LEADER_1_2 and ROUTER_1_1,
# 2) Config link quality (LEADER_1_2->REED_1_2) as 2, bring up REED_1_2 which would attach to ROUTER_1_1
# due to higher two-way link quality,
# 3) Config link quality(LEADER_1_2->ROUTER_1_2) and link quality(REED_1_2->ROUTER_1_2) as 2, bring up
# ROUTER_1_2 which would attach to LEADER_1_2 due to active router is preferred,
# 4) Config parent priority as 1 on ROUTER_1_2, bring up REED_1_1 which would attach to ROUTER_1_2 due to
# higher parent priority,
# 5) Upgrade REED_1_1 to `router` role, bring up MED_1_1 which would attach to LEADER_1_2 which has higher
# link quality of 3,
# 6) Config parent priority as 1 on ROUTER_1_1, bring up MED_1_2 which would attach to ROUTER_1_2 due to
# higher version
#
class TestParentSelection(thread_cert.TestCase):
    """Verify MLE parent-selection criteria across Thread 1.1/1.2 nodes.

    Each phase of test() brings up one more node and then checks, from the
    captured MLE Parent Responses and the Child Id Request destination, which
    selection rule made the new node pick its parent (two-way link quality,
    router-over-REED preference, parent priority, link-quality-3 neighbor
    count, or protocol version). See the module-level topology comment for
    the step-by-step scenario.
    """
    TOPOLOGY = {
        LEADER_1_2: {
            'version': '1.2',
            'allowlist': [REED_1_2, ROUTER_1_2, REED_1_1, ROUTER_1_1, MED_1_1],
        },
        ROUTER_1_1: {
            'version': '1.1',
            'allowlist': [LEADER_1_2, REED_1_2, MED_1_2, MED_1_1],
        },
        REED_1_2: {
            'version': '1.2',
            'allowlist': [ROUTER_1_2, ROUTER_1_1, LEADER_1_2],
        },
        ROUTER_1_2: {
            'version': '1.2',
            'allowlist': [REED_1_2, MED_1_2, REED_1_1, LEADER_1_2],
        },
        REED_1_1: {
            'version': '1.1',
            'allowlist': [ROUTER_1_2, LEADER_1_2]
        },
        MED_1_1: {
            'mode': 'r',
            'version': '1.1',
            'allowlist': [LEADER_1_2, ROUTER_1_1],
        },
        MED_1_2: {
            'mode': 'r',
            'version': '1.2',
            'allowlist': [ROUTER_1_1, ROUTER_1_2],
        },
    }
    """All nodes are created with default configurations"""
    def test(self):
        """Run the five parent-selection phases described in the header."""
        self.nodes[LEADER_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER_1_2].get_state(), 'leader')
        self.nodes[ROUTER_1_1].set_router_selection_jitter(1)
        self.nodes[ROUTER_1_1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER_1_1].get_state(), 'router')
        # Mesh Impacting Criteria - Highest Two-way link quality
        # REED_1_2 would attach to ROUTER_1_1
        # Attach to ROUTER_1_1 which has highest two-way link quality
        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, ROUTER_1_1])
        self.nodes[LEADER_1_2].set_link_quality(self.nodes[REED_1_2].get_addr64(), 2)
        self.nodes[REED_1_2].set_router_selection_jitter(1)
        self.nodes[REED_1_2].set_router_upgrade_threshold(1)
        self.nodes[REED_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED_1_2].get_state(), 'child')
        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(ROUTER_1_1)
        parent_prefer = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"
        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"
        # Known that link margin for link quality 3 is 80 and link quality 2 is 15
        assert ((parent_prefer.get_mle_message_tlv(mle.LinkMargin).link_margin -
                 parent_cmp.get_mle_message_tlv(mle.LinkMargin).link_margin) > 20)
        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(REED_1_2)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[ROUTER_1_1])
        # Mesh Impacting Criteria - Active Routers over REEDs
        # ROUTER_1_2 would attach to LEADER_1_2
        # Link quality configuration, so that REED_1_2 has the chance to respond
        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, REED_1_2])
        self.nodes[LEADER_1_2].set_link_quality(self.nodes[ROUTER_1_2].get_addr64(), 2)
        self.nodes[REED_1_2].set_link_quality(self.nodes[ROUTER_1_2].get_addr64(), 2)
        self.nodes[ROUTER_1_2].set_router_selection_jitter(1)
        self.nodes[ROUTER_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[ROUTER_1_2].get_state(), 'router')
        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        # Skip first response for first parent request
        assert messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        parent_prefer = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"
        messages = self.simulator.get_messages_sent_by(REED_1_2)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"
        # Equal link margins: the choice is decided by router-over-REED, not RSSI.
        assert (parent_prefer.get_mle_message_tlv(mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
            mle.LinkMargin).link_margin)
        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(ROUTER_1_2)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[LEADER_1_2])
        # Mesh Impacting Criteria - Highest Parent Priority value in the Connectivity TLV
        # REED_1_1 would attach to ROUTER_1_2
        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, ROUTER_1_2])
        self.nodes[ROUTER_1_2].set_parent_priority(1)
        self.nodes[REED_1_1].set_router_selection_jitter(1)
        self.nodes[REED_1_1].set_router_upgrade_threshold(1)
        self.nodes[REED_1_1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED_1_1].get_state(), 'child')
        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(ROUTER_1_2)
        parent_prefer = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"
        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"
        assert (parent_prefer.get_mle_message_tlv(mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
            mle.LinkMargin).link_margin)
        # Parent priority is the tie-breaker here.
        assert (parent_prefer.get_mle_message_tlv(mle.Connectivity).pp > parent_cmp.get_mle_message_tlv(
            mle.Connectivity).pp)
        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(REED_1_1)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[ROUTER_1_2])
        # Mesh Impacting Criteria - Router with the most high-quality neighbors
        # (Link Quality 3 field in the Connectivity TLV)
        # MED_1_1 would attach to LEADER_1_2
        self.nodes[REED_1_1].set_state('router')
        self.simulator.go(5)
        self.assertEqual(self.nodes[REED_1_1].get_state(), 'router')
        # Flush relative message queues
        self.flush_nodes([LEADER_1_2, ROUTER_1_1])
        self.nodes[MED_1_1].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[MED_1_1].get_state(), 'child')
        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(LEADER_1_2)
        parent_prefer = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"
        messages = self.simulator.get_messages_sent_by(ROUTER_1_1)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"
        assert (parent_prefer.get_mle_message_tlv(mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
            mle.LinkMargin).link_margin)
        assert (parent_prefer.get_mle_message_tlv(mle.Connectivity).pp == parent_cmp.get_mle_message_tlv(
            mle.Connectivity).pp)
        # Link margin and priority tie; link_quality_3 neighbor count decides.
        assert (parent_prefer.get_mle_message_tlv(mle.Connectivity).link_quality_3 > parent_cmp.get_mle_message_tlv(
            mle.Connectivity).link_quality_3)
        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(MED_1_1)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[LEADER_1_2])
        # Child Impacting Criteria - A Version number in the Version TLV
        # equal to or higher than the version that implements features
        # desirable to the Child MED_1_2 would attach to ROUTER_1_2
        # Flush relative message queues
        self.flush_nodes([ROUTER_1_2, ROUTER_1_1])
        self.nodes[ROUTER_1_1].set_parent_priority(1)
        self.nodes[MED_1_2].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[MED_1_2].get_state(), 'child')
        # Check Parent Response
        messages = self.simulator.get_messages_sent_by(ROUTER_1_2)
        parent_prefer = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_prefer), "Error: Expected parent response not found"
        messages = self.simulator.get_messages_sent_by(ROUTER_1_1)
        parent_cmp = messages.next_mle_message(mle.CommandType.PARENT_RESPONSE)
        assert (parent_cmp), "Error: Expected parent response not found"
        assert (parent_prefer.get_mle_message_tlv(mle.LinkMargin).link_margin == parent_cmp.get_mle_message_tlv(
            mle.LinkMargin).link_margin)
        assert (parent_prefer.get_mle_message_tlv(mle.Connectivity).pp == parent_cmp.get_mle_message_tlv(
            mle.Connectivity).pp)
        assert (parent_prefer.get_mle_message_tlv(mle.Connectivity).link_quality_3 == parent_cmp.get_mle_message_tlv(
            mle.Connectivity).link_quality_3)
        # Everything else ties; the higher protocol version wins.
        assert (parent_prefer.get_mle_message_tlv(mle.Version).version > parent_cmp.get_mle_message_tlv(
            mle.Version).version)
        # Check Child Id Request
        messages = self.simulator.get_messages_sent_by(MED_1_2)
        msg = messages.next_mle_message(mle.CommandType.CHILD_ID_REQUEST)
        msg.assertSentToNode(self.nodes[ROUTER_1_2])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "83f93bfde55cfb588d0032c6fddab065",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 117,
"avg_line_length": 42.43295019157088,
"alnum_prop": 0.6235665914221219,
"repo_name": "bukepo/openthread",
"id": "154d400fb3d0d899fc9b1f44d0e30f7f1496e1ef",
"size": "12680",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/v1_2_test_parent_selection.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "50"
},
{
"name": "C",
"bytes": "1080565"
},
{
"name": "C++",
"bytes": "5839893"
},
{
"name": "CMake",
"bytes": "95509"
},
{
"name": "Dockerfile",
"bytes": "6286"
},
{
"name": "M4",
"bytes": "36443"
},
{
"name": "Makefile",
"bytes": "161153"
},
{
"name": "Python",
"bytes": "3379923"
},
{
"name": "Shell",
"bytes": "134708"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from ievv_opensource.ievv_model_mommy_extras import postgres_field_generators
class ModelMommyExtrasAppConfig(AppConfig):
    """Django app config for the ievv_model_mommy_extras app."""
    name = 'ievv_opensource.ievv_model_mommy_extras'
    verbose_name = "IEVV model mommy extras"
    def ready(self):
        # Register the postgres field generators with model-bakery once the
        # Django app registry is fully loaded.
        postgres_field_generators.add_to_model_bakery()
| {
"content_hash": "7da1b5cd0926feab04cf9524806bdd5e",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 77,
"avg_line_length": 33.4,
"alnum_prop": 0.7634730538922155,
"repo_name": "appressoas/ievv_opensource",
"id": "c7f49326bbebbaa36c2928d185caff5eba6d534a",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ievv_opensource/ievv_model_mommy_extras/apps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "199"
},
{
"name": "Dockerfile",
"bytes": "162"
},
{
"name": "HTML",
"bytes": "7544"
},
{
"name": "JavaScript",
"bytes": "719"
},
{
"name": "Less",
"bytes": "27"
},
{
"name": "Python",
"bytes": "614046"
},
{
"name": "SCSS",
"bytes": "199"
},
{
"name": "Shell",
"bytes": "141"
},
{
"name": "TypeScript",
"bytes": "254"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# Read the long description up front with a context manager so the file
# handle is closed deterministically (the original `open(...).read()`
# inside the setup() call leaked it).
with open('README.md') as readme:
    long_description = readme.read()

setup(
    name='provisor',
    version='0.2',
    packages=['provisor'],
    author='Hashbang Team',
    author_email='team@hashbang.sh',
    license='GPL 3.0',
    description='Server that provisions new users on a Linux system',
    long_description=long_description,
    # NOTE(review): install_requires is a setuptools keyword; plain
    # distutils.core.setup silently ignores it - confirm whether setuptools
    # should be imported instead.
    install_requires=[
        'flask',
        'python-ldap'
    ]
)
| {
"content_hash": "47c3f6c60759bd8c8f8fe9428aa50bf8",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 23.9375,
"alnum_prop": 0.6240208877284595,
"repo_name": "hashbang/provisor",
"id": "ea12767b2955f612236f0c1019fd7d64123d2d58",
"size": "405",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1856"
},
{
"name": "Python",
"bytes": "18549"
}
],
"symlink_target": ""
} |
"""
Cryptographic capabilities for TLS.
"""
| {
"content_hash": "1067592660400204e7489d23514da6fa",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 35,
"avg_line_length": 14.666666666666666,
"alnum_prop": 0.7045454545454546,
"repo_name": "4shadoww/hakkuframework",
"id": "063697b25dddfbfd414de40e586faca70443508e",
"size": "218",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/scapy/layers/tls/crypto/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7992059"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from problems.problem import *
from helper.pom3 import pom3
__author__ = 'panzer'
class POM3B(Problem):
  """
  POM 3B: one variant of the POM3 software-project model.

  Each decision below is (label, lower bound, upper bound); objectives are
  cost, score, completion and idle (with their optimization directions).
  """
  def __init__(self):
    Problem.__init__(self)
    self.name = POM3B.__name__
    # One row per decision: (label, low, up).
    specs = [
      ("Culture", 0.10, 0.90),
      ("Criticality", 0.82, 1.26),
      ("Criticality Modifier", 80, 95),
      ("Initial Known", 0.40, 0.70),
      ("Inter-Dependency", 0, 100),
      ("Dynamism", 1, 50),
      ("Size", 0, 2),
      ("Plan", 0, 5),
      ("Team Size", 1, 20),
    ]
    self.decisions = [Decision(label, low, up) for label, low, up in specs]
    self.objectives = [
      Objective("Cost", True, 0),
      Objective("Score", False, 0, 1),
      Objective("Completion", False, 0, 1),
      Objective("Idle", True, 0, 1),
    ]
  def evaluate(self, decisions):
    """Run the pom3 simulator on a concrete decision vector."""
    simulator = pom3()
    return simulator.simulate(decisions)
| {
"content_hash": "95c0b5ceab626a8a12243871aac99f85",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 111,
"avg_line_length": 33.74074074074074,
"alnum_prop": 0.5949506037321625,
"repo_name": "rahlk/Experimental-Algorithms",
"id": "d54f23379b0eb5c0c135106ae27c9f04f9276c67",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/problems/pom3/pom3b.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gnuplot",
"bytes": "10360"
},
{
"name": "Makefile",
"bytes": "530"
},
{
"name": "Python",
"bytes": "229127"
},
{
"name": "Shell",
"bytes": "859"
},
{
"name": "TeX",
"bytes": "115740"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.dashboards.nikolaboard.catalogpanel import tables
class CatalogTab(tabs.Tab):
    """Overview tab rendering the catalog detail template."""
    name = _("Overview")
    slug = "service_catalog_tab"
    template_name = "nikolaboard/catalogpanel/_catalog_detail.html"
    preload = False

    def allowed(self, request):
        # Visible to every user.
        return True

    def get_context_data(self, request):
        catalog = self.tab_group.kwargs['catalog']
        return {"catalog": catalog}
class CatalogTemplateTab(tabs.Tab):
    """Tab rendering the catalog's template."""
    name = _("Template")
    slug = "catalog_template"
    template_name = "nikolaboard/catalogpanel/_catalog_template.html"

    def allowed(self, request):
        # Visible to every user.
        return True

    def get_context_data(self, request):
        template = self.tab_group.kwargs['catalog_template']
        return {"catalog_template": template}
class CatalogTabs(tabs.TabGroup):
    """Tab group combining the overview and template tabs."""
    slug = "catalogpanel_tabs"
    sticky = True
    tabs = (CatalogTab, CatalogTemplateTab)
| {
"content_hash": "512b0fa74be40a27992cfcbd2fc19364",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 78,
"avg_line_length": 26.92105263157895,
"alnum_prop": 0.7096774193548387,
"repo_name": "AlexOugh/horizon",
"id": "9b2cdc423ba55de475467a71f7f068b4e2281a6d",
"size": "1023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/nikolaboard/catalogpanel/tabs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1000458"
},
{
"name": "JavaScript",
"bytes": "244031"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "4545176"
},
{
"name": "Shell",
"bytes": "18285"
}
],
"symlink_target": ""
} |
"""
Exception classes for OpenMDAO.
"""
from StringIO import StringIO
from traceback import print_exception
class ConstraintError(ValueError):
    """Raised when a constraint is violated."""
class CircularDependencyError(RuntimeError):
    """Raised when a circular dependency occurs."""
class RunInterrupted(RuntimeError):
    """Raised when *run()* was interrupted, implying an inconsistent state."""
class RunStopped(RuntimeError):
    """Raised when *run()* was stopped, implying a consistent state but
    not necessarily reflecting input values."""
def traceback_str(exc):
    """Return the traceback string for *exc*.

    *exc* may be an exception object or a 3-tuple of the form
    (exc_class, exc, traceback). Falls back to the exception's
    ``traceback`` attribute, or to ``str(exc)`` when neither is available.
    """
    if isinstance(exc, tuple) and len(exc) == 3:
        buf = StringIO()
        print_exception(exc[0], exc[1], exc[2], file=buf)
        return buf.getvalue().strip()
    # Mirror the original try/except AttributeError with a sentinel so an
    # explicit `traceback` attribute of any value (even None) is honored.
    _missing = object()
    tb = getattr(exc, 'traceback', _missing)
    if tb is _missing:
        return str(exc)
    return tb
def exception_str(exc):
    """Return the message string for the given exception.

    *exc* may be a bare exception, or a tuple of the form
    (exc, traceback) or (exc_class, exc, traceback).
    """
    if isinstance(exc, tuple) and len(exc) == 3:
        return str(exc[1])
    if isinstance(exc, tuple) and len(exc) == 2:
        # Bug fix: the docstring always promised (exc, traceback) support,
        # but the original fell through and returned str() of the whole
        # tuple instead of the exception's message.
        return str(exc[0])
    return str(exc)
| {
"content_hash": "5d279229b695ef582cae1062be2a9959",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 25.942307692307693,
"alnum_prop": 0.6782802075611564,
"repo_name": "DailyActie/Surrogate-Model",
"id": "b99cb4baa982ea0fad9ad99416b069a5691accae",
"size": "1349",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/OpenMDAO-Framework-dev/openmdao.main/src/openmdao/main/exceptions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
from pants.util.strutil import camelcase
# Matches `package foo.bar;`, capturing the package name.
DEFAULT_PACKAGE_PARSER = re.compile(r'^\s*package\s+([^;]+)\s*;\s*$')
# Matches `option name = value;`, capturing name and value.
OPTION_PARSER = re.compile(r'^\s*option\s+([^ =]+)\s*=\s*([^\s]+)\s*;\s*$')
# Each of the following captures (keyword, declared type name).
SERVICE_PARSER = re.compile(r'^\s*(service)\s+([^\s{]+).*')
MESSAGE_PARSER = re.compile(r'^\s*(message)\s+([^\s{]+).*')
ENUM_PARSER = re.compile(r'^\s*(enum)\s+([^\s{]+).*')
EXTEND_PARSER = re.compile(r'^\s*(extend)\s+([^\s{]+).*')
# Captures the stem of a `<stem>.proto` filename.
PROTO_FILENAME_PATTERN = re.compile(r'^(.*).proto$')
class ProtobufParse(object):
  """Parses a .proto file for its package, top-level type names, and the
  java_* options relevant to generated code layout."""

  class InvalidProtoFilenameError(Exception):
    """Raised if an unexpected filename is passed"""
    pass

  def __init__(self, path, source):
    """
    :param string path: base path to proto file
    :param string source: relative path to proto file with respect to the base
    """
    self.path = path
    self.source = source
    self.package = ''
    self.multiple_files = False
    self.services = set()
    self.extends = set()
    self.outer_class_name = get_outer_class_name(source)
    # Note that nesting of types isn't taken into account
    self.enums = set()
    self.messages = set()

  def parse(self):
    """Scan the file line-by-line, populating package, java options and the
    sets of top-level services/messages/enums/extends."""
    lines = self._read_lines()
    type_depth = 0
    java_package = None
    for line in lines:
      match = DEFAULT_PACKAGE_PARSER.match(line)
      if match:
        self.package = match.group(1)
        continue
      else:
        match = OPTION_PARSER.match(line)
        if match:
          name = match.group(1)
          value = match.group(2).strip('"')
          if 'java_package' == name:
            java_package = value
          elif 'java_outer_classname' == name:
            self.outer_class_name = value
          elif 'java_multiple_files' == name:
            self.multiple_files = (value == 'true')
        else:
          uline = line.decode('utf-8').strip()
          # Track brace depth so only top-level declarations are recorded.
          type_depth += uline.count('{') - uline.count('}')
          match = SERVICE_PARSER.match(line)
          update_type_list(match, type_depth, self.services)
          if not match:
            match = ENUM_PARSER.match(line)
            if match:
              update_type_list(match, type_depth, self.enums)
              continue
            match = MESSAGE_PARSER.match(line)
            if match:
              update_type_list(match, type_depth, self.messages)
              continue
            match = EXTEND_PARSER.match(line)
            if match:
              update_type_list(match, type_depth, self.extends)
              continue
    if java_package:
      # An explicit java_package option overrides the proto package.
      self.package = java_package

  def _read_lines(self):
    """Return all lines of the proto file."""
    with open(self.path, 'r') as protobuf:
      lines = protobuf.readlines()
    return lines

  @property
  def filename(self):
    ''':return: the name of the file without the directory or .proto extension.'''
    name = os.path.basename(self.path)
    match = PROTO_FILENAME_PATTERN.match(name)
    # Bug fix: the original tested `if not name:` (the basename, which is
    # always truthy here), so a path not ending in .proto crashed with
    # AttributeError on match.group(1) instead of raising the intended
    # error. Also restored the missing space in the message.
    if not match:
      raise self.InvalidProtoFilenameError("{0} does not end with .proto".format(self.path))
    return match.group(1)
def update_type_list(match, type_depth, outer_types):
  """Add the type name captured by *match* to *outer_types*, but only for
  top-level declarations (depth < 2 copes with `{ }` on the same line)."""
  if match is None:
    return
  if type_depth >= 2:
    # Nested declaration - intentionally ignored.
    return
  outer_types.add(match.group(2))
def get_outer_class_name(source):
  """Derive the default Java outer class name from a .proto file's name."""
  basename = os.path.basename(source)
  stem = re.sub(r'\.proto$', '', basename)
  return camelcase(stem)
| {
"content_hash": "27a3e0edd66e242af4d2e957337fbc2a",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 93,
"avg_line_length": 32.08181818181818,
"alnum_prop": 0.5996032870501559,
"repo_name": "megaserg/pants",
"id": "dc19e0c8f01e529712caccfe459f6f23fdb3a072",
"size": "3676",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/codegen/tasks/protobuf_parse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "64029"
},
{
"name": "Java",
"bytes": "307373"
},
{
"name": "JavaScript",
"bytes": "28962"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4127534"
},
{
"name": "Scala",
"bytes": "85457"
},
{
"name": "Shell",
"bytes": "49640"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
} |
"""Functional tests using WebTest.
See: http://webtest.readthedocs.org/
"""
from flask import url_for
class TestLoggingIn:
    """Login."""

    def _login(self, testapp, username, password):
        """Fill and submit the navbar login form; return the raw response."""
        res = testapp.get('/login')
        form = res.forms['loginForm']
        form['username'] = username
        form['password'] = password
        return form.submit()

    def test_can_log_in_returns_200(self, user, testapp):
        """Login successful."""
        res = self._login(testapp, user.username, 'myprecious').follow()
        assert res.status_code == 200

    def test_sees_alert_on_log_out(self, user, testapp):
        """Show alert on logout."""
        self._login(testapp, user.username, 'myprecious').follow()
        res = testapp.get(url_for('security.logout')).follow()
        # sees alert
        assert 'fa fa-sign-in' in res

    def test_sees_error_message_if_password_is_incorrect(self, user, testapp):
        """Show error if password is incorrect."""
        res = self._login(testapp, user.username, 'wrong')
        # sees error
        assert 'Invalid username or password!' in res

    def test_sees_error_message_if_username_doesnt_exist(self, user, testapp):
        """Show error if username doesn't exist."""
        res = self._login(testapp, 'unknown', 'myprecious')
        # sees error
        assert 'Invalid username or password!' in res
| {
"content_hash": "3548482a7bf34b28f7c1efa877d916fe",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 78,
"avg_line_length": 32.983333333333334,
"alnum_prop": 0.5816068721576554,
"repo_name": "makerhanoi/tagio",
"id": "7c5293b1bd68fad8577409ccdc605442a4eb44f3",
"size": "2003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_functional.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "14405"
},
{
"name": "JavaScript",
"bytes": "210801"
},
{
"name": "Python",
"bytes": "37286"
}
],
"symlink_target": ""
} |
import subprocess
import logging
from . import common
LOG = logging.getLogger(__name__)
def process_mp3(input_path, output_path):
    """Cut a short faded clip out of *input_path* with ffmpeg.

    Starts halfway through the track (or at 0 for tracks of 20s or less)
    and applies fade-in/fade-out filters.

    :returns: ffmpeg's decoded stdout on success, None on failure.
    """
    try:
        duration = common.get_media_duration(input_path)
        if duration > 20:
            start_time = duration / 2
        else:
            start_time = 0
        # NOTE(security/review): the paths are interpolated into a string run
        # with shell=True; they must never come from untrusted input, or the
        # call should be converted to an argument list with shell=False.
        command = "ffmpeg " \
                  "-ss {start_time:d} " \
                  "-i {input} " \
                  "-to {end_time:d} " \
                  "-af 'afade=t=in:ss=0:d=1,afade=t=out:ss=11:d=13' " \
                  "{output} -y".format(start_time=int(start_time),
                                       end_time=13,
                                       input=input_path,
                                       output=output_path,
                                       )
        res = subprocess.check_output(command, shell=True)
    except subprocess.CalledProcessError as exc:
        # Fix: use the module logger (already configured above) instead of
        # print(), so failures reach the application log.
        LOG.error("Failed to process %s: %s", input_path, exc)
        return None
    else:
        return res.decode()
"content_hash": "d070a1c3d600382a386195ca876da585",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 34.03333333333333,
"alnum_prop": 0.4671890303623898,
"repo_name": "vladimiroff/humble-media",
"id": "e7f7a1c68f448866d420203064b470cfac82dc08",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "humblemedia/resources/utils/audio_clip.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2504"
},
{
"name": "JavaScript",
"bytes": "28011"
},
{
"name": "Python",
"bytes": "69551"
}
],
"symlink_target": ""
} |
from netmiko import ConnectHandler
from getpass import getpass
# Prompt for credentials interactively; neither value is echoed.
password = getpass()
secret = getpass("Enter secret: ")

device = {
    "device_type": "cisco_ios",
    "host": "cisco1.lasthop.io",
    "username": "pyclass",
    "password": password,
    "secret": secret,
}

connection = ConnectHandler(**device)
# Elevate to privileged exec mode before reading the prompt.
connection.enable()
print(connection.find_prompt())
| {
"content_hash": "c857961cd878ca02f9803c1d7a434edc",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 46,
"avg_line_length": 23.055555555555557,
"alnum_prop": 0.689156626506024,
"repo_name": "ktbyers/netmiko",
"id": "a8ec0c09e0dc9fceeaed4d6dfb60b1da50adabd9",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/enable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "384"
},
{
"name": "Python",
"bytes": "726727"
},
{
"name": "Shell",
"bytes": "21540"
}
],
"symlink_target": ""
} |
import os
from topaz.module import ClassDef
from topaz.objects.objectobject import W_Object
class W_EnvObject(W_Object):
    """Ruby-level ENV object exposing the process environment variables."""
    # NOTE(review): "EnviromentVariables" is misspelled (missing an 'n'),
    # but it is the runtime-visible class name, so fixing it here would be
    # a behavior change - flagging rather than changing.
    classdef = ClassDef("EnviromentVariables", W_Object.classdef)
    @classdef.setup_class
    def setup_class(cls, space, w_cls):
        # Install a singleton instance as the ENV constant on Object.
        space.set_const(space.w_object, "ENV", cls(space))
    @classdef.method("class")
    def method_class(self, space):
        # ENV reports its class as Object.
        return space.w_object
    @classdef.method("[]", key="str")
    def method_subscript(self, space, key):
        # ENV[key] -> value string, or nil when the variable is unset.
        if "\0" in key:
            raise space.error(space.w_ArgumentError, "bad environment variable name")
        try:
            val = os.environ[key]
        except KeyError:
            return space.w_nil
        return space.newstr_fromstr(val)
    @classdef.method("[]=", key="str", value="str")
    def method_subscript_assign(self, space, key, value):
        # ENV[key] = value; NUL bytes are rejected in both key and value.
        if "\0" in key:
            raise space.error(space.w_ArgumentError, "bad environment variable name")
        if "\0" in value:
            raise space.error(space.w_ArgumentError, "bad environment variable value")
        os.environ[key] = value
        return space.newstr_fromstr(value)
    @classdef.method("delete", key="str")
    def method_delete(self, space, key, block=None):
        # ENV.delete(key) -> the removed value; when the variable does not
        # exist, yields the key to the block (if given) and returns nil.
        if "\0" in key:
            raise space.error(space.w_ArgumentError, "bad environment variable name")
        try:
            val = os.environ[key]
        except KeyError:
            if block is not None:
                space.invoke_block(block, [space.newstr_fromstr(key)])
            return space.w_nil
        del os.environ[key]
        return space.newstr_fromstr(val)
| {
"content_hash": "b2cf1947d60bb7ca2161107a1a2f21fb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 86,
"avg_line_length": 34.1875,
"alnum_prop": 0.6166971358927483,
"repo_name": "kachick/topaz",
"id": "bcb62104da3b525aa42c923e7bf55e379372bcaf",
"size": "1641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "topaz/objects/envobject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1112159"
},
{
"name": "Ruby",
"bytes": "199941"
},
{
"name": "Shell",
"bytes": "7755"
}
],
"symlink_target": ""
} |
"""Support for LIFX lights."""
import asyncio
from datetime import timedelta
from functools import partial
import logging
import math
import aiolifx as aiolifx_module
import aiolifx_effects as aiolifx_effects_module
from awesomeversion import AwesomeVersion
import voluptuous as vol
from homeassistant import util
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_XY_COLOR,
COLOR_GROUP,
DOMAIN,
LIGHT_TURN_ON_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
VALID_BRIGHTNESS,
VALID_BRIGHTNESS_PCT,
LightEntity,
preprocess_turn_on_alternatives,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_MODEL,
ATTR_SW_VERSION,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.device_registry as dr
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.event import async_track_point_in_utc_time
import homeassistant.util.color as color_util
from . import (
CONF_BROADCAST,
CONF_PORT,
CONF_SERVER,
DATA_LIFX_MANAGER,
DOMAIN as LIFX_DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
DISCOVERY_INTERVAL = 60
MESSAGE_TIMEOUT = 1.0
MESSAGE_RETRIES = 8
UNAVAILABLE_GRACE = 90
FIX_MAC_FW = AwesomeVersion("3.70")
SERVICE_LIFX_SET_STATE = "set_state"
ATTR_INFRARED = "infrared"
ATTR_ZONES = "zones"
ATTR_POWER = "power"
LIFX_SET_STATE_SCHEMA = cv.make_entity_service_schema(
{
**LIGHT_TURN_ON_SCHEMA,
ATTR_INFRARED: vol.All(vol.Coerce(int), vol.Clamp(min=0, max=255)),
ATTR_ZONES: vol.All(cv.ensure_list, [cv.positive_int]),
ATTR_POWER: cv.boolean,
}
)
SERVICE_EFFECT_PULSE = "effect_pulse"
SERVICE_EFFECT_COLORLOOP = "effect_colorloop"
SERVICE_EFFECT_STOP = "effect_stop"
ATTR_POWER_ON = "power_on"
ATTR_PERIOD = "period"
ATTR_CYCLES = "cycles"
ATTR_SPREAD = "spread"
ATTR_CHANGE = "change"
PULSE_MODE_BLINK = "blink"
PULSE_MODE_BREATHE = "breathe"
PULSE_MODE_PING = "ping"
PULSE_MODE_STROBE = "strobe"
PULSE_MODE_SOLID = "solid"
PULSE_MODES = [
PULSE_MODE_BLINK,
PULSE_MODE_BREATHE,
PULSE_MODE_PING,
PULSE_MODE_STROBE,
PULSE_MODE_SOLID,
]
LIFX_EFFECT_SCHEMA = {
vol.Optional(ATTR_POWER_ON, default=True): cv.boolean,
}
LIFX_EFFECT_PULSE_SCHEMA = cv.make_entity_service_schema(
{
**LIFX_EFFECT_SCHEMA,
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
vol.Exclusive(ATTR_COLOR_NAME, COLOR_GROUP): cv.string,
vol.Exclusive(ATTR_RGB_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple), vol.ExactSequence((cv.byte, cv.byte, cv.byte))
),
vol.Exclusive(ATTR_XY_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple), vol.ExactSequence((cv.small_float, cv.small_float))
),
vol.Exclusive(ATTR_HS_COLOR, COLOR_GROUP): vol.All(
vol.Coerce(tuple),
vol.ExactSequence(
(
vol.All(vol.Coerce(float), vol.Range(min=0, max=360)),
vol.All(vol.Coerce(float), vol.Range(min=0, max=100)),
)
),
),
vol.Exclusive(ATTR_COLOR_TEMP, COLOR_GROUP): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Exclusive(ATTR_KELVIN, COLOR_GROUP): cv.positive_int,
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Range(min=0.05)),
ATTR_CYCLES: vol.All(vol.Coerce(float), vol.Range(min=1)),
ATTR_MODE: vol.In(PULSE_MODES),
}
)
LIFX_EFFECT_COLORLOOP_SCHEMA = cv.make_entity_service_schema(
{
**LIFX_EFFECT_SCHEMA,
ATTR_BRIGHTNESS: VALID_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT: VALID_BRIGHTNESS_PCT,
ATTR_PERIOD: vol.All(vol.Coerce(float), vol.Clamp(min=0.05)),
ATTR_CHANGE: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_SPREAD: vol.All(vol.Coerce(float), vol.Clamp(min=0, max=360)),
ATTR_TRANSITION: cv.positive_float,
}
)
LIFX_EFFECT_STOP_SCHEMA = cv.make_entity_service_schema({})
def aiolifx():
    """Provide the imported aiolifx module (indirection eases test patching)."""
    module = aiolifx_module
    return module
def aiolifx_effects():
    """Provide the imported aiolifx_effects module (single patch point for tests)."""
    module = aiolifx_effects_module
    return module
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the LIFX light platform. Obsolete.

    YAML light-platform configuration is no longer supported; setup goes
    through a config entry instead (see async_setup_entry). This stub only
    logs a warning so old configurations fail loudly rather than silently.
    """
    _LOGGER.warning("LIFX no longer works with light platform configuration")
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up LIFX from a config entry.

    Chooses the network interfaces to listen on in priority order
    (manual config, then scanned interfaces, then the default interface),
    then starts one discovery task per interface via a shared LIFXManager.
    """
    # Priority 1: manual config
    if not (interfaces := hass.data[LIFX_DOMAIN].get(DOMAIN)):
        # Priority 2: scanned interfaces
        lifx_ip_addresses = await aiolifx().LifxScan(hass.loop).scan()
        interfaces = [{CONF_SERVER: ip} for ip in lifx_ip_addresses]
        if not interfaces:
            # Priority 3: default interface
            interfaces = [{}]
    platform = entity_platform.async_get_current_platform()
    lifx_manager = LIFXManager(hass, platform, async_add_entities)
    # Stored globally so other parts of the integration can reach the manager.
    hass.data[DATA_LIFX_MANAGER] = lifx_manager
    for interface in interfaces:
        lifx_manager.start_discovery(interface)
    return True
def lifx_features(bulb):
    """Return a feature map for this bulb, or a default map if unknown."""
    features_map = aiolifx().products.features_map
    # Product id 1 serves as the generic fallback feature set for unknown bulbs.
    return features_map.get(bulb.product) or features_map.get(1)
def find_hsbk(hass, **kwargs):
    """Find the desired color from a number of possible inputs.

    Returns an [hue, saturation, brightness, kelvin] list in LIFX 16-bit
    units, or None if no color-related attribute was supplied.
    """
    hue = saturation = brightness = kelvin = None
    preprocess_turn_on_alternatives(hass, kwargs)

    # Resolve hue/saturation from whichever color attribute was supplied.
    if ATTR_HS_COLOR in kwargs:
        hue, saturation = kwargs[ATTR_HS_COLOR]
    elif ATTR_RGB_COLOR in kwargs:
        hue, saturation = color_util.color_RGB_to_hs(*kwargs[ATTR_RGB_COLOR])
    elif ATTR_XY_COLOR in kwargs:
        hue, saturation = color_util.color_xy_to_hs(*kwargs[ATTR_XY_COLOR])

    if hue is not None:
        # Scale HA's 0-360 / 0-100 ranges onto LIFX's 16-bit wire values.
        hue = int(hue / 360 * 65535)
        saturation = int(saturation / 100 * 65535)
        kelvin = 3500

    if ATTR_COLOR_TEMP in kwargs:
        mired = kwargs[ATTR_COLOR_TEMP]
        kelvin = int(color_util.color_temperature_mired_to_kelvin(mired))
        # White light: color temperature implies zero saturation.
        saturation = 0

    if ATTR_BRIGHTNESS in kwargs:
        brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])

    hsbk = [hue, saturation, brightness, kelvin]
    return hsbk if any(part is not None for part in hsbk) else None
def merge_hsbk(base, change):
    """Copy change on top of base, except when None."""
    if change is None:
        return None
    merged = []
    for base_part, change_part in zip(base, change):
        merged.append(base_part if change_part is None else change_part)
    return merged
class LIFXManager:
    """Representation of all known LIFX entities."""

    def __init__(self, hass, platform, async_add_entities):
        """Initialize the light."""
        # Entities keyed by bulb MAC address (see register_new_bulb).
        self.entities = {}
        self.hass = hass
        self.platform = platform
        self.async_add_entities = async_add_entities
        self.effects_conductor = aiolifx_effects().Conductor(hass.loop)
        self.discoveries = []
        # Release sockets and services when Home Assistant shuts down.
        self.cleanup_unsub = self.hass.bus.async_listen(
            EVENT_HOMEASSISTANT_STOP, self.cleanup
        )
        self.register_set_state()
        self.register_effects()

    def start_discovery(self, interface):
        """Start discovery on a network interface."""
        kwargs = {"discovery_interval": DISCOVERY_INTERVAL}
        if broadcast_ip := interface.get(CONF_BROADCAST):
            kwargs["broadcast_ip"] = broadcast_ip
        lifx_discovery = aiolifx().LifxDiscovery(self.hass.loop, self, **kwargs)
        # start() takes its own, separate kwargs for the listening socket.
        kwargs = {}
        if listen_ip := interface.get(CONF_SERVER):
            kwargs["listen_ip"] = listen_ip
        if listen_port := interface.get(CONF_PORT):
            kwargs["listen_port"] = listen_port
        lifx_discovery.start(**kwargs)
        self.discoveries.append(lifx_discovery)

    @callback
    def cleanup(self, event=None):
        """Release resources."""
        self.cleanup_unsub()
        for discovery in self.discoveries:
            discovery.cleanup()
        # Remove every service this manager registered.
        for service in (
            SERVICE_LIFX_SET_STATE,
            SERVICE_EFFECT_STOP,
            SERVICE_EFFECT_PULSE,
            SERVICE_EFFECT_COLORLOOP,
        ):
            self.hass.services.async_remove(LIFX_DOMAIN, service)

    def register_set_state(self):
        """Register the LIFX set_state service call."""
        # Entity service: dispatches to LIFXLight.set_state on each target.
        self.platform.async_register_entity_service(
            SERVICE_LIFX_SET_STATE, LIFX_SET_STATE_SCHEMA, "set_state"
        )

    def register_effects(self):
        """Register the LIFX effects as hass service calls."""

        async def service_handler(service):
            """Apply a service, i.e. start an effect."""
            entities = await self.platform.async_extract_from_service(service)
            if entities:
                await self.start_effect(entities, service.service, **service.data)

        self.hass.services.async_register(
            LIFX_DOMAIN,
            SERVICE_EFFECT_PULSE,
            service_handler,
            schema=LIFX_EFFECT_PULSE_SCHEMA,
        )
        self.hass.services.async_register(
            LIFX_DOMAIN,
            SERVICE_EFFECT_COLORLOOP,
            service_handler,
            schema=LIFX_EFFECT_COLORLOOP_SCHEMA,
        )
        self.hass.services.async_register(
            LIFX_DOMAIN,
            SERVICE_EFFECT_STOP,
            service_handler,
            schema=LIFX_EFFECT_STOP_SCHEMA,
        )

    async def start_effect(self, entities, service, **kwargs):
        """Start a light effect on entities."""
        bulbs = [light.bulb for light in entities]
        if service == SERVICE_EFFECT_PULSE:
            effect = aiolifx_effects().EffectPulse(
                power_on=kwargs.get(ATTR_POWER_ON),
                period=kwargs.get(ATTR_PERIOD),
                cycles=kwargs.get(ATTR_CYCLES),
                mode=kwargs.get(ATTR_MODE),
                hsbk=find_hsbk(self.hass, **kwargs),
            )
            await self.effects_conductor.start(effect, bulbs)
        elif service == SERVICE_EFFECT_COLORLOOP:
            preprocess_turn_on_alternatives(self.hass, kwargs)
            brightness = None
            if ATTR_BRIGHTNESS in kwargs:
                brightness = convert_8_to_16(kwargs[ATTR_BRIGHTNESS])
            effect = aiolifx_effects().EffectColorloop(
                power_on=kwargs.get(ATTR_POWER_ON),
                period=kwargs.get(ATTR_PERIOD),
                change=kwargs.get(ATTR_CHANGE),
                spread=kwargs.get(ATTR_SPREAD),
                transition=kwargs.get(ATTR_TRANSITION),
                brightness=brightness,
            )
            await self.effects_conductor.start(effect, bulbs)
        elif service == SERVICE_EFFECT_STOP:
            await self.effects_conductor.stop(bulbs)

    @callback
    def register(self, bulb):
        """Handle aiolifx detected bulb."""
        self.hass.async_create_task(self.register_new_bulb(bulb))

    async def register_new_bulb(self, bulb):
        """Handle newly detected bulb."""
        if bulb.mac_addr in self.entities:
            # Re-discovery of a known bulb: just mark it available again.
            entity = self.entities[bulb.mac_addr]
            entity.registered = True
            _LOGGER.debug("%s register AGAIN", entity.who)
            await entity.update_hass()
        else:
            _LOGGER.debug("%s register NEW", bulb.ip_addr)
            # Read initial state
            ack = AwaitAioLIFX().wait
            # Used to populate sw_version
            # no need to wait as we do not
            # need it until later
            bulb.get_hostfirmware()
            color_resp = await ack(bulb.get_color)
            if color_resp:
                version_resp = await ack(bulb.get_version)
            # NOTE(review): if color_resp were falsy but not None this would
            # hit an UnboundLocalError on version_resp — presumably aiolifx
            # only returns a message object or None; confirm.
            if color_resp is None or version_resp is None:
                _LOGGER.error("Failed to initialize %s", bulb.ip_addr)
                bulb.registered = False
            else:
                bulb.timeout = MESSAGE_TIMEOUT
                bulb.retry_count = MESSAGE_RETRIES
                bulb.unregister_timeout = UNAVAILABLE_GRACE
                # Pick the entity class matching the bulb's capabilities.
                if lifx_features(bulb)["multizone"]:
                    entity = LIFXStrip(bulb, self.effects_conductor)
                elif lifx_features(bulb)["color"]:
                    entity = LIFXColor(bulb, self.effects_conductor)
                else:
                    entity = LIFXWhite(bulb, self.effects_conductor)
                _LOGGER.debug("%s register READY", entity.who)
                self.entities[bulb.mac_addr] = entity
                self.async_add_entities([entity], True)

    @callback
    def unregister(self, bulb):
        """Handle aiolifx disappearing bulbs."""
        if bulb.mac_addr in self.entities:
            entity = self.entities[bulb.mac_addr]
            _LOGGER.debug("%s unregister", entity.who)
            # Mark unavailable but keep the entity so re-discovery is cheap.
            entity.registered = False
            entity.async_write_ha_state()
class AwaitAioLIFX:
    """Wait for an aiolifx callback and return the message."""

    def __init__(self):
        """Initialize the wrapper."""
        self.message = None
        self.event = asyncio.Event()

    @callback
    def callback(self, bulb, message):
        """Handle responses."""
        # The bulb argument is supplied by aiolifx but only the message is kept.
        self.message = message
        self.event.set()

    async def wait(self, method):
        """Call an aiolifx method and wait for its response.

        `method` is an aiolifx request callable accepting a `callb` keyword.
        Returns the response message; presumably aiolifx invokes the callback
        with None on timeout — confirm against the aiolifx docs.
        """
        self.message = None
        self.event.clear()
        method(callb=self.callback)
        await self.event.wait()
        return self.message
def convert_8_to_16(value):
    """Scale an 8 bit level into 16 bits."""
    # Replicate the byte into both halves so 0xFF maps to 0xFFFF exactly.
    high = value << 8
    return high | value
def convert_16_to_8(value):
    """Scale a 16 bit level into 8 bits."""
    # Keep only the high byte of the 16-bit value.
    return value >> 8
class LIFXLight(LightEntity):
    """Representation of a LIFX light."""

    def __init__(self, bulb, effects_conductor):
        """Initialize the light."""
        self.bulb = bulb
        self.effects_conductor = effects_conductor
        # Flipped to False by LIFXManager.unregister when the bulb disappears.
        self.registered = True
        # Unsubscribe callable for a scheduled post-transition update, if any.
        self.postponed_update = None
        # Serializes set_state calls so bulb commands do not interleave.
        self.lock = asyncio.Lock()

    def get_mac_addr(self):
        """Increment the last byte of the mac address by one for FW>3.70."""
        if (
            self.bulb.host_firmware_version
            and AwesomeVersion(self.bulb.host_firmware_version) >= FIX_MAC_FW
        ):
            octets = [int(octet, 16) for octet in self.bulb.mac_addr.split(":")]
            # Wrap at 256 so ff rolls over to 00.
            octets[5] = (octets[5] + 1) % 256
            return ":".join(f"{octet:02x}" for octet in octets)
        return self.bulb.mac_addr

    @property
    def device_info(self) -> DeviceInfo:
        """Return information about the device."""
        _map = aiolifx().products.product_map
        info = DeviceInfo(
            identifiers={(LIFX_DOMAIN, self.unique_id)},
            connections={(dr.CONNECTION_NETWORK_MAC, self.get_mac_addr())},
            manufacturer="LIFX",
            name=self.name,
        )
        # Fall back to the raw product id when the map has no friendly name.
        if (model := (_map.get(self.bulb.product) or self.bulb.product)) is not None:
            info[ATTR_MODEL] = str(model)
        if (version := self.bulb.host_firmware_version) is not None:
            info[ATTR_SW_VERSION] = version
        return info

    @property
    def available(self):
        """Return the availability of the bulb."""
        return self.registered

    @property
    def unique_id(self):
        """Return a unique ID."""
        return self.bulb.mac_addr

    @property
    def name(self):
        """Return the name of the bulb."""
        return self.bulb.label

    @property
    def who(self):
        """Return a string identifying the bulb."""
        return f"{self.bulb.ip_addr} ({self.name})"

    @property
    def min_mireds(self):
        """Return the coldest color_temp that this light supports."""
        kelvin = lifx_features(self.bulb)["max_kelvin"]
        return math.floor(color_util.color_temperature_kelvin_to_mired(kelvin))

    @property
    def max_mireds(self):
        """Return the warmest color_temp that this light supports."""
        kelvin = lifx_features(self.bulb)["min_kelvin"]
        return math.ceil(color_util.color_temperature_kelvin_to_mired(kelvin))

    @property
    def supported_features(self):
        """Flag supported features."""
        support = SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_EFFECT
        bulb_features = lifx_features(self.bulb)
        # A fixed kelvin range means the bulb cannot change color temperature.
        if bulb_features["min_kelvin"] != bulb_features["max_kelvin"]:
            support |= SUPPORT_COLOR_TEMP
        return support

    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        # power_level scales the reported brightness so a dimming-off bulb
        # reports progressively lower values.
        fade = self.bulb.power_level / 65535
        return convert_16_to_8(int(fade * self.bulb.color[2]))

    @property
    def color_temp(self):
        """Return the color temperature."""
        _, sat, _, kelvin = self.bulb.color
        # A saturated bulb shows color, not white: no color temperature.
        if sat:
            return None
        return color_util.color_temperature_kelvin_to_mired(kelvin)

    @property
    def is_on(self):
        """Return true if light is on."""
        return self.bulb.power_level != 0

    @property
    def effect(self):
        """Return the name of the currently running effect."""
        effect = self.effects_conductor.effect(self.bulb)
        if effect:
            return f"lifx_effect_{effect.name}"
        return None

    async def update_hass(self, now=None):
        """Request new status and push it to hass."""
        self.postponed_update = None
        await self.async_update()
        self.async_write_ha_state()

    async def update_during_transition(self, when):
        """Update state at the start and end of a transition."""
        if self.postponed_update:
            # Cancel the previously scheduled update before rescheduling.
            self.postponed_update()
        # Transition has started
        await self.update_hass()
        # Transition has ended
        if when > 0:
            self.postponed_update = async_track_point_in_utc_time(
                self.hass,
                self.update_hass,
                util.dt.utcnow() + timedelta(milliseconds=when),
            )

    async def async_turn_on(self, **kwargs):
        """Turn the light on."""
        kwargs[ATTR_POWER] = True
        # Fire-and-forget: set_state serializes itself via self.lock.
        self.hass.async_create_task(self.set_state(**kwargs))

    async def async_turn_off(self, **kwargs):
        """Turn the light off."""
        kwargs[ATTR_POWER] = False
        self.hass.async_create_task(self.set_state(**kwargs))

    async def set_state(self, **kwargs):
        """Set a color on the light and turn it on/off."""
        async with self.lock:
            bulb = self.bulb
            # Any running effect would fight with the manual state change.
            await self.effects_conductor.stop([bulb])
            if ATTR_EFFECT in kwargs:
                await self.default_effect(**kwargs)
                return
            if ATTR_INFRARED in kwargs:
                bulb.set_infrared(convert_8_to_16(kwargs[ATTR_INFRARED]))
            if ATTR_TRANSITION in kwargs:
                # LIFX durations are in milliseconds.
                fade = int(kwargs[ATTR_TRANSITION] * 1000)
            else:
                fade = 0
            # These are both False if ATTR_POWER is not set
            power_on = kwargs.get(ATTR_POWER, False)
            power_off = not kwargs.get(ATTR_POWER, True)
            hsbk = find_hsbk(self.hass, **kwargs)
            # Send messages, waiting for ACK each time
            ack = AwaitAioLIFX().wait
            if not self.is_on:
                if power_off:
                    await self.set_power(ack, False)
                # If fading on with color, set color immediately
                if hsbk and power_on:
                    await self.set_color(ack, hsbk, kwargs)
                    await self.set_power(ack, True, duration=fade)
                elif hsbk:
                    await self.set_color(ack, hsbk, kwargs, duration=fade)
                elif power_on:
                    await self.set_power(ack, True, duration=fade)
            else:
                if power_on:
                    await self.set_power(ack, True)
                if hsbk:
                    await self.set_color(ack, hsbk, kwargs, duration=fade)
                if power_off:
                    await self.set_power(ack, False, duration=fade)
            # Avoid state ping-pong by holding off updates as the state settles
            await asyncio.sleep(0.3)
            # Update when the transition starts and ends
            await self.update_during_transition(fade)

    async def set_power(self, ack, pwr, duration=0):
        """Send a power change to the bulb."""
        await ack(partial(self.bulb.set_power, pwr, duration=duration))

    async def set_color(self, ack, hsbk, kwargs, duration=0):
        """Send a color change to the bulb."""
        # Unspecified HSBK components keep the bulb's current values.
        hsbk = merge_hsbk(self.bulb.color, hsbk)
        await ack(partial(self.bulb.set_color, hsbk, duration=duration))

    async def default_effect(self, **kwargs):
        """Start an effect with default parameters."""
        service = kwargs[ATTR_EFFECT]
        data = {ATTR_ENTITY_ID: self.entity_id}
        await self.hass.services.async_call(
            LIFX_DOMAIN, service, data, context=self._context
        )

    async def async_update(self):
        """Update bulb status."""
        # Skip polling while a set_state is in flight to avoid stale reads.
        if self.available and not self.lock.locked():
            await AwaitAioLIFX().wait(self.bulb.get_color)
class LIFXWhite(LIFXLight):
    """Representation of a white-only LIFX light."""

    @property
    def effect_list(self):
        """Return the effects a white-only bulb can run."""
        # White bulbs cannot run the colorloop effect.
        supported = [SERVICE_EFFECT_PULSE, SERVICE_EFFECT_STOP]
        return supported
class LIFXColor(LIFXLight):
    """Representation of a color LIFX light."""

    @property
    def supported_features(self):
        """Flag supported features."""
        # Everything the base class supports, plus full color.
        return super().supported_features | SUPPORT_COLOR

    @property
    def effect_list(self):
        """Return the effects a color bulb can run."""
        return [SERVICE_EFFECT_COLORLOOP, SERVICE_EFFECT_PULSE, SERVICE_EFFECT_STOP]

    @property
    def hs_color(self):
        """Return the hs value, or None when the bulb is unsaturated (white)."""
        hue, sat, _, _ = self.bulb.color
        if not sat:
            return None
        # Scale LIFX 16-bit values back to HA's 0-360 / 0-100 ranges.
        return (hue / 65535 * 360, sat / 65535 * 100)
class LIFXStrip(LIFXColor):
    """Representation of a LIFX light strip with multiple zones."""

    async def set_color(self, ack, hsbk, kwargs, duration=0):
        """Send a color change to the bulb."""
        bulb = self.bulb
        num_zones = len(bulb.color_zones)
        if (zones := kwargs.get(ATTR_ZONES)) is None:
            # Fast track: setting all zones to the same brightness and color
            # can be treated as a single-zone bulb.
            if hsbk[2] is not None and hsbk[3] is not None:
                await super().set_color(ack, hsbk, kwargs, duration)
                return
            zones = list(range(0, num_zones))
        else:
            # Drop duplicates and out-of-range zone indices.
            zones = [x for x in set(zones) if x < num_zones]
        # Zone brightness is not reported when powered off
        if not self.is_on and hsbk[2] is None:
            # Briefly power on so per-zone brightness can be read, then
            # restore the off state before applying the change.
            await self.set_power(ack, True)
            await asyncio.sleep(0.3)
            await self.update_color_zones()
            await self.set_power(ack, False)
            await asyncio.sleep(0.3)
        # Send new color to each zone
        for index, zone in enumerate(zones):
            zone_hsbk = merge_hsbk(bulb.color_zones[zone], hsbk)
            # apply=1 only on the final message commits all queued changes.
            apply = 1 if (index == len(zones) - 1) else 0
            set_zone = partial(
                bulb.set_color_zones,
                start_index=zone,
                end_index=zone,
                color=zone_hsbk,
                duration=duration,
                apply=apply,
            )
            await ack(set_zone)

    async def async_update(self):
        """Update strip status."""
        if self.available and not self.lock.locked():
            await super().async_update()
            await self.update_color_zones()

    async def update_color_zones(self):
        """Get updated color information for each zone."""
        zone = 0
        top = 1
        while self.available and zone < top:
            # Each get_color_zones can update 8 zones at once
            resp = await AwaitAioLIFX().wait(
                partial(self.bulb.get_color_zones, start_index=zone)
            )
            if resp:
                zone += 8
                top = resp.count
                # We only await multizone responses so don't ask for just one
                if zone == top - 1:
                    zone -= 1
| {
"content_hash": "20a30cb2378c20c57002cd090ed6162a",
"timestamp": "",
"source": "github",
"line_count": 764,
"max_line_length": 86,
"avg_line_length": 32.6413612565445,
"alnum_prop": 0.5968802630523699,
"repo_name": "home-assistant/home-assistant",
"id": "998b99ef88f78f2526fa27c1e6c35aae0c30745d",
"size": "24938",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/lifx/light.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
class TreeNode(object):
    """Minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val, self.left, self.right = x, None, None
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """
        :type root: TreeNode
        :type p: TreeNode
        :type q: TreeNode
        :rtype: TreeNode
        """
        # Post-order DFS using sentinel return values:
        #   1        -> p was found somewhere in this subtree
        #   2        -> q was found somewhere in this subtree
        #   TreeNode -> the LCA has already been determined below
        #   None     -> neither p nor q occurs in this subtree
        # Assumes both p and q are present in the tree.
        def search(node):
            if not node:
                return None
            l = search(node.left)
            if isinstance(l, TreeNode):
                # LCA already found deeper on the left; bubble it up unchanged.
                return l
            r = search(node.right)
            if isinstance(r, TreeNode):
                return r
            if (l, r) in ((1, 2), (2, 1)):
                # p and q live in different subtrees: this node is the LCA.
                return node
            if (node == p) and (2 in (l, r)):
                # node itself is p and q is below it: p is the LCA.
                return node
            if (node == q) and (1 in (l, r)):
                return node
            if node == p:
                return 1
            if node == q:
                return 2
            # Propagate whichever sentinel (if any) was found below.
            return l or r
        return search(root)
| {
"content_hash": "d168c609466a322cc2f48908a6925279",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 47,
"avg_line_length": 21.52173913043478,
"alnum_prop": 0.41818181818181815,
"repo_name": "daicang/Leetcode-solutions",
"id": "240e99ab2bd16cd7718518c0952d110c0942d8a8",
"size": "1027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "236-lowest-common-ancestor-of-a-binary-tree.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91011"
},
{
"name": "Shell",
"bytes": "785"
}
],
"symlink_target": ""
} |
from django.views.decorators.http import require_GET
from app.constants.index import INDEX_DIR, ALL_TYPES
from app.errors import PersOARequiredFieldError, PersOANotFound, PersOALeftoverField
from app.models.choice import BasicChoice, LinearChoice, SubChoice
from app.models.group import TraitGroup
from app.models.trait import BasicTrait, LinearTrait
from app.views.field import Field
from app.views.whitelist import Whitelist
from app.views.search import WhooshIndex
from app.views.sanitize import json_return, persoa_output
# Do any preparatory work before starting requests
WhooshIndex.get(INDEX_DIR)
############################################################
# Find
############################################################
# Request-parameter whitelist for the find endpoint: maps incoming GET
# aliases onto canonical names, applying defaults and list coercion.
whitelist = (Whitelist()
    .add(Field(['query'], 'query', basestring)
        .default(None)
    )
    .add(Field(['name'], 'name', basestring)
        .default(None)
        .setting(Field.SETTINGS_LIST)
    )
    .add(Field(['desc'], 'desc', basestring)
        .default(None)
        .setting(Field.SETTINGS_LIST)
    )
    .add(Field(['type'], 'type', basestring)
        # 'type' is mandatory: the error default raises when it is missing.
        .default(PersOARequiredFieldError)
        .setting(Field.SETTINGS_LIST)
    )
    # Paging
    .add(Field(['pagelen', 'limit', 'max'], 'limit').default(10))
    .add(Field(['page', 'page_num'], 'page').default(1))
    # Include flags controlling which sections/fields the response carries.
    .include(['combine'], 'combine')
    .include(['trait', 'all'], 'trait')
    .include(['trait_name','name'], 'trait_name')
    .include(['choice', 'choices', 'all'], 'choice')
    .include(['choice_name', 'name'], 'choice_name')
    .include(['choice_desc', 'desc', 'details'], 'choice_desc')
    .include(['group', 'group', 'all'], 'group')
    .include(['group_name', 'name'], 'group_name')
)
@require_GET
@json_return
@persoa_output
def find(request, output=None):
    """Search the whoosh index for traits/choices/groups matching the GET
    parameters, fetch the matching rows from the database, and write a
    paged, type-grouped result to `output`.
    """
    # TODO add query
    whitelist.clear()
    # Translate the input
    kwargs = whitelist.process(request.GET)
    # Check the input for any problems
    whitelist.leftover(PersOALeftoverField)
    output.error(whitelist.errors())
    # Prepare the input
    if not (kwargs['query'] and kwargs['name'] and kwargs['desc'] and kwargs['type']):
        # If whoosh doesn't return all items then...?
        pass
    if kwargs['type'] is not None:
        # Expand type aliases ('all', 'trait', 'choice') into concrete classes.
        types = kwargs['type']
        kwargs['type'] = set()
        for t in types:
            if t == 'all':
                kwargs['type'] = ALL_TYPES.values()
                break
            elif t in ALL_TYPES.keys():
                kwargs['type'].add(ALL_TYPES[t])
            elif t == 'trait':
                # Bug fix: previously read the undefined name `args`,
                # raising NameError whenever type=trait was requested.
                kwargs['type'] = kwargs['type'] | WhooshIndex.CLASSES['trait']
            elif t == 'choice':
                # Bug fix: same undefined-`args` NameError as above.
                kwargs['type'] = kwargs['type'] | WhooshIndex.CLASSES['choice']
    # Find the Items
    results = WhooshIndex.get(INDEX_DIR).search(**kwargs)
    # Bucket the hits by item type so each type is fetched in one DB query.
    types = {}
    for hit in results['results']:
        if hit['type'] not in types:
            types[hit['type']] = []
        types[hit['type']].append(hit)
    # Get the items from the database and format them
    include = kwargs[Whitelist.INCLUDE_NAME]
    found = {'all': []}
    for cls in types.keys():
        items = (WhooshIndex.CLASSES['index'][cls].objects
            .select_related()
            .filter(id__in=[i['id'] for i in types[cls]])
        )
        items = [i.details(include) for i in items]
        # TODO: properly name this
        found[cls] = items
        found['all'] += items
    if include['combine']:
        found.pop('all')
    # Format the result
    found['page'] = results['page']
    found['pages'] = results['pages']
    found['total'] = results['total']
    output.output(found)
| {
"content_hash": "eab4ca020e7f44268cfcbbd3a2b8d1df",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 85,
"avg_line_length": 32.60176991150443,
"alnum_prop": 0.5863192182410424,
"repo_name": "Saevon/PersOA",
"id": "fb68ee9e676b31d5e8e7d7a2cd597a83bffd5184",
"size": "3684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views/find.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "249"
},
{
"name": "Python",
"bytes": "68102"
}
],
"symlink_target": ""
} |
from flask import Flask, request
from flask import render_template
from jinja2 import Markup
from brain import Brain
import note
import re
import jangopath
Make_Notes = 0
Internet_Check = 0
def strip_non_ascii(string):
    """Return the string with every character outside ASCII 1..126 removed."""
    return ''.join(ch for ch in string if 0 < ord(ch) < 127)
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def start():
    """Serve the main page and route queries.

    GET renders an empty page. For POST, a query containing "make"/"write"
    arms note-taking mode; the following query is then recorded as a note
    instead of being sent to the brain. Answers are read back from ans.txt,
    which the handlers write.
    """
    # Bug fix: the original declared `internet_Check`, a case typo that
    # never matched the module-level `Internet_Check` global.
    global Make_Notes, Internet_Check
    # Truncate the answer file so a previous request's answer cannot leak.
    ans_path = jangopath.HOME_DIR + "/ans.txt"
    open(ans_path, 'w+').close()
    if request.method == 'GET':
        return render_template('test.html', res="")
    brain = Brain()
    query = request.form['url']
    if re.search('make', query, re.IGNORECASE) or re.search('write', query, re.IGNORECASE):
        # Arm note-taking: the *next* query will be saved as a note.
        Make_Notes = 1
        return render_template('test.html', res="What shall I note down?")
    if Make_Notes == 1:
        Make_Notes = 0
        note.handle(query)
    else:
        brain.query(query)
    # The brain/note handlers write their answer into ans.txt; read it back.
    ans_file = open(ans_path, 'r')
    res = ans_file.read()
    ans_file.close()
    res = strip_non_ascii(res)
    res = res.replace('\n', '')
    res = Markup(res)
    # Search-style queries render into `res`, everything else into `res2`.
    # (Debug print statements removed.)
    if re.search('search', query, re.IGNORECASE):
        return render_template('test.html', res=res)
    return render_template('test.html', res2=res)
if __name__ == '__main__':
app.run(debug=True) | {
"content_hash": "fa9c2a79387cc7d4ea6084d014181445",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 117,
"avg_line_length": 26.551724137931036,
"alnum_prop": 0.5857142857142857,
"repo_name": "rahulxxarora/Jango",
"id": "f28898591961cc0d33c8a84e9916d274587e9317",
"size": "1540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jango/Interactive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3673"
},
{
"name": "Python",
"bytes": "22091"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Member, ResearchGroup, Division
class ImmutableModelMixin(object):
    """A mixin class for a ModelAdmin which should never allow adding or
    deleting objects. Use this for models which are immutable and managed
    entirely by migrations.
    """

    def has_add_permission(self, request):  # pylint: disable=unused-argument
        """Never allow adding objects through the admin."""
        return False

    def has_delete_permission(self, request, obj=None):  # pylint: disable=unused-argument
        """Never allow deleting objects through the admin."""
        return False
class MemberAdmin(admin.ModelAdmin):
    """Admin for Member rows; crsid/division/research_group are the
    computed columns defined below."""
    list_display = ('crsid', 'full_name', 'division', 'research_group',
                    'is_active')
    list_filter = ('is_active',)

    def get_queryset(self, request):
        # Present members in a stable order by username (the CRSid).
        return Member.objects.order_by('user__username')

    def crsid(self, obj):
        """Return the member's CRSid (stored as the Django username)."""
        return obj.user.username
    crsid.admin_order_field = 'user__username'

    def full_name(self, obj):
        return obj.user.get_full_name()

    def division(self, obj):
        # Division is reached through the research group; members without
        # a group have no division.
        if obj.research_group is None:
            return None
        return obj.research_group.division
    division.admin_order_field = 'research_group__division__letter'

    def research_group(self, obj):
        return obj.research_group
    research_group.admin_order_field = 'research_group__name'
admin.site.register(Member, MemberAdmin)
class ResearchGroupAdmin(ImmutableModelMixin, admin.ModelAdmin):
    """Read-mostly admin for ResearchGroup; rows are managed by migrations."""
    list_display = ('name', 'division')

    def get_queryset(self, request):
        # Group the list by division letter, then by group name.
        return ResearchGroup.objects.order_by('division__letter', 'name')
admin.site.register(ResearchGroup, ResearchGroupAdmin)
class DivisionAdmin(ImmutableModelMixin, admin.ModelAdmin):
    """Read-mostly admin for Division; rows are managed by migrations."""
    list_display = ('letter', 'name')

    def get_queryset(self, request):
        return Division.objects.order_by('letter')
admin.site.register(Division, DivisionAdmin)
| {
"content_hash": "a0ff0d038bb793dbb51320bcc96f6e87",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 30.62295081967213,
"alnum_prop": 0.6873661670235546,
"repo_name": "rjw57/edpcmentoring",
"id": "20763111386abb758498358fddf855128773a74b",
"size": "1868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "edpcmentoring/cuedmembers/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128844"
},
{
"name": "HTML",
"bytes": "33481"
},
{
"name": "JavaScript",
"bytes": "199702"
},
{
"name": "Python",
"bytes": "83753"
},
{
"name": "Shell",
"bytes": "514"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import copy
from collections import deque
from pants.backend.core.tasks.console_task import ConsoleTask
from pants.base.exceptions import TaskError
from pants.util.strutil import pluralize
def format_path(path):
    """Render a dependency path as '[addr1, addr2, ...]'."""
    references = (target.address.reference() for target in path)
    return '[{}]'.format(', '.join(references))
def find_paths_breadth_first(from_target, to_target, log):
"""Yields the paths between from_target to to_target if they exist.
The paths are returned ordered by length, shortest first.
If there are cycles, it checks visited edges to prevent recrossing them."""
log.debug('Looking for all paths from {} to {}'.format(from_target.address.reference(),
to_target.address.reference()))
if from_target == to_target:
yield [from_target]
return
visited_edges = set()
to_walk_paths = deque([[from_target]])
while len(to_walk_paths) > 0:
cur_path = to_walk_paths.popleft()
target = cur_path[-1]
if len(cur_path) > 1:
prev_target = cur_path[-2]
else:
prev_target = None
current_edge = (prev_target, target)
if current_edge not in visited_edges:
for dep in target.dependencies:
dep_path = cur_path + [dep]
if dep == to_target:
yield dep_path
else:
to_walk_paths.append(dep_path)
visited_edges.add(current_edge)
class PathFinder(ConsoleTask):
  """Shared base for console tasks that trace paths between two target roots."""

  def __init__(self, *args, **kwargs):
    super(PathFinder, self).__init__(*args, **kwargs)
    context = self.context
    self.log = context.log
    self.target_roots = context.target_roots

  def validate_target_roots(self):
    """Raise TaskError unless exactly two target roots were specified."""
    root_count = len(self.target_roots)
    if root_count != 2:
      raise TaskError('Specify two targets please (found {})'.format(root_count))
class Path(PathFinder):
  """Find a dependency path from one target to another."""

  def console_output(self, ignored_targets):
    self.validate_target_roots()
    from_target, to_target = self.target_roots

    # Paths come out shortest-first, so the first one is the answer.
    shortest = next(find_paths_breadth_first(from_target, to_target, self.log), None)
    if shortest is not None:
      yield format_path(shortest)
    else:
      yield 'No path found from {} to {}!'.format(from_target.address.reference(),
                                                  to_target.address.reference())
class Paths(PathFinder):
  """Find all dependency paths from one target to another."""

  def console_output(self, ignored_targets):
    self.validate_target_roots()
    from_target, to_target = self.target_roots

    all_paths = list(find_paths_breadth_first(from_target, to_target, self.log))
    yield 'Found {}'.format(pluralize(len(all_paths), 'path'))
    if all_paths:
      yield ''
      for dep_path in all_paths:
        yield '\t{}'.format(format_path(dep_path))
| {
"content_hash": "a5c51ed4be006c71539e7131cd9107fc",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 93,
"avg_line_length": 32.08791208791209,
"alnum_prop": 0.6462328767123288,
"repo_name": "slyphon/pants",
"id": "676a9c4ca251641b42c38c3d8c222d050d366b29",
"size": "3067",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/core/tasks/paths.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "767"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "70362"
},
{
"name": "Java",
"bytes": "309840"
},
{
"name": "JavaScript",
"bytes": "28545"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4020643"
},
{
"name": "Scala",
"bytes": "85437"
},
{
"name": "Shell",
"bytes": "49550"
},
{
"name": "Thrift",
"bytes": "2858"
}
],
"symlink_target": ""
} |
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
import sys
from oslo.config import cfg
from nova.i18n import _
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import event as virtevent
# Configuration options shared by all virt drivers: which driver class to
# load, plus a few cross-driver behaviours (ephemeral disk formatting,
# image preallocation, CoW images, and neutron VIF-plugging handling).
driver_opts = [
    cfg.StrOpt('compute_driver',
               help='Driver to use for controlling virtualization. Options '
                    'include: libvirt.LibvirtDriver, xenapi.XenAPIDriver, '
                    'fake.FakeDriver, baremetal.BareMetalDriver, '
                    'vmwareapi.VMwareVCDriver, hyperv.HyperVDriver'),
    cfg.StrOpt('default_ephemeral_format',
               help='The default format an ephemeral_volume will be '
                    'formatted with on creation.'),
    cfg.StrOpt('preallocate_images',
               default='none',
               help='VM image preallocation mode: '
                    '"none" => no storage provisioning is done up front, '
                    '"space" => storage is fully allocated at instance start'),
    cfg.BoolOpt('use_cow_images',
                default=True,
                help='Whether to use cow images'),
    cfg.BoolOpt('vif_plugging_is_fatal',
                default=True,
                help="Fail instance boot if vif plugging fails"),
    cfg.IntOpt('vif_plugging_timeout',
               default=300,
               help='Number of seconds to wait for neutron vif plugging '
                    'events to arrive before continuing or failing (see '
                    'vif_plugging_is_fatal). If this is set to zero and '
                    'vif_plugging_is_fatal is False, events should not '
                    'be expected to arrive at all.'),
]

# Register the options on the global config object at import time so they
# are available as CONF.<option_name> wherever this module is imported.
CONF = cfg.CONF
CONF.register_opts(driver_opts)

# Module-level logger, per the oslo logging convention.
LOG = logging.getLogger(__name__)
def driver_dict_from_config(named_driver_config, *args, **kwargs):
    """Build a registry of instantiated drivers from 'type=class' strings.

    :param named_driver_config: iterable of strings of the form
        '<driver_type>=<importable.driver.Class>'
    :param args: positional arguments forwarded to each driver constructor
    :param kwargs: keyword arguments forwarded to each driver constructor
    :returns: dict mapping each driver_type to an instantiated driver
    """
    registry = {}
    for entry in named_driver_config:
        driver_type, _unused, class_path = entry.partition('=')
        # Import lazily so only the configured drivers get loaded.
        driver_class = importutils.import_class(class_path)
        registry[driver_type] = driver_class(*args, **kwargs)
    return registry
def block_device_info_get_root(block_device_info):
    """Return the root device name recorded in block_device_info, if any.

    :param block_device_info: dict of block device information, or None
    :returns: the 'root_device_name' entry, or None when absent
    """
    return (block_device_info or {}).get('root_device_name')
def block_device_info_get_swap(block_device_info):
    """Return the swap mapping from block_device_info.

    Falls back to a placeholder mapping (no device, zero size) when no
    swap information is present.

    :param block_device_info: dict of block device information, or None
    :returns: the 'swap' mapping, or ``{'device_name': None, 'swap_size': 0}``
    """
    info = block_device_info or {}
    swap = info.get('swap')
    if swap:
        return swap
    return {'device_name': None, 'swap_size': 0}
def swap_is_usable(swap):
    """Check whether a swap mapping describes a real, non-empty swap device.

    Mirrors the short-circuit semantics of ``a and b and c``: the first
    falsy operand (or the final size comparison) is returned unchanged, so
    the result is truthy/falsy rather than strictly boolean.
    """
    if not swap:
        return swap
    device = swap['device_name']
    if not device:
        return device
    return swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
    """Return the list of ephemeral disk mappings (possibly empty).

    :param block_device_info: dict of block device information, or None
    :returns: the 'ephemerals' entry, or [] when absent or falsy
    """
    return (block_device_info or {}).get('ephemerals') or []
def block_device_info_get_mapping(block_device_info):
    """Return the volume block device mappings (possibly empty).

    :param block_device_info: dict of block device information, or None
    :returns: the 'block_device_mapping' entry, or [] when absent or falsy
    """
    return (block_device_info or {}).get('block_device_mapping') or []
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
capabilities = {
"has_imagecache": False,
"supports_recreate": False,
}
    def __init__(self, virtapi):
        """Create the driver.

        :param virtapi: object giving the driver limited access to
            compute-node state without touching the database directly
            (presumably a nova.virt VirtAPI implementation -- confirm
            against the caller)
        """
        self.virtapi = virtapi
        # Lifecycle-event callback; None until registered. Registration
        # happens outside this chunk -- presumably by the compute manager.
        self._compute_event_callback = None
    def init_host(self, host):
        """Initialize anything that is necessary for the driver to function,
        including catching up with currently running VM's on the given host.

        :param host: name of the host this driver runs on
        :raises NotImplementedError: concrete drivers must override this
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
    def cleanup_host(self, host):
        """Clean up anything that is necessary for the driver to stop
        gracefully, including ending remote sessions. This is optional.

        :param host: name of the host this driver runs on
        """
        # Intentionally a no-op: overriding this hook is optional.
        pass
def get_info(self, instance):
"""Get the current status of an instance, by name (not ID!)
:param instance: nova.objects.instance.Instance object
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance):
"""Checks existence of an instance on the host.
:param instance: The instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
try:
return instance.uuid in self.list_instance_uuids()
except NotImplementedError:
return instance.name in self.list_instances()
def estimate_instance_overhead(self, instance_info):
"""Estimate the virtualization overhead required to build an instance
of the given flavor.
Defaults to zero, drivers should override if per-instance overhead
calculations are desired.
:param instance_info: Instance/flavor to calculate overhead for.
:returns: Dict of estimated overhead values.
"""
return {'memory_mb': 0}
def list_instances(self):
"""Return the names of all the instances known to the virtualization
layer, as a list.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instance_uuids(self):
"""Return the UUIDS of all the instances known to the virtualization
layer, as a list.
"""
raise NotImplementedError()
def rebuild(self, context, instance, image_meta, injected_files,
admin_password, bdms, detach_block_devices,
attach_block_devices, network_info=None,
recreate=False, block_device_info=None,
preserve_ephemeral=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
remakes the VM with given 'metadata' and 'personalities'.
This base class method shuts down the VM, detaches all block devices,
then spins up the new VM afterwards. It may be overridden by
hypervisors that need to - e.g. for optimisations, or when the 'VM'
is actually proxied and needs to be held across the shutdown + spin
up steps.
:param context: security context
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param bdms: block-device-mappings to use for rebuild
:param detach_block_devices: function to detach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage.
:param attach_block_devices: function to attach block devices. See
nova.compute.manager.ComputeManager:_rebuild_default_impl for
usage.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param recreate: True if the instance is being recreated on a new
hypervisor - all the cleanup of old state is skipped.
:param block_device_info: Information about block devices to be
attached to the instance.
:param preserve_ephemeral: True if the default ephemeral storage
partition must be preserved on rebuild
"""
raise NotImplementedError()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
"""Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: nova.objects.instance.Instance
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param injected_files: User files to inject into instance.
:param admin_password: Administrator password to set in instance.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy the specified instance from the Hypervisor.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
:param destroy_disks: Indicates if disks should be destroyed
:param migrate_data: implementation specific params
"""
raise NotImplementedError()
    def cleanup(self, context, instance, network_info, block_device_info=None,
                destroy_disks=True, migrate_data=None, destroy_vifs=True):
        """Clean up the instance's resources.

        Instance should have been destroyed from the Hypervisor before calling
        this method.

        :param context: security context
        :param instance: Instance object as returned by DB layer.
        :param network_info:
           :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
        :param block_device_info: Information about block devices that should
                                  be detached from the instance.
        :param destroy_disks: Indicates if disks should be destroyed
        :param migrate_data: implementation specific params
        :param destroy_vifs: presumably controls whether the instance's VIFs
            are unplugged -- confirm against concrete drivers
        """
        raise NotImplementedError()
def reboot(self, context, instance, network_info, reboot_type,
block_device_info=None, bad_volumes_callback=None):
"""Reboot the specified instance.
After this is called successfully, the instance's state
goes back to power_state.RUNNING. The virtualization
platform should ensure that the reboot action has completed
successfully even in cases in which the underlying domain/vm
is paused or halted/stopped.
:param instance: nova.objects.instance.Instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
:param block_device_info: Info pertaining to attached volumes
:param bad_volumes_callback: Function to handle any bad volumes
encountered
"""
raise NotImplementedError()
    def get_console_pool_info(self, console_type):
        """Return connection pool information for consoles of the given type.

        NOTE(review): the contract is not visible from this stub --
        presumably returns address/credential info for the console proxy;
        confirm against a concrete driver.

        :param console_type: kind of console to describe
        :raises NotImplementedError: concrete drivers must override this
        """
        # TODO(Vek): Need to pass context in for access to auth_token
        raise NotImplementedError()
def get_console_output(self, context, instance):
"""Get console output for an instance
:param context: security context
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_vnc_console(self, context, instance):
"""Get connection info for a vnc console.
:param context: security context
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_spice_console(self, context, instance):
"""Get connection info for a spice console.
:param context: security context
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_rdp_console(self, context, instance):
"""Get connection info for a rdp console.
:param context: security context
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_instance_diagnostics(self, instance):
"""Return data about VM diagnostics.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_all_bw_counters(self, instances):
"""Return bandwidth usage counters for each interface on each
running VM.
:param instances: nova.objects.instance.InstanceList
"""
raise NotImplementedError()
    def get_all_volume_usage(self, context, compute_host_bdms):
        """Return usage info for volumes attached to vms on
        a given host.

        :param context: security context
        :param compute_host_bdms: block device mappings for the instances
            on this host
        """
        raise NotImplementedError()
def get_host_ip_addr(self):
"""Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
"""Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint,
encryption=None):
"""Detach the disk attached to the instance."""
raise NotImplementedError()
    def swap_volume(self, old_connection_info, new_connection_info,
                    instance, mountpoint, resize_to):
        """Replace the disk attached to the instance.

        :param old_connection_info: connection info of the volume being
            replaced
        :param new_connection_info: connection info of the replacement volume
        :param instance: nova.objects.instance.Instance
        :param mountpoint: device path at which the volume is attached
        :param resize_to: the new volume size, in gigabytes, when the new
            volume is larger than the old one
        """
        raise NotImplementedError()
def attach_interface(self, instance, image_meta, vif):
"""Attach an interface to the instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def detach_interface(self, instance, vif):
"""Detach an interface from the instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
"""Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
:param instance: nova.objects.instance.Instance
:param timeout: time to wait for GuestOS to shutdown
:param retry_interval: How often to signal guest while
waiting for it to shutdown
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id, update_task_state):
"""Snapshots the specified instance.
:param context: security context
:param instance: nova.objects.instance.Instance
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def post_interrupted_snapshot_cleanup(self, context, instance):
"""Cleans up any resources left after an interrupted snapshot.
:param context: security context
:param instance: nova.objects.instance.Instance
"""
pass
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize.
:param context: the context for the migration/resize
:param migration: the migrate/resize information
:param instance: nova.objects.instance.Instance being migrated/resized
:param disk_info: the newly transferred disk information
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
:param resize_instance: True if the instance is being resized,
False otherwise
:param block_device_info: instance volume block device info
:param power_on: True if the instance should be powered on, False
otherwise
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize.
:param context: the context for the finish_revert_migration
:param instance: nova.objects.instance.Instance being migrated/resized
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: instance volume block device info
:param power_on: True if the instance should be powered on, False
otherwise
"""
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
"""suspend the specified instance.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, context, instance, network_info, block_device_info=None):
"""resume the specified instance.
:param context: the context for the resume
:param instance: nova.objects.instance.Instance being resumed
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: instance volume block device info
"""
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
"""resume guest state when a host is booted.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
"""Rescue the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def set_bootable(self, instance, is_bootable):
"""Set the ability to power on/off an instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance.
:param instance: nova.objects.instance.Instance
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.
:param instance: nova.objects.instance.Instance
:param timeout: time to wait for GuestOS to shutdown
:param retry_interval: How often to signal guest while
waiting for it to shutdown
"""
raise NotImplementedError()
def power_on(self, context, instance, network_info,
block_device_info=None):
"""Power on the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def soft_delete(self, instance):
"""Soft delete the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def restore(self, instance):
"""Restore the specified instance.
:param instance: nova.objects.instance.Instance
"""
raise NotImplementedError()
def get_available_resource(self, nodename):
"""Retrieve resource information.
This method is called when nova-compute launches, and
as part of a periodic task that records the results in the DB.
:param nodename:
node which the caller want to get resources from
a driver that manages only one node can safely ignore this
:returns: Dictionary describing resources
"""
raise NotImplementedError()
def pre_live_migration(self, context, instance, block_device_info,
network_info, disk_info, migrate_data=None):
"""Prepare an instance for live migration
:param context: security context
:param instance: nova.objects.instance.Instance object
:param block_device_info: instance block device information
:param network_info: instance network information
:param disk_info: instance disk information
:param migrate_data: implementation specific data dict.
"""
raise NotImplementedError()
def live_migration(self, context, instance, dest,
post_method, recover_method, block_migration=False,
migrate_data=None):
"""Live migration of an instance to another host.
:param context: security context
:param instance:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager._post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager._rollback_live_migration.
:param block_migration: if true, migrate VM disk.
:param migrate_data: implementation specific params.
"""
raise NotImplementedError()
def rollback_live_migration_at_destination(self, context, instance,
network_info,
block_device_info,
destroy_disks=True,
migrate_data=None):
"""Clean up destination node after a failed live migration.
:param context: security context
:param instance: instance object that was being migrated
:param network_info: instance network information
:param block_device_info: instance block device information
:param destroy_disks:
if true, destroy disks at destination during cleanup
:param migrate_data: implementation specific params
"""
raise NotImplementedError()
    def post_live_migration(self, context, instance, block_device_info,
                            migrate_data=None):
        """Post operation of live migration at source host.

        :param context: security context
        :param instance: instance object that was migrated
        :param block_device_info: instance block device information
        :param migrate_data: if not None, a dict of implementation-specific
            data carried over from the migration
        """
        # Intentionally a no-op in the base class: overriding is optional.
        pass
def post_live_migration_at_source(self, context, instance, network_info):
"""Unplug VIFs from networks at source.
:param context: security context
:param instance: instance object reference
:param network_info: instance network information
"""
raise NotImplementedError(_("Hypervisor driver does not support "
"post_live_migration_at_source method"))
    def post_live_migration_at_destination(self, context, instance,
                                           network_info,
                                           block_migration=False,
                                           block_device_info=None):
        """Post operation of live migration at destination host.

        :param context: security context
        :param instance: instance object that is migrated
        :param network_info: instance network information
        :param block_migration: if true, post operation of block_migration.
        :param block_device_info: instance block device information
        """
        raise NotImplementedError()
def check_instance_shared_storage_local(self, context, instance):
"""Check if instance files located on shared storage.
This runs check on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
"""
raise NotImplementedError()
def check_instance_shared_storage_remote(self, context, data):
"""Check if instance files located on shared storage.
:param context: security context
:param data: result of check_instance_shared_storage_local
"""
raise NotImplementedError()
def check_instance_shared_storage_cleanup(self, context, data):
"""Do cleanup on host after check_instance_shared_storage calls
:param context: security context
:param data: result of check_instance_shared_storage_local
"""
pass
def check_can_live_migrate_destination(self, context, instance,
src_compute_info, dst_compute_info,
block_migration=False,
disk_over_commit=False):
"""Check if it is possible to execute live migration.
This runs checks on the destination host, and then calls
back to the source host to check the results.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param src_compute_info: Info about the sending machine
:param dst_compute_info: Info about the receiving machine
:param block_migration: if true, prepare for block migration
:param disk_over_commit: if true, allow disk over commit
:returns: a dict containing migration info (hypervisor-dependent)
"""
raise NotImplementedError()
def check_can_live_migrate_destination_cleanup(self, context,
dest_check_data):
"""Do required cleanup on dest host after check_can_live_migrate calls
:param context: security context
:param dest_check_data: result of check_can_live_migrate_destination
"""
raise NotImplementedError()
def check_can_live_migrate_source(self, context, instance,
dest_check_data):
"""Check if it is possible to execute live migration.
This checks if the live migration can succeed, based on the
results from check_can_live_migrate_destination.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
:param dest_check_data: result of check_can_live_migrate_destination
:returns: a dict containing migration info (hypervisor-dependent)
"""
raise NotImplementedError()
def get_instance_disk_info(self, instance_name,
block_device_info=None):
"""Retrieve information about actual disk sizes of an instance.
:param instance_name:
name of a nova instance as returned by list_instances()
:param block_device_info:
Optional; Can be used to filter out devices which are
actually volumes.
:return:
json strings with below format::
"[{'path':'disk',
'type':'raw',
'virt_disk_size':'10737418240',
'backing_file':'backing_file',
'disk_size':'83886080'
'over_committed_disk_size':'10737418240'},
...]"
"""
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_instance_security_rules(self, instance):
"""Refresh security group rules
Gets called when an instance gets added to or removed from
the security group the instance is a member of or if the
group gains or loses a rule.
"""
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:param instance: nova.objects.instance.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def filter_defer_apply_on(self):
"""Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
"""Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
    def set_admin_password(self, instance, new_pass):
        """Set the root password on the specified instance.

        :param instance: nova.objects.instance.Instance
        :param new_pass: the new password
        """
        raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
NOTE(russellb) This method is deprecated and will be removed once it
can be removed from nova.compute.manager.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def change_instance_metadata(self, context, instance, diff):
    """Applies a diff to the instance metadata.

    This is an optional driver method which is used to publish
    changes to the instance's metadata to the hypervisor. If the
    hypervisor has no means of publishing the instance metadata to
    the instance, then this method should not be implemented.

    :param context: security context
    :param instance: nova.objects.instance.Instance
    :param diff: the metadata changes to publish
    """
    pass

def inject_network_info(self, instance, nw_info):
    """Inject network info for specified instance."""
    # TODO(Vek): Need to pass context in for access to auth_token
    pass

def poll_rebooting_instances(self, timeout, instances):
    """Poll for rebooting instances.

    :param timeout: the currently configured timeout for considering
                    rebooting instances to be stuck
    :param instances: instances that have been in rebooting state
                      longer than the configured timeout
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def host_power_action(self, host, action):
    """Reboots, shuts down or powers up the host.

    :param host: the host to act on
    :param action: the power action to perform
    """
    raise NotImplementedError()

def host_maintenance_mode(self, host, mode):
    """Start/Stop host maintenance window. On start, it triggers
    guest VMs evacuation.
    """
    raise NotImplementedError()

def set_host_enabled(self, host, enabled):
    """Sets the specified host's ability to accept new instances."""
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()

def get_host_uptime(self, host):
    """Returns the result of calling "uptime" on the target host."""
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()
def plug_vifs(self, instance, network_info):
    """Plug VIFs into networks.

    :param instance: nova.objects.instance.Instance
    :param network_info: network metadata describing the VIFs to plug
    """
    # TODO(Vek): Need to pass context in for access to auth_token
    raise NotImplementedError()

def unplug_vifs(self, instance, network_info):
    """Unplug VIFs from networks.

    :param instance: nova.objects.instance.Instance
    :param network_info: network metadata describing the VIFs to unplug
    """
    raise NotImplementedError()
def get_host_stats(self, refresh=False):
    """Return currently known host stats.

    If the hypervisor supports pci passthrough, the returned
    dictionary includes a key-value pair for it.
    The key of pci passthrough device is "pci_passthrough_devices"
    and the value is a json string for the list of assignable
    pci devices. Each device is a dictionary, with mandatory
    keys of 'address', 'vendor_id', 'product_id', 'dev_type',
    'dev_id', 'label' and other optional device specific information.

    Refer to the objects/pci_device.py for more idea of these keys.

    :param refresh: if True, re-query the hypervisor instead of
        returning cached values
    """
    raise NotImplementedError()

def get_host_cpu_stats(self):
    """Get the currently known host CPU stats.

    :returns: a dict containing the CPU stat info, eg:

        | {'kernel': kern,
        |  'idle': idle,
        |  'user': user,
        |  'iowait': wait,
        |  'frequency': freq},

        where kern and user indicate the cumulative CPU time
        (nanoseconds) spent by kernel and user processes
        respectively, idle indicates the cumulative idle CPU time
        (nanoseconds), wait indicates the cumulative I/O wait CPU
        time (nanoseconds), since the host is booting up; freq
        indicates the current CPU frequency (MHz). All values are
        long integers.
    """
    raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
    """Return performance counters associated with the given disk_id on the
    given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
    wr_bytes, errs], where rd indicates read, wr indicates write, req is
    the total number of I/O requests made, bytes is the total number of
    bytes transferred, and errs is the number of requests held up due to a
    full pipeline.

    All counters are long integers.

    This method is optional. On some platforms (e.g. XenAPI) performance
    statistics can be retrieved directly in aggregate form, without Nova
    having to do the aggregation. On those platforms, this method is
    unused.

    Note that this function takes an instance name (``instance_name``),
    not a UUID.
    """
    raise NotImplementedError()

def interface_stats(self, instance_name, iface_id):
    """Return performance counters associated with the given iface_id
    on the given instance_id. These are returned as [rx_bytes, rx_packets,
    rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
    indicates receive, tx indicates transmit, bytes and packets indicate
    the total number of bytes or packets transferred, and errs and dropped
    is the total number of packets failed / dropped.

    All counters are long integers.

    This method is optional. On some platforms (e.g. XenAPI) performance
    statistics can be retrieved directly in aggregate form, without Nova
    having to do the aggregation. On those platforms, this method is
    unused.

    Note that this function takes an instance name (``instance_name``),
    not a UUID.
    """
    raise NotImplementedError()
def deallocate_networks_on_reschedule(self, instance):
    """Report whether networks should be deallocated on reschedule.

    :param instance: the instance being rescheduled (unused by the
        default implementation)
    :returns: False -- by default drivers keep network allocations
        across a reschedule.
    """
    return False
def macs_for_instance(self, instance):
    """Return the set of MAC addresses this instance must have, if any.

    Some hypervisors (such as bare metal) cannot do freeform
    virtualisation of MAC addresses, so their drivers override this to
    return the fixed set of MACs the instance must use.
    allocate_for_instance takes the returned set into consideration when
    provisioning networking; mapping of MACs to actual networks is up to
    the network implementation layer.  This is called during
    spawn_instance by the compute manager.

    :param instance: the instance being spawned
    :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
        None means 'no constraints'; a set means 'these and only these
        MAC addresses'.  The default is unconstrained.
    """
    # Most hypervisors support arbitrary MACs, so impose no constraint.
    return None
def dhcp_options_for_instance(self, instance):
    """Return DHCP options for this instance, if the driver needs any.

    Some hypervisors (such as bare metal) require that instances boot
    from the network and manage their own TFTP service, which requires
    passing options out to the DHCP service.  Most hypervisors need
    nothing special and use this default, which returns None.  Called
    during spawn_instance by the compute manager.  The return format is
    specific to the Quantum client API.

    :param instance: the instance being spawned
    :return: None, or a set of DHCP options, eg:

        | [{'opt_name': 'bootfile-name',
        |   'opt_value': '/tftpboot/path/to/config'},
        |  {'opt_name': 'server-ip-address',
        |   'opt_value': '1.2.3.4'},
        |  {'opt_name': 'tftp-server',
        |   'opt_value': '1.2.3.4'}
        | ]
    """
    # Default: no driver-specific DHCP options.
    return None
def manage_image_cache(self, context, all_instances):
    """Manage the driver's local image cache.

    Some drivers chose to cache images for instances on disk. This method
    is an opportunity to do management of that cache which isn't directly
    related to other calls into the driver. The prime example is to clean
    the cache and remove images which are no longer of interest.

    :param context: security context
    :param all_instances: nova.objects.instance.InstanceList
    """
    pass

def add_to_aggregate(self, context, aggregate, host, **kwargs):
    """Add a compute host to an aggregate."""
    # NOTE(jogo) Currently only used for XenAPI-Pool
    raise NotImplementedError()

def remove_from_aggregate(self, context, aggregate, host, **kwargs):
    """Remove a compute host from an aggregate."""
    raise NotImplementedError()

def undo_aggregate_operation(self, context, op, aggregate,
                             host, set_error=True):
    """Undo for Resource Pools."""
    raise NotImplementedError()
def get_volume_connector(self, instance):
    """Get connector information for the instance for attaching to volumes.

    Connector information is a dictionary representing the ip of the
    machine that will be making the connection, the name of the iscsi
    initiator and the hostname of the machine as follows::

        {
            'ip': ip,
            'initiator': initiator,
            'host': hostname
        }

    :param instance: the instance that will attach to the volume
    """
    raise NotImplementedError()
def get_available_nodes(self, refresh=False):
    """Returns nodenames of all nodes managed by the compute service.

    This method is for multi compute-nodes support. If a driver supports
    multi compute-nodes, this method returns a list of nodenames managed
    by the service. Otherwise, this method should return
    [hypervisor_hostname].

    :param refresh: forwarded to get_host_stats to bypass its cache
    :returns: list of hypervisor hostnames
    """
    host_stats = self.get_host_stats(refresh=refresh)
    # A single-node driver may return one stats dict instead of a list.
    stats_list = host_stats if isinstance(host_stats, list) else [host_stats]
    return [entry['hypervisor_hostname'] for entry in stats_list]
def node_is_available(self, nodename):
    """Return whether this compute service manages a particular node.

    :param nodename: hypervisor hostname to look for
    :returns: True if the node is managed by this service
    """
    known_nodes = self.get_available_nodes()
    if nodename in known_nodes:
        return True
    # Not in the cached view -- refresh once and check again.
    return nodename in self.get_available_nodes(refresh=True)
def get_per_instance_usage(self):
    """Get information about instance resource usage.

    :returns: dict of nova uuid => dict of usage info; empty by
        default for drivers that do not track per-instance usage.
    """
    return {}
def instance_on_disk(self, instance):
    """Checks access of instance files on the host.

    :param instance: nova.objects.instance.Instance to lookup
    :returns: True if files of an instance with the supplied ID are
        accessible on the host, False otherwise.

    .. note::
        Used in rebuild for HA implementation and required for
        validation of access to instance shared disk files.
    """
    # Conservative default: assume the files are not reachable.
    return False
def register_event_listener(self, callback):
    """Register a callback to receive events.

    Register a callback to receive asynchronous event notifications
    from hypervisors.  The callback is invoked with a single
    parameter, an instance of the nova.virt.event.Event class.

    :param callback: callable taking one Event argument
    """
    # Stored for emit_event() to dispatch through later.
    self._compute_event_callback = callback
def emit_event(self, event):
    """Dispatches an event to the compute manager.

    Invokes the event callback registered by the
    compute manager to dispatch the event. This
    must only be invoked from a green thread.

    :param event: a nova.virt.event.Event instance
    :raises ValueError: if ``event`` is not a virtevent.Event
    """
    if not self._compute_event_callback:
        # No listener registered yet -- drop the event.
        LOG.debug("Discarding event %s", str(event))
        return
    if not isinstance(event, virtevent.Event):
        raise ValueError(
            _("Event must be an instance of nova.virt.event.Event"))
    try:
        LOG.debug("Emitting event %s", str(event))
        self._compute_event_callback(event)
    except Exception as ex:
        # Never let a listener failure propagate back to the caller.
        LOG.error(_("Exception dispatching event %(event)s: %(ex)s"),
                  {'event': event, 'ex': ex})
def delete_instance_files(self, instance):
    """Delete any lingering instance files for an instance.

    :param instance: nova.objects.instance.Instance
    :returns: True if the instance was deleted from disk, False otherwise.
    """
    # Default implementation reports success without touching disk.
    return True

@property
def need_legacy_block_device_info(self):
    """Tell the caller if the driver requires legacy block device info.

    Tell the caller whether we expect the legacy format of block
    device info to be passed in to methods that expect it.
    """
    return True
def volume_snapshot_create(self, context, instance, volume_id,
                           create_info):
    """Snapshots volumes attached to a specified instance.

    :param context: request context
    :param instance: nova.objects.instance.Instance that has the volume
        attached
    :param volume_id: Volume to be snapshotted
    :param create_info: The data needed for nova to be able to attach
        to the volume. This is the same data format returned by
        Cinder's initialize_connection() API call. In the case of
        doing a snapshot, it is the image file Cinder expects to be
        used as the active disk after the snapshot operation has
        completed. There may be other data included as well that is
        needed for creating the snapshot.
    """
    raise NotImplementedError()

def volume_snapshot_delete(self, context, instance, volume_id,
                           snapshot_id, delete_info):
    """Deletes a snapshot of a volume attached to a specified instance.

    :param context: request context
    :param instance: nova.objects.instance.Instance that has the volume
        attached
    :param volume_id: Attached volume associated with the snapshot
    :param snapshot_id: The snapshot to delete.
    :param delete_info: Volume backend technology specific data needed to
        be able to complete the snapshot. For example, in the case of
        qcow2 backed snapshots, this would include the file being
        merged, and the file being merged into (if appropriate).
    """
    raise NotImplementedError()

def default_root_device_name(self, instance, image_meta, root_bdm):
    """Provide a default root device name for the driver."""
    raise NotImplementedError()

def default_device_names_for_instance(self, instance, root_device_name,
                                      *block_device_lists):
    """Default the missing device names in the block device mapping."""
    raise NotImplementedError()
def is_supported_fs_format(self, fs_type):
    """Check whether the file format is supported by this driver.

    :param fs_type: the file system type to be checked; valid values
        are defined at the disk API module.
    :returns: False unless a hypervisor's virt layer overrides this to
        declare its supported file system types.
    """
    # NOTE(jichenjc): every hypervisor must define its supported file
    # system types by overriding this at its virt layer.
    return False
def load_compute_driver(virtapi, compute_driver=None):
    """Load a compute driver module.

    Load the compute driver module specified by the compute_driver
    configuration option or, if supplied, the driver name supplied as an
    argument.  Compute driver constructors take a VirtAPI object as
    their first argument and this must be supplied.

    :param virtapi: a VirtAPI instance
    :param compute_driver: a compute driver name to override the config opt
    :returns: a ComputeDriver instance
    """
    driver_name = compute_driver or CONF.compute_driver
    if not driver_name:
        # Fatal: nothing configured and nothing passed in.
        LOG.error(_("Compute driver option required, but not specified"))
        sys.exit(1)
    LOG.info(_("Loading compute driver '%s'") % driver_name)
    try:
        driver = importutils.import_object_ns('nova.virt',
                                              driver_name,
                                              virtapi)
        # Guard against a module that is not actually a ComputeDriver.
        return utils.check_isinstance(driver, ComputeDriver)
    except ImportError:
        LOG.exception(_("Unable to load the virtualization driver"))
        sys.exit(1)
def compute_driver_matches(match):
    """Return whether the configured compute driver name ends with *match*.

    Note: when no compute driver is configured this returns the falsy
    config value itself rather than False.
    """
    return CONF.compute_driver and CONF.compute_driver.endswith(match)
| {
"content_hash": "808c82f56ee5bd6d8200f84f9d4521aa",
"timestamp": "",
"source": "github",
"line_count": 1379,
"max_line_length": 79,
"avg_line_length": 40.53009427121102,
"alnum_prop": 0.6355763897586374,
"repo_name": "virtualopensystems/nova",
"id": "f12757ad6ac98bf9ccb669e901ba80eaf91119b6",
"size": "56527",
"binary": false,
"copies": "2",
"ref": "refs/heads/bp/vif-vhostuser",
"path": "nova/virt/driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14939768"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
} |
import sh
from os.path import join
from kivy_ios.toolchain import CythonRecipe, shprint
from kivy_ios.context_managers import cd
class NetifacesRecipe(CythonRecipe):
    """Build recipe for the netifaces package on iOS.

    netifaces is plain C (no Cython sources), so cythonize is disabled
    and the package is installed with hostpython's setup.py straight
    into the distribution's python3 root.
    """

    version = "0.10.9"
    url = "https://pypi.io/packages/source/n/netifaces/netifaces-{version}.tar.gz"
    depends = ["python3", "host_setuptools3"]
    python_depends = ["setuptools"]
    library = "libnetifaces.a"
    cythonize = False

    def dest_dir(self):
        # Install target: the python3 root inside the dist directory.
        return join(self.ctx.dist_dir, "root", "python3")

    def get_netifaces_env(self, arch):
        env = arch.get_env()
        # Make hostpython see the staged site-packages during install.
        env["PYTHONPATH"] = self.ctx.site_packages_dir
        return env

    def install(self):
        target_arch = list(self.filtered_archs)[0]
        source_dir = self.get_build_dir(target_arch.arch)
        env = self.get_netifaces_env(target_arch)
        hostpython = sh.Command(self.ctx.hostpython)
        with cd(source_dir):
            shprint(
                hostpython,
                "setup.py",
                "install",
                "--prefix",
                self.dest_dir(),
                _env=env,
            )

recipe = NetifacesRecipe()
| {
"content_hash": "2795a1d118dbede5a5542c4b087187fe",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 82,
"avg_line_length": 27.878048780487806,
"alnum_prop": 0.5896762904636921,
"repo_name": "kivy/kivy-ios",
"id": "02efcac414763382850bb83a9c1f9b2b02d6c220",
"size": "1143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kivy_ios/recipes/netifaces/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "537"
},
{
"name": "Cython",
"bytes": "7138"
},
{
"name": "Objective-C",
"bytes": "28133"
},
{
"name": "Python",
"bytes": "125074"
},
{
"name": "Shell",
"bytes": "672"
},
{
"name": "kvlang",
"bytes": "1377"
}
],
"symlink_target": ""
} |
from ingenico.connect.sdk.param_request import ParamRequest
from ingenico.connect.sdk.request_param import RequestParam
class ConvertAmountParams(ParamRequest):
    """
    Query parameters for Convert amount

    See also https://epayments-api.developer-ingenico.com/s2sapi/v1/en_US/python/services/convertAmount.html
    """

    # Private backing fields for the three query parameters.
    __source = None
    __target = None
    __amount = None

    @property
    def source(self):
        """
        | Three-letter ISO currency code representing the source currency

        Type: str
        """
        return self.__source

    @source.setter
    def source(self, value):
        self.__source = value

    @property
    def target(self):
        """
        | Three-letter ISO currency code representing the target currency

        Type: str
        """
        return self.__target

    @target.setter
    def target(self, value):
        self.__target = value

    @property
    def amount(self):
        """
        | Amount to be converted in cents and always having 2 decimals

        Type: long
        """
        return self.__amount

    @amount.setter
    def amount(self, value):
        self.__amount = value

    def to_request_parameters(self):
        """
        :return: list[RequestParam]
        """
        # Preserve parameter order: source, target, amount.
        candidates = [
            ("source", self.source),
            ("target", self.target),
            ("amount", None if self.amount is None else str(self.amount)),
        ]
        return [RequestParam(name, value)
                for name, value in candidates
                if value is not None]
| {
"content_hash": "2602ea3ccc7e35da643a59a5b14d85ca",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 108,
"avg_line_length": 24.90909090909091,
"alnum_prop": 0.5888077858880778,
"repo_name": "Ingenico-ePayments/connect-sdk-python2",
"id": "6e57befe2c1a1fd1eae946fae936e4f6a9a34b9b",
"size": "1795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ingenico/connect/sdk/merchant/services/convert_amount_params.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1733005"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.template import RequestContext, loader
from django.utils import timezone
from django.views import generic

from polls.models import Choice, Poll
# Create your views here.
def index(request):
    """Render the five most recently published polls."""
    latest_poll_list = Poll.objects.order_by('-pub_date')[:5]
    # template = loader.get_template('polls/index.html') #the page's design is hard-coded in the view - we add this line to change the way the page looks
    context = RequestContext(request, {
        'latest_poll_list' : latest_poll_list,
    })
    return render(request,'polls/index.html',context)
    #The render() function is a shortcut to requestcontext/load - it takes the request object as its first argument, a template name as its second argument and a dictionary as its optional third argument
# Class-based generic equivalent of the index() view above.
class IndexView(generic.ListView):
    template_name = 'polls/index.html'
    context_object_name = 'latest_poll_list'

    def get_queryset(self):
        """Return the last 5 published polls (not including those set to be
        published in the future)."""
        now = timezone.now()
        return Poll.objects.filter(pub_date__lte=now).order_by('-pub_date')[:5]
## function in views gets called in URL.py
## functions pointing to other pages go here
def detail(request, poll_id):
    """Render the voting form for one poll, raising 404 if it is absent."""
    # get_object_or_404 replaces the manual try/Poll.DoesNotExist dance.
    poll = get_object_or_404(Poll, pk=poll_id)
    return render(request, 'polls/detail.html', {'poll': poll})
# Class-based generic equivalent of the detail() view.
class DetailView(generic.DetailView):
    model = Poll
    template_name = 'polls/detail.html'

    def get_queryset(self):
        """Excludes any polls that aren't published yet."""
        return Poll.objects.filter(pub_date__lte=timezone.now())
def results(request, poll_id):
    """Render the results page for one poll, raising 404 if it is absent."""
    poll = get_object_or_404(Poll, pk=poll_id)
    return render(request, 'polls/results.html', {'poll': poll})
# results function turned into a class
class ResultsView(generic.DetailView):
    # Renders polls/results.html with the Poll as the context object.
    model = Poll
    template_name = 'polls/results.html'
def vote(request, poll_id):
    """Record a vote for the choice selected in the POST data.

    On a missing/unknown choice the detail form is re-rendered with an
    error message; on success the browser is redirected to the results
    page (POST/redirect/GET pattern).

    Fixes two bugs in the original: ``Choice`` was referenced in the
    except clause but never imported (NameError when the handler fired),
    and the error path rendered 'polls/details.html' while the template
    used everywhere else is 'polls/detail.html'.
    """
    p = get_object_or_404(Poll, pk=poll_id)
    try:
        selected_choice = p.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the poll voting form (template name fixed: was
        # 'polls/details.html', which does not match the detail view).
        return render(request, 'polls/detail.html', {
            'poll': p,
            'error_message': "You didn't select a choice.",
        })
    else:
        # NOTE(review): votes += 1 is read-modify-write and can race under
        # concurrent requests; F('votes') + 1 would be atomic.
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data; this prevents double-posting on browser back.
        return HttpResponseRedirect(reverse('polls:results', args=(p.id,)))
| {
"content_hash": "eab186ea92412426ed0fc4ffeb34285e",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 199,
"avg_line_length": 33.84705882352941,
"alnum_prop": 0.7337504344803615,
"repo_name": "lugubru/kinetic",
"id": "e17863196e77de0126d2f4b9aba6ab2dbbad6d4f",
"size": "2877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "basics/mysite/polls/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "220"
},
{
"name": "HTML",
"bytes": "17551"
},
{
"name": "Python",
"bytes": "23561"
}
],
"symlink_target": ""
} |
import logging.config
import PythonCRUD.crud.deleterecords
import PythonCRUD.crud.utils.databaseutils
import PythonCRUD.crud.utils.query
class DeleteRecordsImpl(PythonCRUD.crud.deleterecords.DeleteRecords):
    """
    This class implements the two interfaces
    Scope of this class is delete the records in to database table
    """

    def __init__(self):
        # Class-level logger shared by all instances, as in the original.
        DeleteRecordsImpl.log = logging.getLogger("DeleteRecordsImpl")
        self.conn_utils = PythonCRUD.crud.utils.databaseutils.DatabaseUtils()
        self.conn = self.conn_utils.get_data_source_connection()

    def delete_records(self, cause):
        """
        This method will delete the record
        :param cause: condition have to delete
        :return: None
        """
        try:
            delete_sql = PythonCRUD.crud.utils.query.DELETE_QUERY
            cursor = self.conn.cursor()
            cursor.execute(delete_sql, cause)
            self.conn.commit()
            DeleteRecordsImpl.log.info("Deleted successfully...")
        except Exception as ex:
            # Best-effort: log and continue; the connection is closed below.
            DeleteRecordsImpl.log.exception("Exception occurred while delete record(s) : %s", ex)
        finally:
            self.finalized()

    def finalized(self):
        """
        This is close the resources
        :return:
        """
        self.conn_utils.close_database_connection(self.conn)
| {
"content_hash": "fd1937d10a3837ba1ef394cb3a1a74a2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 97,
"avg_line_length": 32.5,
"alnum_prop": 0.638095238095238,
"repo_name": "sundarcse1216/python-crud",
"id": "7740f11f8ac7884aab881bd73f6c074d5ca9c49d",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonCRUD/crud/deleterecordsimpl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17537"
}
],
"symlink_target": ""
} |
import hatServer.sortingHat.sortingHat
import hatServer.sortingHat.variables
import hatServer.sortingHat.buildDB
| {
"content_hash": "2e6c7ef13cdcf986f16799d4caa4a510",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 38,
"avg_line_length": 28.5,
"alnum_prop": 0.8859649122807017,
"repo_name": "jeor0980/TeamChampion",
"id": "431143f1521bbf92e4e781fbb6f65de0c3eee842",
"size": "114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hatServer/sortingHat/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "157016"
},
{
"name": "HTML",
"bytes": "61930"
},
{
"name": "JavaScript",
"bytes": "386078"
},
{
"name": "Python",
"bytes": "69055"
}
],
"symlink_target": ""
} |
import random
import numpy as np
from helpers import compute_volume
import pygmsh
def test():
    """Mesh a circle built from shuffled, inconsistently oriented lines.

    Verifies that pygmsh.orient_lines() can recover a usable curve loop,
    by checking the meshed area against pi (the area of the unit circle).
    """
    with pygmsh.geo.Geometry() as geom:
        # Approximate the unit circle with a polygon of points.
        angles = np.arange(0, 2.0 * np.pi, 0.05)
        coords = np.column_stack([np.cos(angles), np.sin(angles), np.zeros_like(angles)])
        points = [geom.add_point(c) for c in coords]
        # Deliberately flip the direction of some lines (every k with
        # k % 3 != 0) to exercise the orientation fix-up.
        lines = []
        for k in range(len(points) - 1):
            flip = 0 if k % 3 == 0 else 1
            start = points[k + flip]
            end = points[k + (flip + 1) % 2]
            lines.append(geom.add_line(start, end))
        lines.append(geom.add_line(points[-1], points[0]))
        # Deterministically shuffle the line order as well.
        random.seed(1)
        random.shuffle(lines)
        oriented = pygmsh.orient_lines(lines)
        loop = geom.add_curve_loop(oriented)
        geom.add_plane_surface(loop)
        mesh = geom.generate_mesh()
    ref = np.pi
    assert abs(compute_volume(mesh) - ref) < 1.0e-2 * ref
    return mesh
if __name__ == "__main__":
    # Run as a script: generate the mesh and write it to disk.
    test().write("physical.vtu")
| {
"content_hash": "bf1f85c568d640b8e4bc355947d80366",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 71,
"avg_line_length": 26.682926829268293,
"alnum_prop": 0.5703839122486288,
"repo_name": "nschloe/python4gmsh",
"id": "735ac56fdeabd721e689ec46f9f6792e4f26c87d",
"size": "1094",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/built_in/test_unordered_unoriented.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "405"
},
{
"name": "Python",
"bytes": "40391"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Credentials.refresh_token to CharField(max_length=2048)."""

    dependencies = [
        ('teslarent', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='credentials',
            name='refresh_token',
            # NOTE(review): keep in sync with the field on the model.
            field=models.CharField(max_length=2048),
        ),
    ]
| {
"content_hash": "0b6c8470d981c201c31fa87aa6a9125e",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 52,
"avg_line_length": 21.375,
"alnum_prop": 0.5789473684210527,
"repo_name": "flarno11/teslarent",
"id": "43fbba1888e149eb6f9d7c3647e6ab068c341aeb",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "teslarent/migrations/0002_auto_20220216_2114.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "HTML",
"bytes": "22116"
},
{
"name": "JavaScript",
"bytes": "10187"
},
{
"name": "Procfile",
"bytes": "74"
},
{
"name": "Python",
"bytes": "101710"
},
{
"name": "Shell",
"bytes": "910"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import pygion
from pygion import task, RW
import numpy
@task(privileges=[RW])
def hello(R):
    """Print the 'x' field of region R (declared with RW privilege)."""
    print(R.x)
# Define the main task. This task is called first.
@task
def main():
    """Create an empty ([0, 0]) region with a float64 field 'x', fill it
    with 3.14, and launch the hello task on it."""
    R = pygion.Region([0, 0], {'x': pygion.float64})
    pygion.fill(R, 'x', 3.14)
    hello(R)
if __name__ == '__main__':
    # Entry point when launched as a pygion program.
    main()
| {
"content_hash": "aa2ef7d2bde7c5d6dfb37478f217ec50",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 52,
"avg_line_length": 17.65,
"alnum_prop": 0.6203966005665722,
"repo_name": "StanfordLegion/legion",
"id": "46204d43f4248b8f8dbd0ae39e926f88b12abfb7",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "bindings/python/tests/pass/empty_region.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "335761"
},
{
"name": "C++",
"bytes": "17156793"
},
{
"name": "CMake",
"bytes": "240564"
},
{
"name": "Cuda",
"bytes": "29542"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "Fortran",
"bytes": "346250"
},
{
"name": "HTML",
"bytes": "3653"
},
{
"name": "JavaScript",
"bytes": "94778"
},
{
"name": "Makefile",
"bytes": "119231"
},
{
"name": "Perl",
"bytes": "145756"
},
{
"name": "Python",
"bytes": "1661733"
},
{
"name": "Raku",
"bytes": "34306"
},
{
"name": "Rouge",
"bytes": "2303312"
},
{
"name": "Rust",
"bytes": "222951"
},
{
"name": "Shell",
"bytes": "12892"
},
{
"name": "Terra",
"bytes": "1709732"
}
],
"symlink_target": ""
} |
"""Tests for perfkitbenchmarker.packages.blazemark."""
import os
import unittest
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_packages import blazemark
class BlazemarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
    """Tests for blazemark._ParseResult against captured benchmark output."""

    # Show full diffs on assertion failures.
    maxDiff = None

    def setUp(self):
        # Fixture files live in the sibling 'data' directory.
        self.data_dir = os.path.join(os.path.dirname(__file__), '..', 'data')

    def testParseResult(self):
        """Throughput-style output yields MFlop/s samples with N metadata."""
        result_path = os.path.join(self.data_dir, 'blazemark-output.txt')
        with open(result_path) as result_file:
            out = result_file.read()
        results = blazemark._ParseResult(out, 'test')
        self.assertEqual(14, len(results))  # 14 results
        self.assertEqual('test_C-like_Throughput', results[0].metric)
        self.assertEqual(1115.44, results[0].value)
        self.assertEqual('MFlop/s', results[0].unit)
        self.assertEqual({'N': 100}, results[0].metadata)
        self.assertEqual('test_Eigen_Throughput', results[-1].metric)
        self.assertEqual(209.899, results[-1].value)
        self.assertEqual('MFlop/s', results[-1].unit)
        self.assertEqual({'N': 10000000}, results[-1].metadata)

    def testParseExpResult(self):
        """Exponential-notation output yields Seconds samples."""
        result_path = os.path.join(self.data_dir, 'blazemark-output2.txt')
        with open(result_path) as result_file:
            out = result_file.read()
        results = blazemark._ParseResult(out, 'test')
        self.assertEqual(10, len(results))  # 10 results
        self.assertEqual('test_Blaze_Throughput', results[0].metric)
        self.assertEqual(float('3.03424e-08'), results[0].value)
        self.assertEqual('Seconds', results[0].unit)
        self.assertEqual({'N': 3}, results[0].metadata)
        self.assertEqual('test_Blaze_Throughput', results[-1].metric)
        self.assertEqual(31.9121, results[-1].value)
        self.assertEqual('Seconds', results[-1].unit)
        self.assertEqual({'N': 2000}, results[-1].metadata)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "e4e75c3f2fe8c5806b9a4dabda483a5d",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 73,
"avg_line_length": 38.795918367346935,
"alnum_prop": 0.6806943713834824,
"repo_name": "GoogleCloudPlatform/PerfKitBenchmarker",
"id": "133af785be77885368e42d77ce9e272134ac85a8",
"size": "2511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/linux_packages/blazemark_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3420"
},
{
"name": "HTML",
"bytes": "113073"
},
{
"name": "Jinja",
"bytes": "62005"
},
{
"name": "Lua",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "6076512"
},
{
"name": "R",
"bytes": "1017"
},
{
"name": "Shell",
"bytes": "76164"
},
{
"name": "Tcl",
"bytes": "14601"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('ext'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['refcounting']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Jansson'
copyright = u'2009-2016, Petri Lehtinen'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.11'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'c:func'
primary_domain = 'c'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Janssondoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Jansson.tex', u'Jansson Documentation',
u'Petri Lehtinen', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jansson', u'Jansson Documentation',
[u'Petri Lehtinen'], 1)
]
| {
"content_hash": "89d9a8c7a4cd3b2e6510e76922f522a4",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 80,
"avg_line_length": 32.3921568627451,
"alnum_prop": 0.7062651331719129,
"repo_name": "ipdcode/containerdns",
"id": "ded8eadaf7a22ec94b356cfe5158518bf734b07d",
"size": "7026",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kdns/deps/libjansson/doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "245554"
},
{
"name": "Shell",
"bytes": "3360"
}
],
"symlink_target": ""
} |
import datetime
import os.path
from scrapy import log
from scrapy.exceptions import CloseSpider
from scraper.models import Event
from scraper.scraper_test import EventSpider, ScraperTest
from dynamic_scraper.models import SchedulerRuntime, Log
class ScraperRunTest(ScraperTest):
    """Integration tests for running an EventSpider against the local test server.

    Each test mutates the scraper-definition fixtures created by ``ScraperTest``
    (scraper elements ``se_*``, scraped-object attributes ``soa_*``, the
    ``event_website`` and the ``scraper`` itself), runs the spider, and asserts
    on the ``Event`` rows that were persisted.
    """
    def test_missing_base_elem(self):
        # Without the base element the scraper cannot locate items at all.
        self.se_base.delete()
        self.assertRaises(CloseSpider, self.run_event_spider, 1)
    def test_missing_url_elem(self):
        # The URL element is likewise required for a runnable scraper.
        self.se_url.delete()
        self.assertRaises(CloseSpider, self.run_event_spider, 1)
    def test_scraper(self):
        # Happy path: 4 events scraped, detail-page description filled in.
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 4)
        self.assertEqual(Event.objects.get(title='Event 1').description, u'Event 1 description')
    def test_standard_field_as_detail_page_url_hack(self):
        # Repurpose the description element as a standard (non-detail-page)
        # field while the title attribute takes over the 'U' (URL) role.
        self.se_desc.x_path = u'a/text()'
        self.se_desc.from_detail_page = False
        self.se_desc.save()
        self.soa_title.attr_type = 'U'
        self.soa_title.save()
        self.soa_url.attr_type = 'S'
        self.soa_url.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 4)
    def test_double(self):
        # Pre-create one of the events the spider will scrape; the run must
        # not store a duplicate for it.
        checker_rt = SchedulerRuntime()
        checker_rt.save()
        event = Event(title=u'Event 1', url=u'http://localhost:8010/static/site_generic/event1.html',
            checker_runtime=checker_rt)
        event.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 4)
        self.assertEqual(len(Event.objects.filter(title='Event 1')), 1)
    def test_testmode(self):
        # Without 'do_action' the spider runs in test mode: nothing is saved.
        kwargs = {
            'id': 1,
        }
        spider = EventSpider(**kwargs)
        self.crawler.crawl(spider)
        self.crawler.start()
        self.assertEqual(len(Event.objects.all()), 0)
    def test_task_run_type(self):
        # TASK run against an unreachable URL: the zero-action counter is
        # incremented and spider log messages are persisted to the DB.
        self.event_website.url = os.path.join(self.SERVER_URL, 'not_existing_site/event_main.html')
        self.event_website.save()
        kwargs = {
            'id': 1,
            'do_action': 'yes',
            'run_type': 'TASK',
        }
        spider = EventSpider(**kwargs)
        self.crawler.crawl(spider)
        self.crawler.start()
        self.assertEqual(spider.scheduler_runtime.num_zero_actions, 1)
        spider.log("Test message", log.ERROR)
        self.assertGreater(Log.objects.count(), 0)
    def test_no_task_run_type(self):
        # SHELL runs neither count zero actions nor persist log messages.
        self.event_website.url = os.path.join(self.SERVER_URL, 'not_existing_site/event_main.html')
        self.event_website.save()
        kwargs = {
            'id': 1,
            'do_action': 'yes',
            'run_type': 'SHELL',
        }
        spider = EventSpider(**kwargs)
        self.crawler.crawl(spider)
        self.crawler.start()
        self.assertEqual(spider.scheduler_runtime.num_zero_actions, 0)
        spider.log("Test message", log.ERROR)
        self.assertEqual(Log.objects.count(), 0)
    def test_max_items_read(self):
        # Read limit caps how many items are even processed.
        self.scraper.max_items_read = 3
        self.scraper.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 3)
    def test_max_items_save(self):
        # Save limit can be stricter than the read limit.
        self.scraper.max_items_read = 3
        self.scraper.max_items_save = 2
        self.scraper.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
    def test_missing_mandatory(self):
        # With description mandatory, items lacking one are dropped
        # (2 saved instead of the usual 4).
        self.se_desc.mandatory = True
        self.se_desc.save()
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_missing_mandatory/event_main.html')
        self.event_website.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
    def test_scraper_pause_status(self):
        self.scraper.status = 'P'
        self.scraper.save()
        self.assertRaises(CloseSpider, self.run_event_spider, 1)
    def test_scraper_inactive_status(self):
        self.scraper.status = 'I'
        self.scraper.save()
        self.assertRaises(CloseSpider, self.run_event_spider, 1)
    def setUpProcessorTest(self):
        # Shared fixture tweak (not a test itself): route scraping through the
        # site_with_processor pages and prepend the static URL prefix.
        self.se_url.processors = u'pre_url'
        self.se_url.proc_ctxt = u"'pre_url': 'http://localhost:8010/static/site_with_processor/'"
        self.se_url.save()
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_processor/event_main.html')
        self.event_website.save()
    def test_processor(self):
        self.setUpProcessorTest()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
    def test_multiple_processors_use(self):
        # Comma-separated processors are applied in order around the value.
        self.setUpProcessorTest()
        self.se_desc.processors = u'pre_string, post_string '
        self.se_desc.proc_ctxt = u"'pre_string': 'before_', 'post_string': '_after',"
        self.se_desc.save()
        self.run_event_spider(1)
        self.assertEqual(Event.objects.get(id=1).description, 'before_Event 2 description_after')
    def test_replace_processor_wrong_x_path(self):
        # 'replace' only substitutes an existing scraped value, so a dead
        # x_path still yields no items.
        self.setUpProcessorTest()
        self.se_title.x_path = u'/div[@class="class_which_is_not_there"]/text()'
        self.se_title.processors = u'replace'
        self.se_title.proc_ctxt = u"'replace': 'This text is a replacement'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 0)
    def test_replace_processor_correct_x_path(self):
        self.setUpProcessorTest()
        self.se_title.processors = u'replace'
        self.se_title.proc_ctxt = u"'replace': 'This text is a replacement'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
    def test_static_processor_wrong_x_path(self):
        # 'static' supplies a value even when the x_path matches nothing.
        self.setUpProcessorTest()
        self.se_title.x_path = u'/div[@class="class_which_is_not_there"]/text()'
        self.se_title.processors = u'static'
        self.se_title.proc_ctxt = u"'static': 'This text should always be there'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
    def test_static_processor_correct_x_path(self):
        self.setUpProcessorTest()
        self.se_title.processors = u'static'
        self.se_title.proc_ctxt = u"'static': 'This text should always be there'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
    def test_reg_exp(self):
        # NOTE(review): u'(\d{6})' works only because \d is not a recognized
        # string escape; a raw string literal would be safer (left untouched here).
        self.se_desc.reg_exp = u'(\d{6})'
        self.se_desc.save()
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_reg_exp/event_main.html')
        self.event_website.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
        self.assertEqual(Event.objects.get(title='Event 1').description, '563423')
    def test_with_imgs(self):
        # Image attribute type 'I': the scraped value becomes the image file
        # name and the binary lands under PROJECT_ROOT/imgs. Remove leftovers
        # from earlier runs first so the os.access checks prove this run wrote them.
        path1 = os.path.join(self.PROJECT_ROOT, 'imgs/1d7c0c2ea752d7aa951e88f2bc90a3f17058c473.jpg')
        if os.access(path1, os.F_OK):
            os.unlink(path1)
        path2 = os.path.join(self.PROJECT_ROOT, 'imgs/3cfa4d48e423c5eb3d4f6e9b5e5d373036ac5192.jpg')
        if os.access(path2, os.F_OK):
            os.unlink(path2)
        self.se_desc.mandatory = True
        self.se_desc.save()
        self.soa_desc.attr_type = 'I'
        self.soa_desc.save()
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_imgs/event_main.html')
        self.event_website.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
        self.assertEqual(Event.objects.get(title='Event 1').description, '1d7c0c2ea752d7aa951e88f2bc90a3f17058c473.jpg')
        self.assertTrue(os.access(path1, os.F_OK))
        self.assertTrue(os.access(path2, os.F_OK))
| {
"content_hash": "ad49c0d72356d6721e9bd55f21b512d1",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 120,
"avg_line_length": 33.073469387755104,
"alnum_prop": 0.5965691719116377,
"repo_name": "developerworks/django-dynamic-scraper",
"id": "06ec07319448e7362f79f3adec98abf0af5ef6bd",
"size": "8103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scraper/scraper_run_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "95079"
},
{
"name": "Shell",
"bytes": "1876"
}
],
"symlink_target": ""
} |
from enum import Enum
class CriteriaTarget(Enum):
    """Enumeration of filter-criteria targets; ROW is the only member."""
    ROW = 1 | {
"content_hash": "a23b28275e8e07f6783183a0a3bfdf5b",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 27,
"avg_line_length": 12.6,
"alnum_prop": 0.7142857142857143,
"repo_name": "smartsheet-platform/smartsheet-python-sdk",
"id": "53a2830a0a4a8db0e6a4674b91dc291569e0c1ed",
"size": "733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartsheet/models/enums/criteria_target.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7339"
},
{
"name": "Python",
"bytes": "879963"
},
{
"name": "Shell",
"bytes": "2671"
},
{
"name": "Smarty",
"bytes": "482"
}
],
"symlink_target": ""
} |
"""Classes to encapsulate the idea of a dataset in machine learning,
including file access.
This file contains the ARFF class for people who have arff installed.
"""
# This software is distributed under BSD 3-clause license (see LICENSE file).
#
# Authors: Soeren Sonnenburg
try:
import arff
have_arff = True
except ImportError:
have_arff = False
import sys
from numpy import array, concatenate
import csv
from esvm.mldata import DatasetFileBase
class DatasetFileARFF(DatasetFileBase):
    """Attribute-Relation File Format file, uses module arff.
    Labels are in the first column.

    NOTE: Python 2 module (print statements, xrange, zip-returning-list).
    ``extype`` selects the example layout: 'vec' = numeric feature vectors,
    'seq' = one sequence per example, 'mseq' = several sequences per example.
    """
    def __init__(self,filename,extype,dataname='ARFFdata',comment=''):
        """Do the base class init, then add some arff specific metadata"""
        if not have_arff:
            # Degrade gracefully: object exists but is unusable without arff.
            print 'import arff failed, currently cannot support ARFF file format'
            return
        DatasetFileBase.__init__(self,filename,extype)
        self.dataname = dataname
        self.comment = comment
    def readlines(self,idx=None):
        """Read from file and split data into examples and labels.

        idx: optional sequence of row indices to read; defaults to all rows.
        Returns (examples, labels); layout of examples depends on self.extype.
        """
        fp = open(self.filename,'r')
        (dataname,issparse,alist,data) = arff.arffread(fp)
        fp.close()
        self.dataname = dataname
        # Former sanity check that column 0 is the label, kept for reference.
        #if (alist[0][0]!='label'):
        #    sys.stderr.write('First column of ARFF file needs to be the label\n')
        #    sys.exit(-1)
        if idx is None:
            idx = range(len(data))
        labels = [data[ix][0] for ix in idx]
        labels = array(labels)
        if self.extype == 'vec':
            # Transpose so the array is features x examples (see print below).
            examples = [data[ix][1:] for ix in idx]
            examples = array(examples).T
            print '%d features, %d examples' % examples.shape
        elif self.extype == 'seq':
            # Single sequence column per example.
            examples = [data[ix][1] for ix in idx]
            print 'sequence length = %d, %d examples' % (len(examples[0]),len(examples))
        elif self.extype == 'mseq':
            # Multiple sequence columns per example.
            examples = [data[ix][1:] for ix in idx]
            printstr = 'sequence lengths = '
            for seq in examples[0]:
                printstr += '%d, ' % len(seq)
            printstr += '%d examples' % len(examples)
            print printstr
        return (examples, labels)
    def writelines(self,examples,labels,idx=None):
        """Merge the examples and labels and write to file.

        The attribute list starts with the label; the second tuple field
        appears to flag numeric (1) vs string/sequence (0) attributes —
        TODO confirm against the arff module.
        """
        alist = [('label',1,[])]
        if idx is not None:
            examples = examples[idx]
            labels = labels[idx]
        if self.extype == 'vec':
            # Label becomes column 0, followed by the (re-transposed) features.
            data = list(concatenate((labels.reshape(len(labels),1),examples.T),axis=1))
            for ix in xrange(examples.shape[0]):
                attname = 'att%d' % ix
                alist.append((attname,1,[]))
        elif self.extype == 'seq':
            data = zip(labels,examples)
            alist.append(('sequence',0,[]))
        elif self.extype == 'mseq':
            data = []
            for ix,curlab in enumerate(labels):
                data.append([curlab]+list(examples[ix]))
            alist.append(('upstream sequence',0,[]))
            alist.append(('downstream sequence',0,[]))
        fp = open(self.filename,'w')
        arff.arffwrite(fp,alist,data,name=self.dataname,comment=self.comment)
        fp.close()
| {
"content_hash": "48fe196305dceb49e35e7aa945ba7d10",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 88,
"avg_line_length": 33.670103092783506,
"alnum_prop": 0.5808328230251072,
"repo_name": "sorig/shogun",
"id": "69481f5aa6c51e4bb3be22cc0afe04f27cb0cd1d",
"size": "3289",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "applications/easysvm/esvm/mldata_arff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "568"
},
{
"name": "C",
"bytes": "11644"
},
{
"name": "C++",
"bytes": "10500045"
},
{
"name": "CMake",
"bytes": "196913"
},
{
"name": "Dockerfile",
"bytes": "2423"
},
{
"name": "GDB",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "3829"
},
{
"name": "MATLAB",
"bytes": "8755"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "284970"
},
{
"name": "Shell",
"bytes": "11995"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 - Frank Lin
from java_variable import VarType
from skrutil.string_utils import indent
_JAVA_BR = '\n\n'
_JAVA_SPACE = ' '
class JavaClass:
    """Java file generator, including package, class, fields, enums and manager.

    NOTE(review): every ``generate_*`` method opens its output file and never
    explicitly closes it; the data is flushed only when the handle is garbage
    collected or the interpreter exits.
    """
    def __init__(self, group_name, class_name, java_variable_list, java_enum_list, java_manager_or_none):
        """Init Java file generator.
        Args:
            group_name: A string which is package name, input name is C++ folder name, should be all lowercase. (eg: task)
            class_name: A string which is class name, input name is C++ class name, should be capitalized. (eg: Task)
            java_variable_list: A list of <JavaVariable> object which mean all fields in class.
            java_enum_list: A list of <JavaEnum> object which mean all enums in class.
            java_manager_or_none: A <JavaManager> object means ObjectManager or none means the class has no ObjectManager.
        """
        self.__group_name = group_name
        self.__class_name = class_name
        self.__java_var_list = java_variable_list
        self.__java_enum_list = java_enum_list
        self.__java_manager_or_none = java_manager_or_none
        if self.__java_manager_or_none is not None:
            # Plural object name is naive: it simply appends 's'.
            self.__java_manager_or_none.set_object_name(class_name, class_name + 's')
            self.__java_manager_or_none.set_java_variable_list(java_variable_list)
    def generate_java_v2(self, config):
        """Generates java object implementation.

        Writes build/<package path>/<group>/<Class>.java; returns None.
        Args:
            config: A config that enables some user-defined name.
        """
        file_name = self.__class_name + '.java'
        file_path = 'build/{0}/'.format(config.java_package_path) + self.__group_name + '/' + file_name
        output_java = open(file_path, 'w')
        java_package = 'package {0}.'.format(config.java_package_name) + self.__group_name + ';'
        output_java.write(java_package + _JAVA_BR)
        java_import = 'import android.support.annotation.IntDef;\n'
        java_import += 'import java.lang.annotation.Retention;\n'
        java_import += 'import java.lang.annotation.RetentionPolicy;' + _JAVA_BR
        java_import += 'import {0}.base.{1};\n'.format(config.java_package_name, config.java_base_object)
        java_import += 'import {0}.jni.CoreObject;\n'.format(config.java_package_name)
        java_import += 'import java.util.ArrayList;\n'
        java_import += 'import java.util.Arrays;\n'
        # A generated class named 'List' would clash with java.util.List.
        if self.__class_name != 'List':
            java_import += 'import java.util.List;'
        java_class_start = 'public final class ' + self.__class_name + ' extends {0} {{'.format(config.java_base_object)
        java_class_end = '}'
        output_java.write(java_import + _JAVA_BR)
        output_java.write(java_class_start + _JAVA_BR)
        # generates enum in class
        for java_enum in self.__java_enum_list:
            output_java.write(java_enum.generate_android_enum(_JAVA_SPACE) + '\n')
        # generates fields in class
        for java_var in self.__java_var_list:
            output_java.write(_JAVA_SPACE + java_var.private_field_name() + '\n')
        output_java.write('\n')
        # generates constructor
        output_java.write(self.__constructors_v2())
        output_java.write(_JAVA_BR)
        # generate getters
        for java_var in self.__java_var_list:
            output_java.write(java_var.getter_v2() + _JAVA_BR)
        # end brace
        output_java.write(java_class_end + '\n')
    def generate_java(self):
        """Gets Java with JNI implementation. The class inherits from |CoreObject| which means invoker should release
        the object himself/herself by calling |CoreObject.dispose()|.
        New development should use <generate_java_v2> instead.

        NOTE(review): despite the 'Returns' note below this method writes the
        file as a side effect and returns None.
        Returns:
            A string which is the class implementation.
        """
        file_name = self.__class_name + '.java'
        file_path = 'build/com/lesschat/core/' + self.__group_name + '/' + file_name
        output_java = open(file_path, 'w')
        java_package = 'package com.lesschat.core.' + self.__group_name + ';'
        output_java.write(java_package + _JAVA_BR)
        java_import = 'import android.os.Parcel;\n'
        java_import += 'import android.os.Parcelable;' + _JAVA_BR
        java_import += 'import com.lesschat.core.jni.CoreObject;' + _JAVA_BR
        java_import += 'import java.util.ArrayList;\n'
        java_import += 'import java.util.Arrays;\n'
        if self.__class_name != 'List':
            java_import += 'import java.util.List;'
        java_class_start = 'public class ' + self.__class_name + ' extends CoreObject implements Parcelable {'
        java_class_end = '}'
        output_java.write(java_import + _JAVA_BR)
        output_java.write(java_class_start + _JAVA_BR)
        output_java.write(self.__constructors())
        # dispose() frees the native peer via the generated nativeRelease<Class>.
        output_java.write(indent(4) + '@Override\n')
        output_java.write(indent(4) + 'public void dispose() {\n')
        output_java.write(indent(2) + 'nativeRelease{0}(mNativeHandler);\n'.format(self.__class_name))
        output_java.write(indent(4) + '}' + _JAVA_BR)
        for java_enum in self.__java_enum_list:
            output_java.write(java_enum.generate_java_enum(_JAVA_SPACE) + '\n')
        for java_var in self.__java_var_list:
            output_java.write(java_var.getter() + _JAVA_BR)
        output_java.write(_JAVA_BR)
        output_java.write(self.__native_constructors())
        output_java.write(indent(4) + 'private native void nativeRelease{0}(long handler);'.format(self.__class_name) + _JAVA_BR)
        for java_var in self.__java_var_list:
            output_java.write(java_var.native_getter() + _JAVA_BR)
        output_java.write(self.__parcelable())
        output_java.write(java_class_end)
    def generate_manager(self, version, config):
        """Generates Java manager implementation code.

        NOTE(review): writes build/<package path>/<group>/<Manager>.java and
        returns None (the 'Returns' note below is stale).
        Args:
            version: A float for compact usage.
            config: A <Config> object describes some user-defined names.
        Returns:
            A string which is Java manager implementation code.
        """
        if self.__java_manager_or_none is None:
            return
        manager_name = self.__java_manager_or_none.manager_name
        file_name = self.__java_manager_or_none.manager_name + '.java'
        file_path = 'build/{0}/'.format(config.java_package_path) + self.__group_name + '/' + file_name
        output_java = open(file_path, 'w')
        java_package = 'package {0}.'.format(config.java_package_name) + self.__group_name + ';'
        output_java.write(java_package + _JAVA_BR)
        java_import = ''
        if len(self.__java_manager_or_none.apis) != 0:
            java_import += 'import {0}.api.*;\n'.format(config.java_package_name)
        java_import += 'import {2}.{0}.{1}.*;\n'.format(self.__group_name, self.__class_name, config.java_package_name)
        java_import += 'import {0}.jni.CoreObject;\n'.format(config.java_package_name)
        java_import += 'import {0}.director.Director;\n'.format(config.java_package_name)
        java_import += 'import {0}.jni.JniHelper;\n\n'.format(config.java_package_name)
        java_import += 'import java.util.ArrayList;\n'
        java_import += 'import java.util.List;' + _JAVA_BR
        java_class_start = 'public class ' + manager_name + ' extends CoreObject {' + _JAVA_BR
        java_class_end = '}'
        # Managers are singletons resolved through the Director.
        java_manager_constructor = 'public static {0} getInstance() {{ return Director.getInstance().get{0}(); }}'
        java_override = '@Override\n'
        java_manager_dispose = 'public void dispose() { }' + _JAVA_BR
        output_java.write(java_import)
        output_java.write(java_class_start)
        output_java.write(self.__java_manager_or_none.generate_http_variables())
        output_java.write('\n')
        output_java.write(indent(4) + java_manager_constructor.format(manager_name) + _JAVA_BR)
        output_java.write(indent(4) + java_override)
        output_java.write(indent(4) + java_manager_dispose)
        # Pre-5.0 and 5.0+ code generators emit different fetch/HTTP bodies.
        if version < 5.0:
            output_java.write(self.__java_manager_or_none.generate_fetch())
            output_java.write(self.__java_manager_or_none.generate_http_function())
            output_java.write(self.__java_manager_or_none.generate_fetch_native())
            output_java.write(self.__java_manager_or_none.generate_http_function_native())
        else:
            output_java.write(self.__java_manager_or_none.generate_fetch_v2())
            output_java.write(self.__java_manager_or_none.generate_http_function(version))
            output_java.write(self.__java_manager_or_none.generate_fetch_native_v2())
            output_java.write(self.__java_manager_or_none.generate_http_function_native())
        output_java.write(java_class_end)
    def __constructors(self):
        """Java class constructor with native handler as parameter.
        Returns:
            A string which is the implementation of the constructor. For example:
            public Task(long nativeHandler) {
                mNativeHandler = nativeHandler;
            }
        """
        constructor = indent(4) + 'public {0}() {{ \n mNativeHandler = nativeCreate{0}(); \n }}'\
            .format(self.__class_name) + _JAVA_BR
        constructor += indent(4) + 'public {0}(long nativeHandler) {{\n'.format(self.__class_name)
        constructor += indent(2) + 'mNativeHandler = nativeHandler;\n'
        constructor += indent(4) + '}\n\n'
        return constructor
    def __constructors_v2(self):
        """Java class constructor with all fields as parameters.
        Returns:
            A string which is the implementation of the constructor. For example:
            /*package*/ File(String fileId,
                             @File.Type int type,
                             @File.Visibility int visibility,
                             @File.Belong int belong,
                             @File.FolderPermissionSetting int folderPermissionSetting,
                             String createdBy,
                             long createdAt,
                             String updatedBy,
                             long updatedAt) {
                mFileId = fileId;
                ... Remainder omitted...
            }
        """
        package_class = indent(4) + '/*package*/ {0}'.format(self.__class_name)
        # Continuation parameters line up one column past the opening paren.
        num_line_indent = len(package_class) + 1
        if len(self.__java_var_list) > 1:
            first_var = self.__java_var_list[0].input_parameter_name()
            constructor = '{0}({1},\n'.format(package_class, first_var)
            for var in self.__java_var_list:
                if first_var == var.input_parameter_name():
                    continue
                constructor += indent(num_line_indent) + '{0},'.format(var.input_parameter_name()) + '\n'
            constructor = constructor[:-2] # remove break line and last comma
        elif len(self.__java_var_list) == 1:
            first_var = self.__java_var_list[0].input_parameter_name()
            # NOTE(review): this branch (and the 0-field one below) already
            # closes the parameter list with ')', yet the unconditional
            # "constructor += ') {\n'" that follows appends another ')' —
            # looks like doubled parens for 0/1-field classes; confirm
            # against generated output.
            constructor = '{0}({1})'.format(package_class, first_var)
        else:
            constructor = '{0}()'.format(package_class)
        constructor += ') {\n'
        for var in self.__java_var_list:
            constructor += indent(8) + var.assignment() + '\n'
        constructor += indent(4) + '}'
        return constructor
    def __constructor_with_variable(self):
        """Constructor generator taking every field as a parameter and
        forwarding them to nativeCreate<Class>; appears unused in this class
        (no caller in this file)."""
        constructor = indent(4) + 'public {0}('.format(self.__class_name)
        space = len(indent(4) + 'public {0}('.format(self.__class_name))
        space_str = ''
        for space_index in range(0, space):
            space_str += ' '
        for index in range(0, len(self.__java_var_list)):
            java_var = self.__java_var_list[index]
            java_var_type = java_var.var_type
            # First parameter sits on the signature line; the rest are
            # aligned under it with space_str.
            if index == 0:
                if java_var_type == VarType.cpp_enum:
                    constructor += '{0} {1},\n'.format(java_var.java_enum, java_var.name_str)
                elif java_var_type == VarType.cpp_string_array:
                    constructor += 'String[] {0},\n'.format(java_var.name_str)
                else:
                    constructor += '{0} {1},\n'.format(java_var.var_type.to_java_getter_setter_string(), java_var.name_str)
            else:
                if java_var_type == VarType.cpp_enum:
                    constructor += space_str + '{0} {1},\n'.format(java_var.java_enum, java_var.name_str)
                elif java_var_type == VarType.cpp_string_array:
                    constructor += space_str + 'String[] {0},\n'.format(java_var.name_str)
                else:
                    constructor += space_str + '{0} {1},\n'.format(java_var.var_type.to_java_getter_setter_string(), java_var.name_str)
        constructor = constructor[:-2]
        constructor += '){\n'
        constructor += indent(2) + 'mNativeHandler = nativeCreate{0}('.format(self.__class_name)
        for java_var in self.__java_var_list:
            # Enums are passed to JNI as their underlying int value.
            if java_var.var_type == VarType.cpp_enum:
                constructor += java_var.name_str + '.getValue(), '
            else:
                constructor += java_var.name_str + ', '
        constructor = constructor[:-2]
        constructor += ');\n'
        constructor += indent(4) + '}' + _JAVA_BR
        return constructor
    def __native_constructors(self):
        """Native (JNI) declaration matching the no-arg constructor."""
        native_constructor = indent(4) + 'private native long nativeCreate{0}();'.format(self.__class_name) + _JAVA_BR
        # native_constructor += self.__native_constructor_with_variable()
        return native_constructor
    def __native_constructor_with_variable(self):
        """Native declaration for the all-fields constructor; only reachable
        via the commented-out call in __native_constructors above."""
        space_str = ''
        native_constructor = indent(4) + 'private native long nativeCreate{0}('.format(self.__class_name)
        for space_index in range(0, len(indent(4) + 'private native long nativeCreate{0}('.format(self.__class_name))):
            space_str += ' '
        for index in range(0, len(self.__java_var_list)):
            java_var = self.__java_var_list[index]
            java_var_type = java_var.var_type
            if index == 0:
                if java_var_type == VarType.cpp_enum:
                    native_constructor += 'int {0},\n'.format(java_var.name_str)
                elif java_var_type == VarType.cpp_string_array:
                    native_constructor += 'String[] {0},\n'.format(java_var.name_str)
                else:
                    native_constructor += '{0} {1},\n'.format(java_var.var_type.to_java_getter_setter_string(), java_var.name_str)
            else:
                if java_var_type == VarType.cpp_enum:
                    native_constructor += space_str + 'int {0},\n'.format(java_var.name_str)
                elif java_var_type == VarType.cpp_string_array:
                    native_constructor += space_str + 'String[] {0},\n'.format(java_var.name_str)
                else:
                    native_constructor += space_str + '{0} {1},\n'.format(java_var.var_type.to_java_getter_setter_string(), java_var.name_str)
        native_constructor = native_constructor[:-2]
        native_constructor += ');' + _JAVA_BR
        return native_constructor
    def __initwith(self):
        """initWithJson wrapper delegating to the native method; not called
        by the generate_* methods in this file."""
        initwith = indent(4) + 'public boolean initWithJson(String json) { return nativeInitWithJson(mNativeHandler, json); }'
        initwith += _JAVA_BR
        return initwith
    def __native_initwith(self):
        """Native declaration paired with __initwith; also unused here."""
        native_initwith = indent(4) + 'private native boolean nativeInitWithJson(long handler, String json);'
        native_initwith += _JAVA_BR
        return native_initwith
    def __parcelable(self):
        """Android Parcelable boilerplate: only the native handler (a long)
        is written to / restored from the Parcel."""
        parcelable = indent(4) + 'public {0}(Parcel in) {{\n'.format(self.__class_name)
        parcelable += indent(2) + 'mNativeHandler = in.readLong();\n'
        parcelable += indent(4) + '}' + _JAVA_BR
        parcelable += indent(4) + 'public static final Parcelable.Creator<{0}> CREATOR = new Parcelable.Creator<{0}>() {{\n\n'\
            .format(self.__class_name)
        parcelable += indent(2) + 'public {0} createFromParcel(Parcel in) {{ return new {0}(in); }}\n\n'\
            .format(self.__class_name)
        parcelable += indent(2) + 'public {0}[] newArray(int size) {{ return new {0}[size]; }}\n'\
            .format(self.__class_name)
        parcelable += indent(4) + '};' + _JAVA_BR
        parcelable += indent(4) + '@Override\n'
        parcelable += indent(4) + 'public int describeContents() { return 0; }' + _JAVA_BR
        parcelable += indent(4) + '@Override\n'
        parcelable += indent(4) + 'public void writeToParcel(Parcel parcel, int i) { parcel.writeLong(mNativeHandler); }\n'
        parcelable += '\n'
        return parcelable
| {
"content_hash": "b91ed3edaf37bd33b0e7889a0a2b1a9d",
"timestamp": "",
"source": "github",
"line_count": 357,
"max_line_length": 142,
"avg_line_length": 47.15126050420168,
"alnum_prop": 0.5843284025426246,
"repo_name": "DaYeSquad/worktilerwdemo",
"id": "fde3634b7237d4c28d9d4d40017d0f5b8e299c45",
"size": "16833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model-builder/skr_java_builder/java_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "75876"
},
{
"name": "Java",
"bytes": "12534"
},
{
"name": "Objective-C",
"bytes": "9926"
},
{
"name": "Objective-C++",
"bytes": "6292"
},
{
"name": "Python",
"bytes": "283587"
}
],
"symlink_target": ""
} |
import random
from .base import MessageHandler
class EightBallHandler(MessageHandler):
    """Magic 8-Ball handler: replies to its triggers with a random fortune."""

    TRIGGERS = ['8ball', 'eight_ball']
    HELP = 'random fortune'

    # The twenty canonical Magic 8-Ball answers, in the traditional order.
    _RESPONSES = (
        'It is certain',
        'It is decidedly so',
        'Without a doubt',
        'Yes - definitely',
        'You may rely on it',
        'As I see it, yes',
        'Most likely',
        'Outlook good',
        'Yes',
        'Signs point to yes',
        'Reply hazy, try again',
        'Ask again later',
        'Better not tell you now',
        'Cannot predict now',
        'Concentrate and ask again',
        "Don't count on it",
        'My reply is no',
        'My sources say no',
        'Outlook not so good',
        'Very doubtful',
    )

    def handle_message(self, event, triggers, query):
        """Return one uniformly random fortune; the inputs are ignored."""
        return random.choice(self._RESPONSES)
| {
"content_hash": "01a43703744063f28d0f7f729ae3cd1f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 51,
"avg_line_length": 25.0625,
"alnum_prop": 0.5473815461346634,
"repo_name": "nkouevda/slack-rtm-bot",
"id": "2ed289934ff13620a7d648548f3feffb9c2879f1",
"size": "802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slack_rtm_bot/handlers/eight_ball.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11254"
}
],
"symlink_target": ""
} |
import os, sys, re, math
from db import SQLite3
def get_words(document):
    """Extract lowercase word-presence features from *document*.

    Splits on runs of non-word characters and keeps tokens whose length is
    strictly between 2 and 20, returning them as a ``{word: 1}`` dict
    (presence only — repeated words are not counted).
    """
    # '\W+' (one or more), not '\W*': since Python 3.7 re.split() honours
    # zero-width matches, so '\W*' would shatter the text into single
    # characters and every token would fail the length filter.
    splitter = re.compile(r'\W+')
    words = [token.lower() for token in splitter.split(document)
             if len(token) > 2 and len(token) < 20]
    return dict.fromkeys(words, 1)
class Classifier(object):
    """Base classifier backed by two SQL tables.

    Tables (created elsewhere):
      fc(feature, category, count) -- per-feature counts per category.
      cc(category, count, minimum) -- per-category document counts and
        classification thresholds.
    """

    def __init__(self, get_features, filename=None):
        # NOTE(review): `filename` is accepted but never used here; kept
        # for interface compatibility with existing callers.
        self.db = SQLite3()
        # count of feature / category (in-memory mirror; the SQL-backed
        # methods below do not consult it)
        self.fc = {}
        # document count in each category (same: unused mirror)
        self.cc = {}
        # method that gets features
        self.get_features = get_features

    def incf(self, f, cat):
        """Increment the count of feature `f` within category `cat`."""
        count = self.fcount(f, cat)
        if count == 0:
            self.db.execute("insert into fc values (?, ?, 1)", (f, cat))
        else:
            self.db.execute("update fc set count = ? where feature=? and category=? ", (count + 1, f, cat))

    def incc(self, cat):
        """Increment the document count for category `cat`.

        NOTE(review): the insert path reads self.default, which only
        FisherClassifier defines -- confirm base-class callers set it.
        """
        count = self.catcount(cat)
        if count == 0:
            self.db.execute("insert into cc values (?, 1, ?)", (cat, self.default))
        else:
            self.db.execute("update cc set count = ? where category=? ", (count + 1, cat))

    def decf(self, f, cat):
        """Decrement the count of feature `f` in `cat`, clamping at zero."""
        count = self.fcount(f, cat)
        if count == 0:
            # NOTE(review): decrementing a missing row inserts count=1,
            # mirroring the original behaviour -- confirm this is intended.
            self.db.execute("insert into fc values (?, ?, 1)", (f, cat))
        else:
            if count - 1 > 0:
                self.db.execute("update fc set count = ? where feature=? and category=? ", (count - 1, f, cat))
            else:
                self.db.execute("update fc set count = 0 where feature=? and category=? ", (f, cat))

    def decc(self, cat):
        """Decrement the document count for `cat`, clamping at zero."""
        count = self.catcount(cat)
        if count == 0:
            self.db.execute("insert into cc values (?, 1, ?)", (cat, self.default))
        else:
            if count - 1 > 0:
                self.db.execute("update cc set count = ? where category=? ", (count - 1, cat))
            else:
                self.db.execute("update cc set count = 0 where category=? ", (cat,))

    # get feature count
    def fcount(self, f, cat):
        """Return the stored count for (feature, category) as a float."""
        res = self.db.execute('select count from fc where feature=? and category = ?', (f, cat,)).fetchone()
        if res is None:
            return 0
        return float(res[0])

    # get category count
    def catcount(self, cat):
        """Return the document count of `cat` as a float (0 if absent)."""
        res = self.db.execute('select count from cc where category = ?', (cat,)).fetchone()
        if res is None:
            return 0
        return float(res[0])

    # get item count
    def totalcount(self):
        """Return the total number of trained documents across categories."""
        res = self.db.execute('select sum(count) from cc').fetchone()
        if res is None:
            return 0
        return float(res[0])

    # get categories formatted by list
    def categories(self):
        """Return all known category names as a list."""
        return [d[0] for d in self.db.execute('select category from cc')]

    def train(self, document, cat):
        """Record `document` as an example of category `cat` and commit."""
        features = self.get_features(document)
        for f in features:
            self.incf(f, cat)
        self.incc(cat)
        self.db.commit()

    def fprob(self, f, cat):
        """P(feature | category): feature count over category count."""
        if self.catcount(cat) == 0:
            return 0
        return float(self.fcount(f, cat)) / self.catcount(cat)

    def weighted_prob(self, f, cat, prf, weight=1.0, ap=0.5):
        """Return prf(f, cat) smoothed toward assumed probability `ap`.

        `weight` controls how strongly the prior `ap` is weighted against
        the observed evidence (total occurrences of `f` in all categories).
        """
        basicprob = prf(f, cat)
        totals = sum([self.fcount(f, c) for c in self.categories()])
        bp = ((weight * ap) + (totals * basicprob)) / (weight + totals)
        return bp
class Naivebayes(Classifier):
    """Naive Bayes classifier placeholder.

    Currently inherits all behaviour unchanged from Classifier; no
    Bayes-specific scoring is implemented here yet.
    """
    pass
class FisherClassifier(Classifier):
    """Classifier using Fisher's method to combine per-feature probabilities."""

    def __init__(self, get_features):
        Classifier.__init__(self, get_features)
        # cache of per-category classification thresholds from the cc table
        self.minimums = {}
        # default threshold used when a category has none stored
        self.default = 0.6

    def cprob(self, f, cat):
        """P(category | feature), normalized over all categories."""
        # frequency of feature in this category
        clf = self.fprob(f, cat)
        if clf == 0:
            return 0
        # frequency of feature in all categories
        freqsum = sum([self.fprob(f, c) for c in self.categories()])
        p = clf / (freqsum)
        return p

    def fisherprob(self, item, cat):
        """Combine the weighted cprob of each feature via Fisher's method."""
        p = 1
        features = self.get_features(item)
        for f in features:
            p *= (self.weighted_prob(f, cat, self.cprob))
        fscore = -2 * math.log(p)
        return self.invchi2(fscore, len(features) * 2)

    def invchi2(self, chi, df):
        """Inverse chi-squared function, clamped to 1.0."""
        m = chi / 2.0
        # `total` was named `sum` before, shadowing the builtin.
        total = term = math.exp(-m)
        for i in range(1, df // 2):
            term *= m / i
            total += term
        return min(total, 1.0)

    def set_minimum(self, cat, minimum):
        """Persist and cache the classification threshold for `cat`."""
        count = 0
        res = self.db.execute("select count(*) from cc where category=? limit 1", (cat,)).fetchone()
        if res is not None:
            count = res[0]
        if count == 0:
            self.db.execute("insert into cc values (?, 0, ?)", (cat, minimum))
        else:
            self.db.execute("update cc set minimum=? where category=? ", (minimum, cat))
        self.minimums[cat] = minimum

    def get_minimum(self, cat):
        """Return the threshold for `cat`, creating the default if absent."""
        if cat not in self.minimums:
            row = self.db.execute("select minimum from cc where category=? limit 1", (cat,)).fetchone()
            if row is None:
                # No row yet: store the default and re-read it.
                self.set_minimum(cat, self.default)
                return self.get_minimum(cat)
            # Bug fix: the old code cached the whole fetched row (a tuple)
            # instead of the scalar, so classify() compared float > tuple.
            self.minimums[cat] = row[0]
        return self.minimums[cat]

    def classify(self, item, default=None):
        """Return the best category whose fisherprob beats its threshold."""
        best = default
        # `best_prob` was named `max` before, shadowing the builtin.
        best_prob = 0.0
        for c in self.categories():
            p = self.fisherprob(item, c)
            if p > self.get_minimum(c) and p > best_prob:
                best = c
                best_prob = p
        return best
def main():
    """Command-line entry point; intentionally a no-op for now."""
    pass


if __name__ == '__main__':
    main()
"content_hash": "9c9992493e44aeaba8885782ddfbb987",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 118,
"avg_line_length": 34.254658385093165,
"alnum_prop": 0.5347234814143246,
"repo_name": "after12am/expecto",
"id": "adddfcd93ea6545ac02a1a97d5fedf0059749a48",
"size": "5515",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "machine_learning/feed_classifier/src/core/classifier.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17940"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class StorageAccountCheckNameAvailabilityParameters(Model):
    """The parameters used to check the availability of the storage account
    name.

    :param name: The storage account name whose availability is checked.
    :type name: str
    :param type: Default value: "Microsoft.Storage/storageAccounts" .
    :type type: str
    """

    # `name` is the only field the service requires.
    _validation = {
        'name': {'required': True},
    }

    # Serialization map used by the msrest Model machinery.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, name, type="Microsoft.Storage/storageAccounts"):
        # `type` shadows the builtin, but the parameter name is part of the
        # generated public interface, so it is kept as-is.
        self.name = name
        self.type = type
| {
"content_hash": "afb5d645374714a971e51a72e4eaa38d",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 26.25,
"alnum_prop": 0.6031746031746031,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "af3628ff05c8226815f5eef712f398251323b2e7",
"size": "1104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-storage/azure/mgmt/storage/v2015_06_15/models/storage_account_check_name_availability_parameters.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for scatter.error_x.value (non-negative, edit 'calc')."""

    def __init__(self, plotly_name="value", parent_name="scatter.error_x", **kwargs):
        # Pull overridable defaults out of kwargs before delegating, so
        # explicit caller values win over these fallbacks.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "info")
        super(ValueValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
| {
"content_hash": "0735582b2ebb2df3866ca1e0a6f8aeb5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 85,
"avg_line_length": 37,
"alnum_prop": 0.5904365904365905,
"repo_name": "plotly/python-api",
"id": "0817ab7f17f43fa9cfe54617cce880e207b671f4",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter/error_x/_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import unittest
import cPickle
import os
import numpy as np
from sifra.sysresponse import calc_loss_arrays
from sifra.sifraclasses import _ScenarioDataGetter, FacilitySystem, Scenario
from sifra.sysresponse import calc_sys_output
__author__ = 'sudipta'
class TestSifra(unittest.TestCase):
    # NOTE(review): Python 2 test module -- relies on cPickle and
    # dict.iteritems(); it will not run under Python 3 without porting.

    def test_calc_loss_arrays(self):
        """
        :return: tests the calc_loss_arrays function, on a non-parallel run.
        """
        SETUPFILE = 'tests/config_ps_X_test.conf'
        SETUPFILE = os.path.join(os.getcwd(), SETUPFILE)
        print('\nUsing default test setup file')
        scenario = Scenario(SETUPFILE)
        facility = FacilitySystem('tests/config_ps_X_test.conf')
        print('\n========================= Testing serial run =========================')
        component_resp_df = calc_sys_output(facility, scenario)
        # calc_loss_arrays returns six values; only the first two are
        # compared against the pickled golden fixtures below.
        ids_comp_vs_haz, sys_output_dict, component_resp_dict, calculated_output_array, \
        economic_loss_array, output_array_given_recovery \
            = calc_loss_arrays(facility, scenario, component_resp_df, parallel_proc=0)
        test_ids_comp_vs_haz = cPickle.load(open('tests/ids_comp_vs_haz.pickle', 'rb'))
        test_sys_output_dict = cPickle.load(open('tests/sys_output_dict.pickle', 'rb'))
        for k, v in ids_comp_vs_haz.iteritems():
            # shape check: one row per sample, one column per element
            self.assertEqual(v.shape, (scenario.num_samples, facility.num_elements), msg='size mismatch')
        for k in ids_comp_vs_haz:
            np.testing.assert_array_equal(ids_comp_vs_haz[k], test_ids_comp_vs_haz[k], 'arrays not equal', verbose=True)
        #
        for k in sys_output_dict:
            np.testing.assert_array_equal(sys_output_dict[k], test_sys_output_dict[k], 'arrays not equal', verbose=True)

    def test_calc_loss_arrays_parallel(self):
        """
        :return: tests the calc_loss_arrays function, on a parallel run.
        """
        SETUPFILE = 'tests/config_ps_X_test.conf'
        SETUPFILE = os.path.join(os.getcwd(), SETUPFILE)
        print ('using default test setupfile')
        scenario = Scenario(SETUPFILE)
        facility = FacilitySystem('tests/config_ps_X_test.conf')
        print('\n========================= Testing parallel run =========================')
        component_resp_df = calc_sys_output(facility, scenario)
        # Same expectations as the serial test; parallel_proc=1 exercises
        # the parallel code path against identical golden fixtures.
        ids_comp_vs_haz, sys_output_dict, component_resp_dict, calculated_output_array, \
        economic_loss_array, output_array_given_recovery \
            = calc_loss_arrays(facility, scenario, component_resp_df, parallel_proc=1)
        test_ids_comp_vs_haz = cPickle.load(open('tests/ids_comp_vs_haz.pickle', 'rb'))
        test_sys_output_dict = cPickle.load(open('tests/sys_output_dict.pickle', 'rb'))
        for k, v in ids_comp_vs_haz.iteritems():
            self.assertEqual(v.shape, (scenario.num_samples, facility.num_elements), msg='size mismatch')
        for k in ids_comp_vs_haz:
            np.testing.assert_array_equal(ids_comp_vs_haz[k], test_ids_comp_vs_haz[k], 'arrays not equal', verbose=True)
        for k in sys_output_dict:
            np.testing.assert_array_equal(sys_output_dict[k], test_sys_output_dict[k], 'arrays not equal', verbose=True)

    def test_extreme_values(self):
        # sys_output_dict # should be full when 0, and 0 when hazard level 10
        scenario = Scenario('tests/config_ps_X_test_extremes.conf')
        facility = FacilitySystem('tests/config_ps_X_test_extremes.conf')
        component_resp_df = calc_sys_output(facility, scenario)
        ids_comp_vs_haz, sys_output_dict, component_resp_dict, calculated_output_array, \
        economic_loss_array, output_array_given_recovery \
            = calc_loss_arrays(facility, scenario, component_resp_df, parallel_proc=1)
        # print facility.comp_df['cost_fraction']
        for k, v in component_resp_dict.iteritems():
            for kk, vv in v.iteritems():
                # NOTE(review): 'component_id'==kk[0] evaluates to a *bool*
                # before indexing, so this selects
                # comp_df['cost_fraction'][False] rather than the row for
                # component kk[0] -- almost certainly a bug; confirm the
                # intended pandas lookup.
                component_cost_fraction = facility.comp_df['cost_fraction']['component_id'==kk[0]]
                if k == scenario.hazard_intensity_str[0] and kk[1] == 'func_mean':
                    self.assertEqual(vv, 1.0)
                if k == scenario.hazard_intensity_str[0] and kk[1] == 'loss_mean':
                    self.assertEqual(vv, 0.0)
                if k == scenario.hazard_intensity_str[1] and kk[1] == 'func_mean':
                    if component_cost_fraction > 1e-3:
                        self.assertEqual(vv, 0.0, 'test for {} failed for PGA Level: {}'.format(kk[0], k))
                if k == scenario.hazard_intensity_str[1] and kk[1] == 'loss_mean':
                    if component_cost_fraction > 1e-3:
                        self.assertEqual(vv, 1.0, 'test for {} failed for PGA Level: {}'.format(kk[0], k))
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "e03f2ad1920ed127e22065535ab3634d",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 120,
"avg_line_length": 51.81720430107527,
"alnum_prop": 0.6152728781904959,
"repo_name": "gasuperdev/sifra",
"id": "7fe41bd2c2f946871af5866b4347b9d34f6900e0",
"size": "4819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_sifra.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "30594"
},
{
"name": "HTML",
"bytes": "672"
},
{
"name": "JavaScript",
"bytes": "1892"
},
{
"name": "Python",
"bytes": "207358"
},
{
"name": "Shell",
"bytes": "2821"
},
{
"name": "TypeScript",
"bytes": "59108"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
                 "Skipping as we do not have mkldnn.")
class MKLPoolTest(hu.HypothesisTestCase):
    """Device-consistency tests for pooling operators.

    Hypothesis draws the op configuration; assertDeviceChecks compares the
    op's output across the device pairs supplied by mu.gcs (presumably CPU
    vs the MKL device -- confirm in mkl_test_util).
    """

    @given(stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(3, 5),
           size=st.integers(7, 9),
           input_channels=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           method=st.sampled_from(["MaxPool", "AveragePool"]),
           **mu.gcs)
    @settings(max_examples=2, timeout=100)
    def test_mkl_pooling(self, stride, pad, kernel, size,
                         input_channels, batch_size,
                         method, gc, dc):
        """Run one pooling op and check outputs agree across devices."""
        op = core.CreateOperator(
            method,
            ["X"],
            ["Y"],
            stride=stride,
            pad=pad,
            kernel=kernel,
        )
        # Random NCHW float32 input sized by the drawn parameters.
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32)
        # Compares output blob 0 across all device option pairs in dc.
        self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
    # unittest is already imported at module scope; the redundant local
    # re-import was removed.
    unittest.main()
| {
"content_hash": "1aad536758590bf316512b4279a0c1c4",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 70,
"avg_line_length": 31.456521739130434,
"alnum_prop": 0.5908776779543884,
"repo_name": "bwasti/caffe2",
"id": "fa9301c2c9ebe0019fdcab2d18693bb157cf0cf4",
"size": "1447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe2/python/mkl/mkl_pool_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4514"
},
{
"name": "C",
"bytes": "58731"
},
{
"name": "C++",
"bytes": "2743591"
},
{
"name": "CMake",
"bytes": "131386"
},
{
"name": "CSS",
"bytes": "2196"
},
{
"name": "Cuda",
"bytes": "455661"
},
{
"name": "HTML",
"bytes": "5203"
},
{
"name": "Jupyter Notebook",
"bytes": "4615340"
},
{
"name": "Makefile",
"bytes": "527"
},
{
"name": "Metal",
"bytes": "29686"
},
{
"name": "Objective-C",
"bytes": "828"
},
{
"name": "Objective-C++",
"bytes": "147470"
},
{
"name": "Python",
"bytes": "2137478"
},
{
"name": "Shell",
"bytes": "20688"
}
],
"symlink_target": ""
} |
"""Fingerprinting code for the Java runtime."""
import os
import textwrap
from gae_ext_runtime import ext_runtime
from googlecloudsdk.api_lib.app import util
from googlecloudsdk.api_lib.app.images import config
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import log
# Runtime identifiers recognized for Java applications.
NAME = 'java'
ALLOWED_RUNTIME_NAMES = ('java', 'java7', 'custom')
JAVA_RUNTIME_NAME = 'java'

# TODO(user): We'll move these into directories once we externalize
# fingerprinting.

# Template for a generated app.yaml; {runtime} is substituted later.
JAVA_APP_YAML = textwrap.dedent("""\
    runtime: {runtime}
    env: 2
    api_version: 1
    """)

# Default .dockerignore contents written alongside a generated Dockerfile.
DOCKERIGNORE = textwrap.dedent("""\
    .dockerignore
    Dockerfile
    .git
    .hg
    .svn
    """)

# Dockerfile fragments: base-image preambles for the supported artifact
# kinds, plus str.format() templates for the install/run lines.
DOCKERFILE_JAVA8_PREAMBLE = 'FROM gcr.io/google_appengine/openjdk8\n'
DOCKERFILE_JETTY9_PREAMBLE = 'FROM gcr.io/google_appengine/jetty9\n'
DOCKERFILE_LEGACY_PREAMBLE = 'FROM gcr.io/google_appengine/java-compat\n'
DOCKERFILE_COMPAT_PREAMBLE = 'FROM gcr.io/google_appengine/jetty9-compat\n'
DOCKEFILE_CMD = 'CMD {0}\n'
DOCKERFILE_JAVA8_ENTRYPOINT = 'ENTRYPOINT ["java", "-jar", "/app/{0}"]\n'
DOCKERFILE_INSTALL_APP = 'ADD {0} /app/\n'
DOCKERFILE_INSTALL_WAR = 'ADD {0} $JETTY_BASE/webapps/root.war\n'
class JavaConfigError(exceptions.Error):
    """Errors in Java Application Config.

    Raised for invalid runtime_config entries (unknown server/jdk), for a
    server setting combined with a jar deployment, and when more than one
    deployable Java artifact is found.
    """
class JavaConfigurator(ext_runtime.Configurator):
  """Generates configuration for a Java application.

  What is supported is:
  - jar file (run with Open JDK8 image)
  - war file (run with Jetty9 image)
  - Exploded war directory (with WEB-INF/):
    - if env: 2, we use the latest Jetty9 compat runtime image
    - if not, we use the current Jetty9 compat image we build.
  This will ease the transition to the new Jetty9 compat runtime for people
  migrating to env: 2. Once all are on env: 2, we will remove entirely the
  support for the legacy Jetty9 compat runtime.
  """

  def __init__(self, path, appinfo, deploy, entrypoint, server,
               openjdk, artifact_to_deploy, custom):
    """Constructor.

    Args:
      path: (str) Root path of the source tree.
      appinfo: (apphosting.api.appinfo.AppInfoExternal or None) The parsed
        app.yaml file for the module if it exists.
      deploy: (bool) True if run in deployment mode.
      entrypoint: (str) Name of the entrypoint to generate.
      server: (str) Name of the server to use (jetty9 or None for now).
      openjdk: (str) Name of the jdk to use (openjdk8 or None for now).
      artifact_to_deploy: (str) Name of the file or directory to deploy.
      custom: (bool) True if it is a custom runtime.
    """
    self.root = path
    self.appinfo = appinfo
    self.deploy = deploy
    self.custom = custom
    self.entrypoint = entrypoint
    self.server = server
    self.openjdk = openjdk
    self.artifact_to_deploy = artifact_to_deploy
    # Write messages to the console or to the log depending on whether we're
    # doing a "deploy."
    if self.deploy:
      self.notify = log.info
    else:
      self.notify = log.status.Print

  def GenerateConfigs(self):
    """Generates all config files for the module.

    Returns:
      (ext_runtime.Cleaner) A cleaner populated with the generated files
    """
    cleaner = ext_runtime.Cleaner()
    if not self.appinfo:
      self._GenerateAppYaml(cleaner)
    if self.custom or self.deploy:
      self.notify('Generating Dockerfile.')
      self._GenerateDockerfile(cleaner)
      self._GenerateDockerignore(cleaner)
    if not cleaner.HasFiles():
      self.notify('All config files already exist, not generating anything.')
    return cleaner

  def _GenerateAppYaml(self, cleaner):
    """Generates an app.yaml file appropriate to this application.

    Args:
      cleaner: (ext_runtime.Cleaner) A cleaner to populate
    """
    app_yaml = os.path.join(self.root, 'app.yaml')
    if not os.path.exists(app_yaml):
      self.notify('Writing [app.yaml] to [{0}].'.format(self.root))
      runtime = 'custom' if self.custom else 'java'
      with open(app_yaml, 'w') as f:
        f.write(JAVA_APP_YAML.format(runtime=runtime))

  def _GenerateDockerfile(self, cleaner):
    """Generates a Dockerfile appropriate to this application.

    Args:
      cleaner: (ext_runtime.Cleaner) A cleaner to populate

    Raises:
      JavaConfigError: if there is an app.yaml configuration error.
    """
    dockerfile = os.path.join(self.root, config.DOCKERFILE)
    if not os.path.exists(dockerfile):
      self.notify('Writing [%s] to [%s].' % (config.DOCKERFILE, self.root))
      # Customize the dockerfile.
      with open(dockerfile, 'w') as out:
        if self.artifact_to_deploy.endswith('.war'):
          out.write(DOCKERFILE_JETTY9_PREAMBLE)
          out.write(DOCKERFILE_INSTALL_WAR.format(self.artifact_to_deploy))
        if self.artifact_to_deploy.endswith('.jar'):
          if self.server is not None:
            raise JavaConfigError('Cannot use server %s '
                                  'for jar deployment.' % self.server)
          out.write(DOCKERFILE_JAVA8_PREAMBLE)
          out.write(DOCKERFILE_INSTALL_APP.format(self.artifact_to_deploy))
        if self.artifact_to_deploy == '.':
          if self.appinfo and util.IsFlex(self.appinfo.env):
            out.write(DOCKERFILE_COMPAT_PREAMBLE)
          elif self.openjdk == 'openjdk8':
            out.write(DOCKERFILE_COMPAT_PREAMBLE)
          else:
            out.write(DOCKERFILE_LEGACY_PREAMBLE)
          out.write(DOCKERFILE_INSTALL_APP.format(self.artifact_to_deploy))

        # Generate the appropriate start command.
        if self.entrypoint:
          # Bug fix: DOCKEFILE_CMD is a str.format() template ('CMD {0}\n'),
          # so the former '%' interpolation raised TypeError ("not all
          # arguments converted") whenever an entrypoint was configured.
          out.write(DOCKEFILE_CMD.format(self.entrypoint))
        elif self.artifact_to_deploy.endswith('.jar'):
          # for jar execution generate the command to run:
          out.write(DOCKERFILE_JAVA8_ENTRYPOINT.format(self.artifact_to_deploy))
      cleaner.Add(dockerfile)

  def _GenerateDockerignore(self, cleaner):
    """Generates a .dockerignore file appropriate to this application.

    Args:
      cleaner: (ext_runtime.Cleaner) A cleaner to populate
    """
    dockerignore = os.path.join(self.root, '.dockerignore')
    if not os.path.exists(dockerignore):
      self.notify('Writing [.dockerignore] to [{0}].'.format(self.root))
      with open(dockerignore, 'w') as f:
        f.write(DOCKERIGNORE)
      cleaner.Add(dockerignore)
def Fingerprint(path, params):
  """Check for a Java app.

  Args:
    path: (str) Application path.
    params: (ext_runtime.Params) Parameters passed through to the
      fingerprinters.

  Returns:
    (JavaConfigurator or None) Returns a module if the path contains a
    Java app.

  Raises:
    JavaConfigError: if there is an app.yaml configuration error.
  """
  appinfo = params.appinfo
  entrypoint = appinfo.entrypoint if (appinfo and appinfo.entrypoint) else None
  server = None
  openjdk = None

  log.info('Checking for Java.')

  # Validate and pick up the optional runtime_config settings.
  runtime_config = appinfo.runtime_config if appinfo else None
  if runtime_config:
    for key, value in runtime_config.iteritems():
      if key == 'server':
        if value != 'jetty9':
          raise JavaConfigError('Unknown server : %s.' % value)
        server = value
      elif key == 'jdk':
        if value != 'openjdk8':
          raise JavaConfigError('Unknown JDK : %s.' % value)
        openjdk = value
      else:
        raise JavaConfigError('Unknown runtime_config entry : %s.' % key)

  # check for any Java known artifacts: a jar, a war, or an exploded Web App.
  # TODO(user): expand to more complex configs with multiple Jars.
  artifact_to_deploy = '?'
  candidate_count = 0
  for entry in os.listdir(path):
    if entry.endswith('.war'):
      artifact_to_deploy = entry
      candidate_count += 1
    if entry.endswith('.jar'):
      artifact_to_deploy = entry
      candidate_count += 1
    if entry.endswith('WEB-INF'):
      artifact_to_deploy = '.'
      candidate_count += 1

  if candidate_count == 0:
    return None
  if candidate_count > 1:
    raise JavaConfigError('Too many java artifacts to deploy '
                          '(.jar, .war, or Java Web App).')

  return JavaConfigurator(path, appinfo, params.deploy, entrypoint, server,
                          openjdk, artifact_to_deploy, params.custom)
| {
"content_hash": "64d38d599862dc3599bb0194602fa37b",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 80,
"avg_line_length": 34.9163179916318,
"alnum_prop": 0.6624325943678849,
"repo_name": "flgiordano/netcash",
"id": "e7b534616b8789d87b8880adc1bfc0be347f595d",
"size": "8940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/googlecloudsdk/api_lib/app/runtimes/java.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
"""
This model provides a base class for all models:
Models:
* Feature
* Scenario
* ScenarioOutline
* Step
"""
from .exceptions import RadishError
class Tag(object):
    """
    A tag that can be attached to a model.

    A tag has a name and may carry one optional argument.
    """
    def __init__(self, name, arg=None):
        # Both attributes are public; callers read them directly.
        self.name, self.arg = name, arg
# FIXME: make ABC
class Model(object):
    """
    Represents a base model
    """
    class Context(object):
        """
        Represents a Models context.
        For every feature/scenario a new Context object is created
        """
        def __init__(self):
            # constants shared by steps within this context
            self.constants = []

    def __init__(self, id, keyword, sentence, path, line, parent=None, tags=None):
        self.id = id
        self.keyword = keyword
        self.sentence = sentence
        self.path = path
        self.line = line
        self.parent = parent
        self.tags = tags or []
        # set by the runner when execution starts/ends
        self.starttime = None
        self.endtime = None

    @property
    def all_tags(self):
        """
        Return all tags for this model and all it's parents
        """
        # Bug fix: copy before extending. The old code extended self.tags
        # in place, so every access permanently merged the parent's tags
        # into this model's own tag list.
        tags = list(self.tags)
        if self.parent:
            tags.extend(self.parent.all_tags)
        return tags

    @property
    def duration(self):
        """
        Returns the duration of this model
        """
        if not self.starttime or not self.endtime:
            raise RadishError("Cannot get duration of {0} '{1}' because either starttime or endtime is not set".format(self.keyword, self.sentence))
        return self.endtime - self.starttime
| {
"content_hash": "d20c4b5c18cda6399743eaa5a88861be",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 148,
"avg_line_length": 24.46153846153846,
"alnum_prop": 0.5566037735849056,
"repo_name": "SamuelYvon/radish",
"id": "e6719455ef298b6d16948d9a8dc131c3a62adebc",
"size": "1615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "radish/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "20376"
},
{
"name": "Python",
"bytes": "261585"
},
{
"name": "Shell",
"bytes": "1686"
}
],
"symlink_target": ""
} |
'''
Problem 50
15 August 2003
The prime 41, can be written as the sum of six consecutive primes:
41 = 2 + 3 + 5 + 7 + 11 + 13
This is the longest sum of consecutive primes that adds to a prime below one-hundred.
The longest sum of consecutive primes below one-thousand that adds to a prime, contains 21 terms, and is equal to 953.
Which prime, below one-million, can be written as the sum of the most consecutive primes?
'''
import euler
# Sieve primes up to 10**4; consecutive runs of these primes are summed
# and checked against the 10**6 cap below.
primes = euler.prime_sieve(10**4)
longest = primesum = 1
for i in range(len(primes)):
    # Only consider runs strictly longer than the best found so far.
    for j in range(i+longest, len(primes)):
        if (sum(primes[i:j]) < 10**6) and euler.is_prime(sum(primes[i:j])):
            longest = j-i
            primesum = sum(primes[i:j])
# Python 2 print statement: this script predates Python 3.
print primesum
| {
"content_hash": "0e1639ddb463dab576b490e2fe4bc874",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 118,
"avg_line_length": 30.956521739130434,
"alnum_prop": 0.7008426966292135,
"repo_name": "robertdimarco/puzzles",
"id": "c19a0165a3f66700ba80d19a5798c887bf2e6c6d",
"size": "728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project-euler/050.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9675"
},
{
"name": "Go",
"bytes": "3794"
},
{
"name": "HTML",
"bytes": "20099"
},
{
"name": "Java",
"bytes": "33761"
},
{
"name": "JavaScript",
"bytes": "1785"
},
{
"name": "PHP",
"bytes": "3584"
},
{
"name": "Perl",
"bytes": "27"
},
{
"name": "Python",
"bytes": "93003"
},
{
"name": "Ruby",
"bytes": "18722"
},
{
"name": "Shell",
"bytes": "156962"
},
{
"name": "Thrift",
"bytes": "23873"
}
],
"symlink_target": ""
} |
import traceback
from change_range import ChangeRange
from statement import Statement, WarningResult
from tokenized_statement import TokenizedStatement;
class Chunk(object):
    """
    A chunk is a series of consecutive lines in a Worksheet treated as a unit.

    The Chunk class is a base class for different types of chunks. It contains
    basic functionality for tracking a [start,end) range, and tracking what
    lines within the chunk have changed.
    """
    def __init__(self, start=-1, end=-1):
        self.start = start
        self.end = end
        # changed lines are tracked chunk-relative in a ChangeRange
        self.changes = ChangeRange()
        self.newly_inserted = True

    def set_range(self, start, end):
        # Grow the range first (recording insertions), then shrink it
        # (recording deletions); ChangeRange positions are chunk-relative.
        if start < self.start:
            self.changes.insert(0, self.start - start)
            self.start = start
        if end > self.end:
            # NOTE(review): at this point self.start has already been reset,
            # so the second argument (self.start - start) is zero or
            # negative; an insert count of (end - self.end) looks intended --
            # confirm against ChangeRange.insert() semantics.
            self.changes.insert(self.end -self.start, self.start - start)
            self.end = end
        if start > self.start:
            self.changes.delete_range(0, start - self.start)
            self.start = start
        if end < self.end:
            self.changes.delete_range(end - self.start, self.end - self.start)
            self.end = end

    def change_line(self, line):
        # Mark a single absolute line as changed.
        self.changes.change(line - self.start, line + 1 - self.start)

    def change_lines(self, start, end):
        # Mark the absolute half-open range [start,end) as changed.
        self.changes.change(start - self.start, end - self.start)

    def insert_lines(self, pos, count):
        # Record `count` new lines inserted at absolute position `pos`.
        self.changes.insert(pos - self.start, count)
        self.end += count

    def delete_lines(self, start, end):
        # Remove the absolute half-open range [start,end) from the chunk.
        self.changes.delete_range(start - self.start, end - self.start)
        # Note: deleting everything gives [end,end], which is legitimate
        # but maybe a little surprising. Doesn't matter for us.
        if start == self.start:
            self.start = end
        else:
            self.end -= (end - start)
class StatementChunk(Chunk):
    """
    StatementChunk represents a series of lines making up a single unit of Python
    code. (Roughly, but perhaps not exactly corresponding to a statement in the
    Python grammar. A StatementChunk might contain text that isn't compilable at all.)

    In addition to the basic range-tracking capabilities of the base class, the
    StatementChunk class holds a tokenized representation of the code, information
    about the status of the chunk (needs_compile, needs_execute), and after compilation
    and/or execution, the resulting results or errors.
    """
    def __init__(self, start=-1, end=-1):
        Chunk.__init__(self, start, end)
        self.tokenized = TokenizedStatement()
        # Flags consumed by observers: did status/results change since
        # the last time they were examined?
        self.status_changed = False
        self.results_changed = False
        self.executing = False
        self.needs_compile = False
        self.needs_execute = False
        self.statement = None
        self.results = None
        self.error_message = None
        self.error_line = None
        self.error_offset = None

    def __repr__(self):
        return "StatementChunk(%d,%d,%r,%r,%r)" % (self.start, self.end, self.needs_compile, self.needs_execute, self.tokenized.get_text())

    def set_lines(self, lines):
        """Replace the chunk's text; returns True if anything changed."""
        # The local was previously named `range`, shadowing the builtin.
        changed_range = self.tokenized.set_lines(lines)
        if changed_range is None:
            return False

        if changed_range[0] != changed_range[1]: # non-empty range ... empty=truncation
            self.change_lines(self.start + changed_range[0], self.start + changed_range[1])

        if not self.needs_compile:
            self.needs_compile = True
            self.status_changed = True
        self.needs_execute = False
        self.statement = None

        return True

    def mark_for_execute(self):
        """Request re-execution of an already-compiled, not-yet-queued statement."""
        if self.statement is not None and not self.needs_execute:
            self.statement.mark_for_execute()
            self.needs_execute = True
            self.status_changed = True
            return True
        else:
            return False

    def get_statement(self, worksheet):
        """Return (lazily creating) the Statement for this chunk's text."""
        if not self.statement:
            self.statement = Statement(self.tokenized.get_text(), worksheet)
            self.statement.chunk = self
        return self.statement

    def update_statement(self):
        """Sync cached status/results/errors from self.statement's state."""
        self.status_changed = True
        if self.statement.state == Statement.COMPILE_SUCCESS:
            self.needs_compile = False
            self.needs_execute = True
        elif self.statement.state == Statement.EXECUTING:
            self.executing = True
        elif self.statement.state == Statement.EXECUTE_SUCCESS:
            self.executing = False
            self.needs_compile = False
            self.needs_execute = False
            if self.results != self.statement.results:
                self.results_changed = True
            self.results = self.statement.results
            self.error_message = None
            self.error_line = None
            self.error_offset = None
        elif self.statement.state == Statement.COMPILE_ERROR:
            self.needs_compile = True
            self.needs_execute = True
            self.error_message = self.statement.error_message
            self.error_line = self.statement.error_line
            self.error_offset = self.statement.error_offset
            self.results = None
            self.results_changed = True
        elif self.statement.state == Statement.EXECUTE_ERROR:
            self.executing = False
            self.needs_compile = False
            self.needs_execute = True
            self.error_message = self.statement.error_message
            self.error_line = self.statement.error_line
            self.error_offset = self.statement.error_offset
            self.results = None
            self.results_changed = True
        elif self.statement.state == Statement.INTERRUPTED:
            self.executing = False
            self.needs_compile = False
            self.needs_execute = True
            self.error_message = "Interrupted"
            self.error_line = None
            self.error_offset = None
            self.results = None
            self.results_changed = True
        else:
            # NEW/EXECUTING should not be hit here
            raise AssertionError("Unexpected state in Chunk.update_statement()")
class BlankChunk(Chunk):
    """
    A chunk consisting solely of consecutive blank lines.
    """
    def __init__(self, start=-1, end=-1):
        super(BlankChunk, self).__init__(start, end)

    def __repr__(self):
        return "BlankChunk(%d,%d)" % (self.start, self.end)
class CommentChunk(Chunk):
    """
    A chunk consisting solely of consecutive comment lines.
    """
    def __init__(self, start=-1, end=-1):
        super(CommentChunk, self).__init__(start, end)

    def __repr__(self):
        return "CommentChunk(%d,%d)" % (self.start, self.end)
| {
"content_hash": "09791069ac5e1d0da3dc52ee91754d95",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 139,
"avg_line_length": 34.864583333333336,
"alnum_prop": 0.6092022706901703,
"repo_name": "lamby/pkg-reinteract",
"id": "aa2f771a101081d5d2ee1baae4f78b9f8d8380cb",
"size": "6966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/reinteract/chunks.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "8876"
},
{
"name": "C++",
"bytes": "416"
},
{
"name": "Objective-C",
"bytes": "23124"
},
{
"name": "Python",
"bytes": "446059"
},
{
"name": "Shell",
"bytes": "46924"
}
],
"symlink_target": ""
} |
"""Script to parse MAF file
To run: python load.py config_file
"""
import sys
from bigquery_etl.load import load_data_from_file
import json
import os
from bigquery_etl.utils.logging_manager import configure_logging
def load(config):
    """
    Load the mirna isoform tables into BigQuery.

    load_data_from_file.run accepts the following params:
    project_id, dataset_id, table_name, schema_file, data_path,
    source_format, write_disposition, poll_interval, num_retries
    """
    log = configure_logging('mirna_isoform_load', 'logs/mirna_isoform_load.log')
    log.info('begin load of mirna isoform into bigquery')

    schemas_dir = os.environ.get('SCHEMA_DIR', 'schemas/')
    isoform = config['mirna']['isoform']
    schema_path = schemas_dir + isoform['schema_file']
    gcs_prefix = 'gs://' + config['buckets']['open'] + '/' + isoform['output_dir']

    # One load per sequencing platform; each tuple is
    # (GCS subdirectory, config key of the target table, log banner).
    platforms = [
        ('IlluminaHiSeq', 'bq_table_hiseq',
         "\tLoading Isoform HiSeq data into BigQuery.."),
        ('IlluminaGA', 'bq_table_ga',
         "\tLoading Isoform GA data into BigQuery.."),
    ]
    for index, (platform, table_key, banner) in enumerate(platforms):
        if index:
            # Visual separator between consecutive loads in the log.
            log.info("*" * 30)
        log.info(banner)
        load_data_from_file.run(
            config['project_id'],
            config['bq_dataset'],
            isoform[table_key],
            schema_path,
            gcs_prefix + platform + '/*',
            'NEWLINE_DELIMITED_JSON',
            'WRITE_EMPTY'
        )

    log.info('done load of mirna isoform into bigquery')
if __name__ == '__main__':
    # Parse the JSON config given on the command line and run the load.
    # Use a context manager so the config file handle is closed
    # deterministically (the original leaked the handle from open()).
    with open(sys.argv[1]) as config_file:
        load(json.load(config_file))
| {
"content_hash": "9bf2bd991fea408dc4c57335bfcaa352",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 80,
"avg_line_length": 33.68627450980392,
"alnum_prop": 0.610011641443539,
"repo_name": "isb-cgc/ISB-CGC-data-proc",
"id": "31ac4883b559acb824c050b110e63dd304882be3",
"size": "2331",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tcga_etl_pipeline/mirna/isoform/load.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6576"
},
{
"name": "Python",
"bytes": "1169886"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from distutils.core import setup

# Embed the licence text that ships alongside this setup script
# verbatim in the package metadata.
with open("LICENCE", 'r') as licence_file:
    LICENCE = licence_file.read()

VERSION = "3.2"

setup(
    name='tema-adapterlib',
    provides=['adapterlib', ],
    license=LICENCE,
    version=VERSION,
    description='Library for building keyword-adapters',
    author="Tampere University of Technology, Department of Software Systems",
    author_email='teams@cs.tut.fi',
    url='http://tema.cs.tut.fi',
    packages=['adapterlib'],
)
| {
"content_hash": "29df5ecdf4890689e06c6db22c91bf62",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 80,
"avg_line_length": 28.38888888888889,
"alnum_prop": 0.6673189823874756,
"repo_name": "samini/gort-public",
"id": "2f5785d17d246f8009fdfcf965dbc315b7ebd8d1",
"size": "1674",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Source/Squiddy/src/tema-adapterlib-3.2-sma/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36395"
},
{
"name": "Groff",
"bytes": "1577"
},
{
"name": "HTML",
"bytes": "15840"
},
{
"name": "Java",
"bytes": "1292597"
},
{
"name": "Makefile",
"bytes": "4963"
},
{
"name": "Python",
"bytes": "532008"
},
{
"name": "Shell",
"bytes": "118901"
},
{
"name": "XSLT",
"bytes": "221100"
}
],
"symlink_target": ""
} |
# Import the core modules first — presumably so their bindings are
# registered before the VDB bindings load (TODO confirm).
__import__( "IECore" )
__import__( "IECoreScene" )

import warnings

# The first import of pyopenvdb registers duplicate C++ -> Python
# boost::python type converters, which emits warnings. Use the warnings
# module to suppress them for the duration of the import only.
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    import pyopenvdb

from _IECoreVDB import *

# Run any user/site startup configuration for this module.
__import__( "IECore" ).loadConfig( "CORTEX_STARTUP_PATHS", subdirectory = "IECoreVDB" )
| {
"content_hash": "02382d64c1d46805c94fa125247e1cd5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 87,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.7375565610859729,
"repo_name": "appleseedhq/cortex",
"id": "cce54a68f6db7be5bb29c114fc7d04a19cc5a6b9",
"size": "2226",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/IECoreVDB/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1374"
},
{
"name": "C",
"bytes": "66503"
},
{
"name": "C++",
"bytes": "9536541"
},
{
"name": "CMake",
"bytes": "95418"
},
{
"name": "GLSL",
"bytes": "24422"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "2360"
},
{
"name": "Python",
"bytes": "4651272"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
} |
"""Controllers for interactive and non-interactive widgets."""
__author__ = 'sll@google.com (Sean Lip)'
from oppia.apps.widget.models import InteractiveWidget
from oppia.apps.widget.models import NonInteractiveWidget
from oppia.controllers.base import BaseHandler
import utils
from google.appengine.api import users
class WidgetRepositoryPage(BaseHandler):
    """Displays the widget repository page."""

    def get(self):
        """Returns the widget repository page.

        Recognized query parameters:
            iframed: 'true' when the page is rendered inside an iframe.
            interactive: 'true' to show interactive widgets.
            parent_index: opaque value passed through to the template.
        """
        if self.request.get('iframed') == 'true':
            self.values['iframed'] = True
        if self.request.get('interactive') == 'true':
            self.values['interactive'] = True
        # Membership test works directly on the GET multidict; no need
        # to materialize .keys().
        if 'parent_index' in self.request.GET:
            self.values['parent_index'] = self.request.get('parent_index')
        if users.is_current_user_admin():
            self.values['admin'] = True
        self.render_template('editor/widget_repository.html')
class WidgetRepositoryHandler(BaseHandler):
    """Provides data to populate the widget repository page."""

    def get_widgets(self, widget_class):
        """Load widgets from the datastore, grouped by category.

        Returns a dict mapping category name to a sorted list of
        parameterized widget instances.
        """
        assert widget_class in [InteractiveWidget, NonInteractiveWidget]
        by_category = {}
        for widget in widget_class.query():
            by_category.setdefault(widget.category, []).append(
                widget_class.get_with_params(widget.id, {}))
        for category in by_category:
            by_category[category].sort()
        return by_category

    def get(self):  # pylint: disable-msg=C6409
        """Handles GET requests."""
        is_interactive = self.request.get('interactive') == 'true'
        widget_cls = InteractiveWidget if is_interactive else NonInteractiveWidget
        response = {'widgets': self.get_widgets(widget_cls)}

        parent_index = self.request.get('parent_index')
        # NOTE(review): request.get() typically returns '' (not None) for a
        # missing parameter, so this branch may never fire — confirm against
        # the request API in use.
        if parent_index is None:
            raise Exception(
                'No parent index supplied for non-interactive widget.')
        else:
            response['parent_index'] = parent_index
        self.render_json(response)
class NonInteractiveWidgetHandler(BaseHandler):
    """Handles requests relating to non-interactive widgets."""
    # TODO(sll): Combine this with InteractiveWidgetHandler.

    def get(self, widget_id):
        """Handles GET requests; 404s when the widget cannot be loaded."""
        try:
            self.render_json({
                'widget': NonInteractiveWidget.get_with_params(widget_id, {}),
            })
        except Exception:
            # Catch Exception rather than using a bare 'except:', which
            # would also swallow SystemExit and KeyboardInterrupt.
            raise self.PageNotFoundException

    def post(self, widget_id):
        """Handles POST requests, for parameterized widgets."""
        params = self.payload.get('params', {})
        if isinstance(params, list):
            # Convert [{'name': ..., 'default_value': ...}, ...] into a
            # plain {name: default_value} dict.
            new_params = {}
            for item in params:
                new_params[item['name']] = item['default_value']
            params = new_params

        state_params_dict = {}
        state_params_given = self.payload.get('state_params')
        if state_params_given:
            for param in state_params_given:
                # Pick a random parameter for each key.
                state_params_dict[param['name']] = (
                    utils.get_random_choice(param['values']))

        # TODO(sll): In order to unify this with InteractiveWidgetHandler,
        # we need a convention for which params must be JSONified and which
        # should not. Fix this.
        # NOTE: unlike InteractiveWidgetHandler.post, state_params_dict is
        # currently not applied to params here (see TODO above).
        response = NonInteractiveWidget.get_with_params(widget_id, params)
        self.render_json({
            'widget': response,
            'parent_index': self.request.get('parent_index'),
        })
class InteractiveWidgetHandler(BaseHandler):
    """Handles requests relating to interactive widgets."""

    def get(self, widget_id):
        """Handles GET requests; 404s when the widget cannot be loaded."""
        try:
            self.render_json({
                'widget': InteractiveWidget.get_with_params(widget_id, {}),
            })
        except Exception:
            # Catch Exception rather than using a bare 'except:', which
            # would also swallow SystemExit and KeyboardInterrupt.
            raise self.PageNotFoundException

    def post(self, widget_id):
        """Handles POST requests, for parameterized widgets."""
        params = self.payload.get('params', {})
        if isinstance(params, list):
            # Convert [{'name': ..., 'default_value': ...}, ...] into a
            # plain {name: default_value} dict.
            new_params = {}
            for item in params:
                new_params[item['name']] = item['default_value']
            params = new_params

        # For each state parameter, pick one random candidate value;
        # these are substituted into the widget params below.
        state_params_dict = {}
        state_params_given = self.payload.get('state_params')
        if state_params_given:
            for param in state_params_given:
                # Pick a random parameter for each key.
                state_params_dict[param['name']] = (
                    utils.get_random_choice(param['values']))

        response = InteractiveWidget.get_with_params(
            widget_id, params=utils.parse_dict_with_params(
                params, state_params_dict)
        )
        self.render_json({'widget': response})
| {
"content_hash": "285539565c0d60c75d28c6b285d4671e",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 78,
"avg_line_length": 35.359154929577464,
"alnum_prop": 0.5925114519020116,
"repo_name": "sunu/oppia-test-4",
"id": "b7b5b225701482f856ee049182411aac7958a40b",
"size": "5619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oppia/controllers/widgets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "5224054"
},
{
"name": "Python",
"bytes": "285341"
},
{
"name": "Shell",
"bytes": "11309"
}
],
"symlink_target": ""
} |
from copy import deepcopy
import glob
import os
import textwrap
import time
# Module-wide verbosity level; 0 silences all reporting helpers below.
verbose = 0
# Shell command string echoed by _changeWithCommand().
cmd = None
# Snapshot of the last observed state per watched path:
# directory path -> set of watched entries, file path -> last mod time
# (see startupReport / _changeWithDetail).
changes = dict()
def setLevel(level):
    """Set the module-wide verbosity level used by the reporters."""
    global verbose
    verbose = level
def setCmd(command):
    """Record the shell command that _changeWithCommand() reports."""
    global cmd
    cmd = command
def runAtLevel(level):
    """Decorator factory: the wrapped function runs only when the
    module-wide ``verbose`` level is at least ``level``; otherwise the
    call is skipped and '' is returned instead.
    """
    def outer(f):
        def decoration(*args, **kwargs):
            # 'verbose' is looked up at call time, so later setLevel()
            # calls take effect on already-decorated functions.
            if verbose >= level:
                return f(*args, **kwargs)
            return ""
        return decoration
    # Removed: an unused inner 'doNothing' function (dead code) and a
    # no-op 'global verbose' declaration (reading a global needs none).
    return outer
@runAtLevel(3)
def startupReport(files):
global changes
for f in files:
if f.isdir:
changes[f.path] = deepcopy(set(f.watching))
print "[watch] Directory " + str(f) + "/ initialized with contents:"
for fcontent in f.watching:
print " " + str(f) + " => " + str(fcontent)
else:
changes[f.path] = f.mod_time
print "[watch] File " + str(f) + " initialized with time " + str(f.mod_time)
@runAtLevel(2)
def globExpandReport(f):
print textwrap.fill("[watch] Glob expanding " + str(f) + " -> " + ", ".join(glob.glob(f)), initial_indent='', subsequent_indent=' ')
@runAtLevel(2)
def pathExpandReport(files):
print "[watch] Expanded paths:"
for f in files:
print " " + f + " -> " + os.path.abspath(f)
def changeReport(f):
output = _changeSimple(f) + _changeWithDetail(f) + _changeWithCommand(f)
print output
@runAtLevel(1)
def _changeSimple(f):
    """Return a one-line, timestamped summary of a change."""
    timestamp = "[" + time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) + "]"
    kind = "Directory" if f.isdir else "File"
    return timestamp + " " + kind + " " + str(f) + " has changed.\n"
@runAtLevel(3)
def _changeWithDetail(f):
    """Return detail lines for a change — which entries of a directory
    changed, or the old/new mtime of a plain file — and refresh the
    snapshot stored in 'changes'."""
    global changes
    if f.isdir:
        now = set(f.watching)
        then = changes[f.path]
        if now == then:
            # Same membership: report entries that changed in place.
            # BUG FIX: the original looped with 'for f in then', clobbering
            # the parameter so the snapshot below was stored under the last
            # member's path instead of the directory's. Use a distinct
            # loop variable.
            output = ""
            for member in then:
                if member.hasChanged():
                    output += "    (file " + str(member) + " has changed)\n"
        # list the files that have been added or removed
        else:
            difference = (now - then) | (then - now)
            output = ""
            for d in difference:
                output += "    (file " + str(d) + " has changed)\n"
        changes[f.path] = deepcopy(now)
    else:
        output = "    (from " + str(changes[f.path]) + " to " + str(f.mod_time) + ")\n"
        changes[f.path] = f.mod_time
    return output
@runAtLevel(2)
def _changeWithCommand(f):
    """Announce the shell command about to be run for this change."""
    global cmd
    announcement = "[watch] Running '" + cmd + "'"
    return announcement
| {
"content_hash": "d5fb56577bba7bc86d4c5570bb67bcc5",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 136,
"avg_line_length": 19.355371900826448,
"alnum_prop": 0.6182749786507259,
"repo_name": "swirepe/Watch",
"id": "62b71ea028805af12a9bf89d18146f0d1b845585",
"size": "2342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "verbose.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7449"
}
],
"symlink_target": ""
} |
import os
import sys
import time
import numpy
import theano
import nonlinearity
import theano.tensor as T
from theano.tensor.signal import downsample
from theano.tensor.nnet import conv
import theano.tensor.nnet as Tnn
class ConvMaxPool(object):
    """
    Pool Layer of a convolutional network

    Builds the symbolic graph for a 2D convolution followed by
    max-pooling, a per-feature-map bias, and an optional elementwise
    activation.

    Parameters:
        rng: numpy RandomState used to initialize the filters.
        filter_shape: (n_filters, n_input_maps, filter_h, filter_w).
        image_shape: (batch, n_input_maps, img_h, img_w).
        poolsize: max-pool downsampling factors (rows, cols).
        border_mode: 'valid', 'same' or 'full' convolution.
        activation: elementwise nonlinearity applied to the pooled output.
        std: NOTE(review): accepted but never used by this class.
    """
    def __init__(self, rng, filter_shape, image_shape, poolsize=(2, 2), border_mode='same', activation=None, std=1e-2):
        # The number of input feature maps must agree between the image
        # and the filters.
        assert image_shape[1] == filter_shape[1]
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        # pooling size
        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) /
                   numpy.prod(poolsize))
        # Uniform initialization bound chosen from fan-in/fan-out
        # (Glorot-style); same bound for both activation families here.
        if activation == nonlinearity.tanh or activation == Tnn.sigmoid:
            # initialize weights with random weights
            W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        elif activation == nonlinearity.softplus or activation == nonlinearity.relu:
            #W_bound = nonlinearity.initialize_matrix(rng, fan_in, fan_out)
            W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        else:
            raise Exception('Unknown activation in ConvMaxPool layer.')
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )
        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)
        # store parameters of this layer
        self.params = [self.W, self.b]
        self.poolsize = poolsize
        self.filter_shape = filter_shape
        self.image_shape = image_shape
        self.border_mode = border_mode
        self.activation = activation

    def output(self, input):
        """Build the symbolic output: conv -> max-pool -> bias -> activation."""
        # convolve input feature maps with filters
        if self.border_mode == 'valid':
            conv_out = conv.conv2d(
                input=input,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=self.image_shape,
                border_mode='valid'
            )
        elif self.border_mode == 'same':
            # 'same' is emulated as a 'full' convolution followed by
            # cropping the padding back off. NOTE(review): the
            # (filter - 1) / 2 arithmetic relies on Python 2 integer
            # division and appears to assume odd filter sizes — confirm.
            conv_out = conv.conv2d(
                input=input,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=self.image_shape,
                border_mode='full'
            )
            padding_w = theano.shared((self.filter_shape[2] - 1) / 2)
            padding_h = theano.shared((self.filter_shape[3] - 1) / 2)
            conv_out = conv_out[:,:,padding_w:-padding_w,padding_h:-padding_h]
        elif self.border_mode == 'full':
            conv_out = conv.conv2d(
                input=input,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=self.image_shape,
                border_mode='full'
            )
        else:
            raise Exception('Unknown conv type')

        # downsample each feature map individually, using maxpooling
        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            # (1, 1) pooling is a no-op; skip the downsample node.
            pooled_out = conv_out
        else:
            pooled_out = downsample.max_pool_2d(
                input=conv_out,
                ds=self.poolsize,
                ignore_border=True
            )
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )

    def drop_output(self, input, drop=0, rng=None, p=0.5):
        """Like output(), but with dropout applied when the symbolic
        'drop' flag is nonzero (p is the drop probability).

        NOTE(review): the graph below duplicates output() verbatim up to
        the activation; candidate for deduplication.
        """
        # convolve input feature maps with filters
        if self.border_mode == 'valid':
            conv_out = conv.conv2d(
                input=input,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=self.image_shape,
                border_mode='valid'
            )
        elif self.border_mode == 'same':
            # Same 'full'-then-crop emulation as in output().
            conv_out = conv.conv2d(
                input=input,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=self.image_shape,
                border_mode='full'
            )
            padding_w = theano.shared((self.filter_shape[2] - 1) / 2)
            padding_h = theano.shared((self.filter_shape[3] - 1) / 2)
            conv_out = conv_out[:,:,padding_w:-padding_w,padding_h:-padding_h]
        elif self.border_mode == 'full':
            conv_out = conv.conv2d(
                input=input,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=self.image_shape,
                border_mode='full'
            )
        else:
            raise Exception('Unknown conv type')

        # downsample each feature map individually, using maxpooling
        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            pooled_out = conv_out
        else:
            pooled_out = downsample.max_pool_2d(
                input=conv_out,
                ds=self.poolsize,
                ignore_border=True
            )
        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = pooled_out + self.b.dimshuffle('x', 0, 'x', 'x')
        output = (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
        droppedOutput = nonlinearity.dropout(rng, output, p)
        # Select the dropped or clean branch at run time via 'drop'.
        return T.switch(T.neq(drop, 0), droppedOutput, output)
"content_hash": "19f3e67de7b6d517d46c3ef2a79c3fe1",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 119,
"avg_line_length": 37.6566265060241,
"alnum_prop": 0.5490321548552232,
"repo_name": "zhenxuan00/mmdgm",
"id": "8d4f4778d4d5826efc1b728f4f10cecffd7ce9e5",
"size": "6251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conv-mmdgm/layer/ConvMaxPool.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "787794"
},
{
"name": "Shell",
"bytes": "3369"
}
],
"symlink_target": ""
} |
# Public API of the ptvsd package.
__all__ = [
    '__version__', '__author__',
    'enable_attach', 'wait_for_attach', 'break_into_debugger', 'is_attached',
]

# "force_pydevd" must be imported first to ensure (via side effects)
# that the ptvsd-vendored copy of pydevd gets used.
from ._vendored import force_pydevd
from ptvsd.version import __version__, __author__
from ptvsd.attach_server import (
    enable_attach, wait_for_attach, break_into_debugger, is_attached,
)

# The name was only needed for its import side effects; drop it from
# the module namespace.
del force_pydevd
| {
"content_hash": "8932ec4b49ae19d0e232a2450ae3dd07",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 77,
"avg_line_length": 31.533333333333335,
"alnum_prop": 0.6701902748414377,
"repo_name": "SlicerRt/SlicerDebuggingTools",
"id": "d3bd8623aa26f5c41465e0b3099caa95847a59b0",
"size": "629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "406"
},
{
"name": "C",
"bytes": "13361"
},
{
"name": "C++",
"bytes": "105521"
},
{
"name": "CMake",
"bytes": "21408"
},
{
"name": "Cython",
"bytes": "69580"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Python",
"bytes": "3900091"
},
{
"name": "Shell",
"bytes": "737"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from rock.tasks import flow_utils
LOG = logging.getLogger(__name__)
class HostDisable(flow_utils.BaseTask):
    """Task that disables the nova-compute service on a target host."""

    default_provides = "host_disable_result"

    def execute(self, target, disabled_reason, host_evacuate_result):
        """Disable nova-compute on ``target``.

        :param target: hostname whose nova-compute service is disabled.
        :param disabled_reason: reason string recorded with the disable.
        :param host_evacuate_result: outcome of the preceding evacuate
            task; when falsy the host is left untouched.
        :returns: True if the service reports 'disabled', else False.
        """
        if not host_evacuate_result:
            LOG.warning("Evacuate failed, not disabling host.")
            return False
        # BUG FIX: the format was "Host %ss." and rendered e.g.
        # "Host node1s.".
        LOG.info("Trying to disable Host %s.", target)
        n_client = flow_utils.get_nova_client()
        response = n_client.services.disable_log_reason(
            host=target,
            binary='nova-compute',
            reason=disabled_reason
        )
        LOG.info("Host %s disabled for reason %s.", target, disabled_reason)
        if response.status == 'disabled':
            LOG.info("Host %s disabled successfully.", target)
            return True
        else:
            LOG.error("Host %s disabled failed, reason %s.", target, response)
            return False
| {
"content_hash": "333b11818d82770aee5f2ca2f75628e5",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 30.848484848484848,
"alnum_prop": 0.5943025540275049,
"repo_name": "unitedstack/rock",
"id": "6aa18c1fac4d7f981d61d17497c6652b93b9cc73",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rock/tasks/host_disable.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "106599"
},
{
"name": "Shell",
"bytes": "4392"
}
],
"symlink_target": ""
} |
def list_tables(dataset_id):
    """Print every table in the given BigQuery dataset."""
    # [START bigquery_list_tables]
    from google.cloud import bigquery

    # Construct a BigQuery client object.
    client = bigquery.Client()

    # TODO(developer): Set dataset_id to the ID of the dataset that contains
    #                  the tables you are listing.
    # dataset_id = 'your-project.your_dataset'

    tables = client.list_tables(dataset_id)  # Make an API request.

    print("Tables contained in '{}':".format(dataset_id))
    for table in tables:
        full_table_id = "{}.{}.{}".format(
            table.project, table.dataset_id, table.table_id)
        print(full_table_id)
    # [END bigquery_list_tables]
| {
"content_hash": "d7508357c9380eb6c0c4bb997f55c2dd",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 81,
"avg_line_length": 32.73684210526316,
"alnum_prop": 0.6479099678456591,
"repo_name": "tswast/google-cloud-python",
"id": "9ab527a4915f4fb0c412f3bbd5552e5597c70267",
"size": "1199",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigquery/samples/list_tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
# Demonstration script: encode sudoku puzzles of increasing difficulty
# as SAT formulas and solve them with the project's SAT solver.
import sys
sys.path.append("./src")
from sat import SAT_solver
from sudoku import sudoku, printSudoku, processResult
from logConstructs import FlatCNF

print "================================================="
print "SUDOKU"
print "================================================="

solver = SAT_solver()

# define board as follows.
# board is array with nine arrays (rows).
# rows are arrays of nine elements.
# elements are None or int in [1,9].
# None - empty square.
board = [[None, 8, None, 1, 6, None, None, None, 7],
         [1, None, 7, 4, None, 3, 6, None, None],
         [3, None, None, 5, None, None, 4, 2, None],
         [None, 9, None, None, 3, 2, 7, None, 4],
         [None, None, None, None, None, None, None, None, None],
         [2, None, 4, 8, 1, None, None, 6, None],
         [None, 4, 1, None, None, 8, None, None, 6],
         [None, None, 6, 7, None, 1, 9, None, 3],
         [7, None, None, None, 9, 6, None, 4, None]]

# print sudoku from board definition.
print "Lab exercise:"
print printSudoku(board)
# construct logical formula from board definition.
formula = sudoku(board)
# solve formula using SAT solver (the second argument presumably
# enables multithreading — TODO confirm against SAT_solver.solve).
#result = solver.solve(FlatCNF(formula))
result = solver.solve(formula, True)
print "Solution:"
# process and print result of sat solver.
print printSudoku(processResult(result[1]))

medium_board = [[None, None, 5, None, 6, 3, 1, 2, None],
                [None, None, 9, None, None, 1, None, 5, None],
                [1, None, None, None, None, 8, 9, None, 6],
                [None, None, None, None, 5, None, 8, None, 2],
                [None, 5, None, None, None, None, None, 1, None],
                [6, None, 1, None, 9, None, None, None, None],
                [9, None, 6, 2, None, None, None, None, 1],
                [None, 1, None, 6, None, None, 4, None, None],
                [None, 4, 7, 3, 1, None, 2, None, None]]

print "Medium problem:"
print printSudoku(medium_board)
#result = solver.solve(FlatCNF(sudoku(medium_board)))
result = solver.solve(sudoku(medium_board), True)
print "Solution:"
# process and print result of sat solver.
print printSudoku(processResult(result[1]))

hard_board = [[None, None, 2, None, 8, None, None, 3, None],
              [None, None, 5, 1, 9, None, None, None, 2],
              [None, 8, None, None, None, None, None, 4, None],
              [None, 9, 6, None, 5, None, None, None, None],
              [2, None, 8, None, None, None, 3, None, 4],
              [None, None, None, None, 3, None, 6, 9, None],
              [None, 3, None, None, None, None, None, 2, None],
              [8, None, None, None, 4, 6, 7, None, None],
              [None, 6, None, None, 1, None, 5, None, None]]

print "Hard problem:"
print printSudoku(hard_board)
#result = solver.solve(FlatCNF(sudoku(hard_board)))
result = solver.solve(sudoku(hard_board), True)
print "Solution:"
# process and print result of sat solver.
print printSudoku(processResult(result[1]))

evil_board = [[None, 3, 6, None, 9, None, None, None, None],
              [None, None, None, None, None, None, None, 4, 1],
              [None, None, None, 7, 4, None, None, None, 3],
              [None, 9, 1, None, None, None, None, None, None],
              [2, None, None, 5, None, 3, None, None, 6],
              [None, None, None, None, None, None, 2, 1, None],
              [5, None, None, None, 2, 4, None, None, None],
              [6, 8, None, None, None, None, None, None, None],
              [None, None, None, None, 7, None, 6, 2, None]]

print "Evil problem:"
print printSudoku(evil_board)
#result = solver.solve(FlatCNF(sudoku(evil_board)))
result = solver.solve(sudoku(evil_board), True)
print "Solution:"
# process and print result of sat solver.
print printSudoku(processResult(result[1]))

#no solution sudoku
# (the duplicated 3 in the first row makes this board unsatisfiable)
evil_board = [[None, 3, 6, None, 9, None, None, None, 3],
              [None, None, None, None, None, None, None, 4, 1],
              [None, None, None, 7, 4, None, None, None, 3],
              [None, 9, 1, None, None, None, None, None, None],
              [2, None, None, 5, None, 3, None, None, 6],
              [None, None, None, None, None, None, 2, 1, None],
              [5, None, None, None, 2, 4, None, None, None],
              [6, 8, None, None, None, None, None, None, None],
              [None, None, None, None, 7, None, 6, 2, None]]

print "Evil problem (no solution):"
print printSudoku(evil_board)
#result = solver.solve(FlatCNF(sudoku(evil_board)))
result = solver.solve(sudoku(evil_board), True)
print "Solution:"
if not result[0]:
    # result[0] is the satisfiability flag.
    print "No solution."
else:
    # process and print result of sat solver.
    print printSudoku(processResult(result[1]))
| {
"content_hash": "c77528aad99e44eb8d1ac770ff71dd9f",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 63,
"avg_line_length": 35.0078125,
"alnum_prop": 0.5942869895112698,
"repo_name": "urska19/LVR-sat",
"id": "00f0153d408051d45c194a3cea98b427860691f2",
"size": "4525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sudoku_example_mt.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "102113"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (accounts 0013).

    Re-adds ``Account.company`` as a nullable FK to ``common.Company``
    (SET_NULL on delete) and repoints ``assigned_to`` / ``created_by``
    at ``common.Profile``.
    """

    dependencies = [
        ("common", "0034_auto_20210913_1918"),
        ("accounts", "0012_remove_account_company"),
    ]

    operations = [
        migrations.AddField(
            model_name="account",
            name="company",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to="common.company",
            ),
        ),
        migrations.AlterField(
            model_name="account",
            name="assigned_to",
            field=models.ManyToManyField(
                related_name="account_assigned_users", to="common.Profile"
            ),
        ),
        migrations.AlterField(
            model_name="account",
            name="created_by",
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                related_name="account_created_by",
                to="common.profile",
            ),
        ),
    ]
| {
"content_hash": "fc513791c69bef71bde3d63bfe6c76a6",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 74,
"avg_line_length": 28.825,
"alnum_prop": 0.5108412836079792,
"repo_name": "MicroPyramid/Django-CRM",
"id": "5531f09d3af06f4d5f606f1f68e0f612331c0471",
"size": "1200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/migrations/0013_auto_20210913_1918.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "819"
},
{
"name": "HTML",
"bytes": "299393"
},
{
"name": "Python",
"bytes": "888791"
},
{
"name": "Shell",
"bytes": "1035"
}
],
"symlink_target": ""
} |
""" Vanilla RNN
@author Graham Taylor
"""
import numpy as np
import theano
import theano.tensor as T
from sklearn.base import BaseEstimator
import logging
import time
import os
import datetime
import pickle as pickle
import math
import matplotlib.pyplot as plt
# Turn on matplotlib interactive mode so figures update without blocking.
plt.ion()

# Theano execution mode used when compiling functions (C VM linker).
mode = theano.Mode(linker='cvm')
#mode = 'DEBUG_MODE'
class RNN(object):
""" Recurrent neural network class
Supported output types:
real : linear output units, use mean-squared error
binary : binary output units, use cross-entropy error
softmax : single softmax out, use cross-entropy error
"""
def __init__(self, input, n_in, n_hidden, n_out, activation=T.tanh,
output_type='real', use_symbolic_softmax=False):
self.input = input
self.activation = activation
self.output_type = output_type
# when using HF, SoftmaxGrad.grad is not implemented
# use a symbolic softmax which is slightly slower than T.nnet.softmax
# See: http://groups.google.com/group/theano-dev/browse_thread/
# thread/3930bd5a6a67d27a
if use_symbolic_softmax:
def symbolic_softmax(x):
e = T.exp(x)
return e / T.sum(e, axis=1).dimshuffle(0, 'x')
self.softmax = symbolic_softmax
else:
self.softmax = T.nnet.softmax
# recurrent weights as a shared variable
W_init = np.asarray(np.random.uniform(size=(n_hidden, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W = theano.shared(value=W_init, name='W')
# input to hidden layer weights
W_in_init = np.asarray(np.random.uniform(size=(n_in, n_hidden),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_in = theano.shared(value=W_in_init, name='W_in')
# hidden to output layer weights
W_out_init = np.asarray(np.random.uniform(size=(n_hidden, n_out),
low=-.01, high=.01),
dtype=theano.config.floatX)
self.W_out = theano.shared(value=W_out_init, name='W_out')
h0_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.h0 = theano.shared(value=h0_init, name='h0')
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
self.bh = theano.shared(value=bh_init, name='bh')
by_init = np.zeros((n_out,), dtype=theano.config.floatX)
self.by = theano.shared(value=by_init, name='by')
self.params = [self.W, self.W_in, self.W_out, self.h0,
self.bh, self.by]
# for every parameter, we maintain it's last update
# the idea here is to use "momentum"
# keep moving mostly in the same direction
self.updates = {}
for param in self.params:
init = np.zeros(param.get_value(borrow=True).shape,
dtype=theano.config.floatX)
self.updates[param] = theano.shared(init)
# recurrent function (using tanh activation function) and linear output
# activation function
def step(x_t, h_tm1):
h_t = self.activation(T.dot(x_t, self.W_in) + \
T.dot(h_tm1, self.W) + self.bh)
y_t = T.dot(h_t, self.W_out) + self.by
return h_t, y_t
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
[self.h, self.y_pred], _ = theano.scan(step,
sequences=self.input,
outputs_info=[self.h0, None])
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = 0
self.L1 += abs(self.W.sum())
self.L1 += abs(self.W_in.sum())
self.L1 += abs(self.W_out.sum())
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = 0
self.L2_sqr += (self.W ** 2).sum()
self.L2_sqr += (self.W_in ** 2).sum()
self.L2_sqr += (self.W_out ** 2).sum()
if self.output_type == 'real':
self.loss = lambda y: self.mse(y)
elif self.output_type == 'binary':
# push through sigmoid
self.p_y_given_x = T.nnet.sigmoid(self.y_pred) # apply sigmoid
self.y_out = T.round(self.p_y_given_x) # round to {0,1}
self.loss = lambda y: self.nll_binary(y)
elif self.output_type == 'softmax':
# push through softmax, computing vector of class-membership
# probabilities in symbolic form
self.p_y_given_x = self.softmax(self.y_pred)
# compute prediction as class whose probability is maximal
self.y_out = T.argmax(self.p_y_given_x, axis=-1)
self.loss = lambda y: self.nll_multiclass(y)
else:
raise NotImplementedError
def mse(self, y):
# error between output and target
return T.mean((self.y_pred - y) ** 2)
def nll_binary(self, y):
# negative log likelihood based on binary cross entropy error
return T.mean(T.nnet.binary_crossentropy(self.p_y_given_x, y))
def nll_multiclass(self, y):
# negative log likelihood based on multiclass cross entropy error
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of time steps (call it T) in the sequence
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
    """Zero-one loss over the sequence: fraction of time steps whose
    predicted label differs from the true label.

    :type y: theano.tensor.TensorType
    :param y: vector giving the correct label for each example
    :raises TypeError: if *y* and ``self.y_out`` differ in rank
    :raises NotImplementedError: for non-integer label dtypes
    """
    # guard: y must have the same dimensionality as the prediction
    if y.ndim != self.y_out.ndim:
        raise TypeError('y should have the same shape as self.y_out',
                        ('y', y.type, 'y_out', self.y_out.type))
    if self.output_type not in ('binary', 'softmax'):
        # real-valued outputs have no discrete error notion
        # (original code fell through and returned None here)
        return None
    if not y.dtype.startswith('int'):
        # only integer label dtypes are supported
        raise NotImplementedError()
    # T.neq yields a vector of 0s/1s; 1 marks a wrong prediction
    return T.mean(T.neq(self.y_out, y))
class MetaRNN(BaseEstimator):
    """scikit-learn-style wrapper around the symbolic RNN.

    Builds the Theano computation graph (``ready``), compiles
    prediction functions, and provides momentum-SGD training over whole
    sequences (``fit``) plus pickle-based persistence (``save``/``load``).
    """

    def __init__(self, n_in=5, n_hidden=50, n_out=5, learning_rate=0.01,
                 n_epochs=100, L1_reg=0.00, L2_reg=0.00, learning_rate_decay=1,
                 activation='tanh', output_type='real',
                 final_momentum=0.9, initial_momentum=0.5,
                 momentum_switchover=5,
                 use_symbolic_softmax=False):
        # Hyper-parameters are coerced to plain Python types so that
        # sklearn-style get_params/set_params round-trips cleanly.
        self.n_in = int(n_in)
        self.n_hidden = int(n_hidden)
        self.n_out = int(n_out)
        self.learning_rate = float(learning_rate)
        self.learning_rate_decay = float(learning_rate_decay)
        self.n_epochs = int(n_epochs)
        self.L1_reg = float(L1_reg)
        self.L2_reg = float(L2_reg)
        self.activation = activation
        self.output_type = output_type
        self.initial_momentum = float(initial_momentum)
        self.final_momentum = float(final_momentum)
        self.momentum_switchover = int(momentum_switchover)
        self.use_symbolic_softmax = use_symbolic_softmax
        self.ready()

    def ready(self):
        """(Re)build the symbolic graph and compile predict functions."""
        # input (where first dimension is time)
        self.x = T.matrix()
        # target (where first dimension is time); dtype depends on task
        if self.output_type == 'real':
            self.y = T.matrix(name='y', dtype=theano.config.floatX)
        elif self.output_type == 'binary':
            self.y = T.matrix(name='y', dtype='int32')
        elif self.output_type == 'softmax':  # only vector labels supported
            self.y = T.vector(name='y', dtype='int32')
        else:
            raise NotImplementedError
        # initial hidden state of the RNN
        self.h0 = T.vector()
        # learning rate
        self.lr = T.scalar()
        # map the activation name to a symbolic elementwise function
        if self.activation == 'tanh':
            activation = T.tanh
        elif self.activation == 'sigmoid':
            activation = T.nnet.sigmoid
        elif self.activation == 'relu':
            activation = lambda x: x * (x > 0)
        elif self.activation == 'cappedrelu':
            # ReLU clipped at 6 (as in "ReLU6")
            activation = lambda x: T.minimum(x * (x > 0), 6)
        else:
            raise NotImplementedError
        self.rnn = RNN(input=self.x, n_in=self.n_in,
                       n_hidden=self.n_hidden, n_out=self.n_out,
                       activation=activation, output_type=self.output_type,
                       use_symbolic_softmax=self.use_symbolic_softmax)
        # compile prediction function(s); classification types also get
        # a predict_proba returning the raw class probabilities
        if self.output_type == 'real':
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_pred,
                                           mode=mode)
        elif self.output_type == 'binary':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=T.round(self.rnn.p_y_given_x),
                                           mode=mode)
        elif self.output_type == 'softmax':
            self.predict_proba = theano.function(inputs=[self.x, ],
                                                 outputs=self.rnn.p_y_given_x, mode=mode)
            self.predict = theano.function(inputs=[self.x, ],
                                           outputs=self.rnn.y_out, mode=mode)
        else:
            raise NotImplementedError

    def shared_dataset(self, data_xy):
        """ Load the dataset into shared variables """
        data_x, data_y = data_xy
        shared_x = theano.shared(np.asarray(data_x,
                                            dtype=theano.config.floatX))
        shared_y = theano.shared(np.asarray(data_y,
                                            dtype=theano.config.floatX))
        if self.output_type in ('binary', 'softmax'):
            # labels are stored as floatX for GPU transfer, then cast
            # back to int32 for use as indices
            return shared_x, T.cast(shared_y, 'int32')
        else:
            return shared_x, shared_y

    def __getstate__(self):
        """ Return state sequence."""
        # NOTE(review): modern sklearn's public API is get_params();
        # _get_params() only exists in old sklearn versions -- confirm
        # against the installed version.
        params = self._get_params()  # parameters set in constructor
        weights = [p.get_value() for p in self.rnn.params]
        state = (params, weights)
        return state

    def _set_weights(self, weights):
        """ Set fittable parameters from weights sequence.
        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        i = iter(weights)
        for param in self.rnn.params:
            # i.next() is the Python 2 iterator protocol (next(i) in py3)
            param.set_value(i.next())

    def __setstate__(self, state):
        """ Set parameters from state sequence.
        Parameters must be in the order defined by self.params:
            W, W_in, W_out, h0, bh, by
        """
        params, weights = state
        self.set_params(**params)
        # rebuild the graph before loading weights into it
        self.ready()
        self._set_weights(weights)

    def save(self, fpath='.', fname=None):
        """ Save a pickled representation of Model state. """
        fpathstart, fpathext = os.path.splitext(fpath)
        if fpathext == '.pkl':
            # User supplied an absolute path to a pickle file
            fpath, fname = os.path.split(fpath)
        elif fname is None:
            # Generate filename based on date
            date_obj = datetime.datetime.now()
            date_str = date_obj.strftime('%Y-%m-%d-%H:%M:%S')
            class_name = self.__class__.__name__
            fname = '%s.%s.pkl' % (class_name, date_str)
        fabspath = os.path.join(fpath, fname)
        logging.info("Saving to %s ..." % fabspath)
        file = open(fabspath, 'wb')
        state = self.__getstate__()
        pickle.dump(state, file, protocol=pickle.HIGHEST_PROTOCOL)
        file.close()

    def load(self, path):
        """ Load model parameters from path. """
        logging.info("Loading from %s ..." % path)
        file = open(path, 'rb')
        state = pickle.load(file)
        self.__setstate__(state)
        file.close()

    def fit(self, X_train, Y_train, X_test=None, Y_test=None,
            validation_frequency=100):
        """ Fit model

        Pass in X_test, Y_test to compute test error and report during
        training.

        X_train : ndarray (n_seq x n_steps x n_in)
        Y_train : ndarray (n_seq x n_steps x n_out)

        validation_frequency : int
            in terms of number of sequences (or number of weight updates)
        """
        # py2 builtin file(); training progress is appended to this log
        f = file('trainOutput.txt', 'a+')
        if X_test is not None:
            assert(Y_test is not None)
            self.interactive = True
            test_set_x, test_set_y = self.shared_dataset((X_test, Y_test))
        else:
            self.interactive = False
        train_set_x, train_set_y = self.shared_dataset((X_train, Y_train))
        n_train = train_set_x.get_value(borrow=True).shape[0]
        if self.interactive:
            n_test = test_set_x.get_value(borrow=True).shape[0]
        ######################
        # BUILD ACTUAL MODEL #
        ######################
        logging.info('... building the model')
        index = T.lscalar('index')    # index to a case
        # learning rate (may change)
        l_r = T.scalar('l_r', dtype=theano.config.floatX)
        mom = T.scalar('mom', dtype=theano.config.floatX)  # momentum
        # training objective: task loss + L1/L2 regularization penalties
        cost = self.rnn.loss(self.y) \
            + self.L1_reg * self.rnn.L1 \
            + self.L2_reg * self.rnn.L2_sqr
        compute_train_error = theano.function(inputs=[index, ],
                                              outputs=self.rnn.loss(self.y),
                                              givens={
                                                  self.x: train_set_x[index],
                                                  self.y: train_set_y[index]},
                                              mode=mode)
        if self.interactive:
            compute_test_error = theano.function(inputs=[index, ],
                                                 outputs=self.rnn.loss(self.y),
                                                 givens={
                                                     self.x: test_set_x[index],
                                                     self.y: test_set_y[index]},
                                                 mode=mode)
        # compute the gradient of cost with respect to theta = (W, W_in, W_out)
        # gradients on the weights using BPTT
        gparams = []
        for param in self.rnn.params:
            gparam = T.grad(cost, param)
            gparams.append(gparam)
        # momentum SGD: each parameter has a companion shared variable
        # (self.rnn.updates[param]) holding its previous weight update
        updates = {}
        for param, gparam in zip(self.rnn.params, gparams):
            weight_update = self.rnn.updates[param]
            upd = mom * weight_update - l_r * gparam
            updates[weight_update] = upd
            updates[param] = param + upd
        # compiling a Theano function `train_model` that returns the
        # cost, but in the same time updates the parameter of the
        # model based on the rules defined in `updates`
        train_model = theano.function(inputs=[index, l_r, mom],
                                      outputs=cost,
                                      updates=updates,
                                      givens={
                                          self.x: train_set_x[index],
                                          self.y: train_set_y[index]},
                                      mode=mode)
        ###############
        # TRAIN MODEL #
        ###############
        logging.info('... training')
        epoch = 0
        while (epoch < self.n_epochs):
            epoch = epoch + 1
            for idx in xrange(n_train):
                # momentum switches from initial to final after
                # momentum_switchover epochs
                effective_momentum = self.final_momentum \
                    if epoch > self.momentum_switchover \
                    else self.initial_momentum
                example_cost = train_model(idx, self.learning_rate,
                                           effective_momentum)
                # iteration number (how many weight updates have we made?)
                # epoch is 1-based, index is 0 based
                iter = (epoch - 1) * n_train + idx + 1
                if iter % validation_frequency == 0:
                    # compute loss on training set
                    train_losses = [compute_train_error(i)
                                    for i in xrange(n_train)]
                    this_train_loss = np.mean(train_losses)
                    if self.interactive:
                        test_losses = [compute_test_error(i)
                                       for i in xrange(n_test)]
                        this_test_loss = np.mean(test_losses)
                        f.write('epoch %i, seq %i/%i, tr loss %f '
                                'te loss %f lr: %f \n' % \
                                (epoch, idx + 1, n_train,
                                 this_train_loss, this_test_loss, self.learning_rate))
                    else:
                        f.write('epoch %i, seq %i/%i, train loss %f '
                                'lr: %f \n' % \
                                (epoch, idx + 1, n_train, this_train_loss,
                                 self.learning_rate))
            # anneal the learning rate once per epoch
            self.learning_rate *= self.learning_rate_decay
        f.close()
def test_real():
    """Test RNN with real-valued outputs.

    Trains on a synthetic lag task (each target column is a delayed
    copy of an input column plus noise), plots the first sequence's
    true vs. predicted outputs, then prints the per-column RMSE and the
    average RMSE over columns for sequence 0.
    """
    n_hidden = 200
    n_in = 20
    n_out = 5
    n_steps = 10
    n_seq = 100
    np.random.seed(0)
    # simple lag test: targets are delayed copies of selected input dims
    seq = np.random.randn(n_seq, n_steps, n_in)
    targets = np.zeros((n_seq, n_steps, n_out))
    targets[:, 1:, 0] = seq[:, :-1, 3]  # delayed 1
    targets[:, 1:, 1] = seq[:, :-1, 2]  # delayed 1
    targets[:, 2:, 2] = seq[:, :-2, 0]  # delayed 2
    targets += 0.01 * np.random.standard_normal(targets.shape)
    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=400, activation='tanh')
    model.fit(seq, targets, validation_frequency=1000)
    seqNum, lineNum, colNum = targets.shape
    print(seqNum, lineNum, colNum)
    # plot input and true vs. predicted output for the first sequence
    plt.close('all')
    fig = plt.figure()
    ax1 = plt.subplot(211)
    plt.plot(seq[0])
    ax1.set_title('input')
    ax2 = plt.subplot(212)
    true_targets = plt.plot(targets[0])
    guess = model.predict(seq[0])
    guessed_targets = plt.plot(guess, linestyle='--')
    for i, x in enumerate(guessed_targets):
        x.set_color(true_targets[i].get_color())
    ax2.set_title('solid: true output, dashed: model output')
    # per-column RMSE between prediction and target for sequence 0
    dif = abs(guess - targets[0])
    linedif, coldif = dif.shape
    print(linedif, coldif)
    errorsum = 0.0
    for i in range(colNum):
        # avoid shadowing the builtin `sum`; accumulate squared error
        sq_sum = 0.0
        for j in range(lineNum):
            sq_sum += dif[j][i] ** 2
        col_rmse = math.sqrt(sq_sum / lineNum)
        errorsum += col_rmse
        print(col_rmse)
    print("average error = ", errorsum / colNum)
def test_binary(multiple_out=False, n_epochs=250):
    """Test RNN with binary outputs.

    Reads whitespace-separated integer rows from two data files
    (train and test), where every `n_steps` lines form one sequence;
    the first `n_in` columns are inputs and the rest are binary
    targets.  Trains the model, then writes per-timestep error counts
    and the sequence-level error rate to errorRate.txt.

    :param multiple_out: unused; kept for interface compatibility
    :param n_epochs: number of training epochs
    """
    n_hidden = 50
    n_in = 69
    n_out = 8
    n_steps = 20
    n_seq = 500
    np.random.seed(0)

    def load_sequences(path):
        """Parse *path* into an (n_seq, n_steps, n_cols) int array.

        Each non-empty line is one time step; every `n_steps` lines
        (blank lines included in the count) close one sequence.
        """
        seqs = []
        block = []
        count = 0
        for line in open(path):
            count += 1
            row = [int(x) for x in line.split()]
            if row:
                block.append(row)
            if count == n_steps:
                count = 0
                if block:
                    seqs.append(block)
                block = []
        return np.asarray(seqs)

    # previously this parsing loop was duplicated for train and test
    arr = load_sequences("inputdata-b04-500-20.txt")
    seq = arr[:, :, :n_in]
    targets = arr[:, :, n_in:]
    arrTest = load_sequences("inputdata-b04-200-20.txt")
    seqTest = arrTest[:, :, :n_in]
    targetsTest = arrTest[:, :, n_in:]

    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh', output_type='binary')
    model.fit(seq, targets, seqTest, targetsTest, validation_frequency=1000)

    # evaluate on the test set: a time step counts as wrong if any of
    # its output bits is off by more than 0.5
    ferror = open('errorRate.txt', 'a+')
    seqNum, lineNum, colNum = targetsTest.shape
    error = [0 for _ in range(lineNum * seqNum)]
    errorsum = 0
    for k in xrange(seqNum):
        guess = model.predict_proba(seqTest[k])
        dif = abs(guess - targetsTest[k])
        lineDif, colDif = dif.shape
        for i in range(lineDif):
            ki = k * lineDif + i
            for j in range(colDif):
                if dif[i][j] > 0.5:
                    error[ki] += 1
            ferror.write('error %d = %d \n' % (ki, error[ki]))
            if error[ki] > 0:
                errorsum += 1
    print(errorsum)
    errorRate = errorsum / 1.0 / seqNum / lineNum
    ferror.write("average error = %f \n" % (errorRate))
    # previously the handle was leaked; close it explicitly
    ferror.close()
## seqs = xrange(1)
##
## [seqNum,lineNum,colNum] = targets.shape
## print(seqNum,lineNum,colNum)
## error = [0 for i in range(colNum)]
##
## plt.close('all')
## for seq_num in seqs:
## fig = plt.figure()
## ax1 = plt.subplot(211)
## plt.plot(seq[seq_num])
## ax1.set_title('input')
## ax2 = plt.subplot(212)
## true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
##
## guess = model.predict_proba(seq[seq_num])
## guessed_targets = plt.step(xrange(n_steps), guess)
## plt.setp(guessed_targets, linestyle='--', marker='d')
## for i, x in enumerate(guessed_targets):
## x.set_color(true_targets[i].get_color())
## ax2.set_ylim((-0.1, 1.1))
## ax2.set_title('solid: true output, dashed: model output (prob)')
##
##
## dif = abs(guess - targets[seq_num])
## [lineDif,colDif] = dif.shape
## print(lineDif,colDif)
## errorsum = 0
## for i in range (colNum):
## for j in range (lineNum):
## if (dif[j][i] > 0.5):
## error[i] += 1
## print(error[i])
## errorsum += error[i]
## print("average error = ", errorsum/colNum)
def test_softmax(n_epochs=250):
    """ Test RNN with softmax outputs. """
    n_hidden = 10
    n_in = 5
    n_steps = 10
    n_seq = 100
    n_classes = 3
    n_out = n_classes  # restricted to single softmax per time step
    np.random.seed(0)
    # simple lag test
    seq = np.random.randn(n_seq, n_steps, n_in)
    # np.int is the builtin-int alias (removed in numpy >= 1.24)
    targets = np.zeros((n_seq, n_steps), dtype=np.int)
    thresh = 0.5
    # if lag 1 (dim 3) is greater than lag 2 (dim 0) + thresh
    # class 1
    # if lag 1 (dim 3) is less than lag 2 (dim 0) - thresh
    # class 2
    # if lag 2(dim0) - thresh <= lag 1 (dim 3) <= lag2(dim0) + thresh
    # class 0
    targets[:, 2:][seq[:, 1:-1, 3] > seq[:, :-2, 0] + thresh] = 1
    targets[:, 2:][seq[:, 1:-1, 3] < seq[:, :-2, 0] - thresh] = 2
    #targets[:, 2:, 0] = np.cast[np.int](seq[:, 1:-1, 3] > seq[:, :-2, 0])
    model = MetaRNN(n_in=n_in, n_hidden=n_hidden, n_out=n_out,
                    learning_rate=0.001, learning_rate_decay=0.999,
                    n_epochs=n_epochs, activation='tanh',
                    output_type='softmax', use_symbolic_softmax=False)
    model.fit(seq, targets, validation_frequency=1000)
    seqs = xrange(10)
    [seqNum, lineNum, colNum] = seq.shape
    print(seqNum, lineNum, colNum)
    error = [0 for i in range(colNum)]
    plt.close('all')
    for seq_num in seqs:
        fig = plt.figure()
        ax1 = plt.subplot(211)
        plt.plot(seq[seq_num])
        ax1.set_title('input??')
        ax2 = plt.subplot(212)
        # blue line will represent true classes
        true_targets = plt.step(xrange(n_steps), targets[seq_num], marker='o')
        # show probabilities (in b/w) output by model
        guess = model.predict_proba(seq[seq_num])
        guessed_probs = plt.imshow(guess.T, interpolation='nearest',
                                   cmap='gray')
        ax2.set_title('blue: true class, grayscale: probs assigned by model')
        # NOTE(review): this subtracts the integer labels (shape
        # (n_steps,)) from the raw *input* sequence (shape
        # (n_steps, n_in)); those shapes do not broadcast, so this line
        # presumably raises ValueError at runtime -- `guess` was likely
        # intended instead of `seq[seq_num]`.  Confirm before enabling
        # this test (it is currently commented out in __main__).
        dif = abs(seq[seq_num] - targets[seq_num])
        # NOTE(review): indexing dif[i, j] with i over colNum and j over
        # lineNum looks transposed relative to dif's (row, col) layout;
        # verify against the intended RMSE computation.
        for i in range(colNum):
            sum = 0
            for j in range(lineNum):
                sum += dif[i, j] ** 2
            error[i] = math.sqrt(sum / lineNum)
            print(error[i])
if __name__ == "__main__":
    # Configure file logging.  The keyword is `datefmt` -- the original
    # `datafmt` was a typo that logging.basicConfig silently ignored,
    # so the date format never took effect.
    logging.basicConfig(
        level=logging.INFO,
        format='LINE %(lineno)-4d %(levelname)-8s %(message)s',
        datefmt='%m-%d %H:%M',
        # NOTE: hard-coded Windows path; must exist or basicConfig fails
        filename="D:/logresult20160123/one.log",
        filemode='w')
    t0 = time.time()
    #test_real()
    # problem takes more epochs to solve
    test_binary(multiple_out=True, n_epochs=600)
    #test_softmax(n_epochs=250)
    print("Elapsed time: %f" % (time.time() - t0))
| {
"content_hash": "6e365c3b3abcc252f46f7710835387ce",
"timestamp": "",
"source": "github",
"line_count": 714,
"max_line_length": 80,
"avg_line_length": 36.745098039215684,
"alnum_prop": 0.526719012044519,
"repo_name": "mengyun1993/RNN-binary",
"id": "e1377a219a830fcd7e6f37248f902a10384d4c80",
"size": "26236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "history code/rnn20160124.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "801900"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.