| text (string) | meta (dict) |
|---|---|
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.test.client import Client
import mock
from mock import patch
from nose.tools import eq_, ok_
from remo.base.tests import RemoTestCase, requires_permission
from remo.featuredrep.models import FeaturedRep
from remo.featuredrep.tests import FeaturedRepFactory
from remo.profiles.tests import UserFactory
class ViewsTest(RemoTestCase):
def test_get_as_admin(self):
user = UserFactory.create(groups=['Admin'])
featured = FeaturedRepFactory.create()
with self.login(user) as client:
response = client.get(reverse('featuredrep_edit_featured', args=[featured.id]))
self.assertJinja2TemplateUsed(response, 'featuredrep_alter.jinja')
def test_get_as_council(self):
user = UserFactory.create(groups=['Council'])
featured = FeaturedRepFactory.create()
with self.login(user) as client:
response = client.get(reverse('featuredrep_edit_featured', args=[featured.id]))
self.assertJinja2TemplateUsed(response, 'featuredrep_alter.jinja')
@requires_permission()
def test_get_as_other_user(self):
user = UserFactory.create()
featured = FeaturedRepFactory.create()
with self.login(user) as client:
client.get(reverse('featuredrep_edit_featured', args=[featured.id]), follow=True)
def test_get_list_featured_page(self):
"""Get list featuredrep page."""
UserFactory.create(groups=['Admin'])
FeaturedRepFactory.create_batch(3)
response = Client().get(reverse('featuredrep_list_featured'))
self.assertJinja2TemplateUsed(response, 'featuredrep_list.jinja')
@patch('remo.featuredrep.views.messages.success')
@patch('remo.featuredrep.views.redirect', wraps=redirect)
@patch('remo.featuredrep.views.forms.FeaturedRepForm')
def test_add_new_featured(self, form_mock, redirect_mock, messages_mock):
form_mock.is_valid.return_value = True
user = UserFactory.create(groups=['Admin'])
with self.login(user) as client:
response = client.post(reverse('featuredrep_add_featured'), follow=True)
eq_(response.status_code, 200)
messages_mock.assert_called_with(mock.ANY, 'New featured rep article created.')
ok_(form_mock().save.called)
@patch('remo.featuredrep.views.messages.success')
@patch('remo.featuredrep.views.redirect', wraps=redirect)
@patch('remo.featuredrep.views.forms.FeaturedRepForm')
def test_edit_featured(self, form_mock, redirect_mock, messages_mock):
form_mock.is_valid.return_value = True
featured = FeaturedRepFactory.create()
user = UserFactory.create(groups=['Admin'])
with self.login(user) as client:
response = client.post(reverse('featuredrep_edit_featured', args=[featured.id]),
user=user, follow=True)
eq_(response.status_code, 200)
messages_mock.assert_called_with(
mock.ANY, 'Featured rep article successfuly edited.')
ok_(form_mock().save.called)
@patch('remo.featuredrep.views.redirect', wraps=redirect)
def test_delete_featured(self, redirect_mock):
user = UserFactory.create(groups=['Admin'])
featured = FeaturedRepFactory.create()
with self.login(user) as client:
client.post(reverse('featuredrep_delete_featured', args=[featured.id]))
ok_(not FeaturedRep.objects.filter(pk=featured.id).exists())
| {
"content_hash": "f6bcea0928a1e550ed4b1a64e0798324",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 93,
"avg_line_length": 45.41025641025641,
"alnum_prop": 0.6863354037267081,
"repo_name": "tsmrachel/remo",
"id": "739a869ff45ee6450991e60662e678f57ccebf97",
"size": "3542",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "remo/featuredrep/tests/test_views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "993"
},
{
"name": "CSS",
"bytes": "316677"
},
{
"name": "HTML",
"bytes": "333690"
},
{
"name": "JavaScript",
"bytes": "593637"
},
{
"name": "Python",
"bytes": "755215"
},
{
"name": "Shell",
"bytes": "715"
},
{
"name": "Smarty",
"bytes": "215"
}
],
"symlink_target": ""
} |
class MySQL:
def __init__(self, db):
self.__db = db
def pre_load(self):
# do nothing
return
def load(self, check_id, results):
for result in results:
responsetime = result.get('responsetime', None)
self.__db.query(
'REPLACE INTO pingdom_check_result (`check_id`, `at`, `probe_id`, `status`, `status_desc`, `status_desc_long`, `response_time`)\
VALUES (:check_id, FROM_UNIXTIME(:at), :probe_id, :status, :status_desc, :status_desc_long, :response_time)',
check_id=check_id,
at=result['time'],
probe_id=result['probeid'],
status=result['status'],
status_desc=result['statusdesc'],
status_desc_long=result['statusdesclong'],
response_time=responsetime
)
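# Illustrative usage sketch (the FakeDB class and the sample record below are
# made up for demonstration): it shows the shape of the Pingdom result dicts
# that load() expects and how the named query parameters map onto them. The
# real `db` object only needs a query(sql, **params) method like the one used
# above.
if __name__ == '__main__':
    class FakeDB(object):
        def query(self, sql, **params):
            print(sorted(params.items()))
    loader = MySQL(FakeDB())
    loader.pre_load()
    loader.load(check_id=12345, results=[{
        'time': 1489000000,           # unix timestamp, stored via FROM_UNIXTIME
        'probeid': 42,
        'status': 'up',
        'statusdesc': 'OK',
        'statusdesclong': 'OK',
        'responsetime': 231,          # optional; load() falls back to None
    }])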
| {
"content_hash": "bc483b914a183c9bf5dbb0069c4648c1",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 144,
"avg_line_length": 39.86363636363637,
"alnum_prop": 0.5267958950969214,
"repo_name": "entering/pingdomexport",
"id": "ba6ba535e5d276300789e4ca57c76a1a499867d5",
"size": "877",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pingdomexport/load/check_results_mysql.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "59583"
}
],
"symlink_target": ""
} |
from django import forms
class AddServerForm(forms.Form):
displayName = forms.CharField(label='Name', max_length=32)
address = forms.CharField(label='Host Address', max_length=32)
port = forms.IntegerField(label='Host Port')
class AddJobForm(forms.Form):
command = forms.CharField(label='Command', max_length = 256)
priority = forms.IntegerField(label='Priority')
| {
"content_hash": "122622cfa9cdc0d0c9289e354d3e16f0",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 66,
"avg_line_length": 38.6,
"alnum_prop": 0.7305699481865285,
"repo_name": "ewerkema/job-scheduler",
"id": "2fd6936aaa954ebd0bfe1708ed2b51b1bc4726b1",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "job_scheduler_web/scheduler_web/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4954"
},
{
"name": "HTML",
"bytes": "12288"
},
{
"name": "Java",
"bytes": "92845"
},
{
"name": "Python",
"bytes": "27724"
}
],
"symlink_target": ""
} |
from .resource import Resource
class RouteTable(Resource):
"""Route table resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param location: Resource location.
:type location: str
:param tags: Resource tags.
:type tags: dict
:param routes: Collection of routes contained within a route table.
:type routes: list of :class:`Route
<azure.mgmt.network.v2017_06_01.models.Route>`
:ivar subnets: A collection of references to subnets.
:vartype subnets: list of :class:`Subnet
<azure.mgmt.network.v2017_06_01.models.Subnet>`
:param provisioning_state: The provisioning state of the resource.
Possible values are: 'Updating', 'Deleting', and 'Failed'.
:type provisioning_state: str
:param etag: Gets a unique read-only string that changes whenever the
resource is updated.
:type etag: str
"""
_validation = {
'name': {'readonly': True},
'type': {'readonly': True},
'subnets': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'routes': {'key': 'properties.routes', 'type': '[Route]'},
'subnets': {'key': 'properties.subnets', 'type': '[Subnet]'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, id=None, location=None, tags=None, routes=None, provisioning_state=None, etag=None):
super(RouteTable, self).__init__(id=id, location=location, tags=tags)
self.routes = routes
self.subnets = None
self.provisioning_state = provisioning_state
self.etag = etag
| {
"content_hash": "f2b9ea6cb85db5eafb8de4e19f53f8c4",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 107,
"avg_line_length": 36.54385964912281,
"alnum_prop": 0.6048967834853577,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "aad1840df0f2baacc4518ab8f0f2ba1a29b33aa6",
"size": "2557",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2017_06_01/models/route_table.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
import sys
import os
import pygame
from pygame.locals import *
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import game_lib
'''
def load_image(image_path):
from PIL import Image
from PIL import ImageDraw
image = Image.open(image_path)
draw = ImageDraw.Draw(image)
mode = image.mode
size = image.size
data = image.tobytes()
return pygame.image.fromstring(data, size, mode)
'''
pygame.init()
FPS = 30 # frames per second setting
fpsClock = pygame.time.Clock()
# set up the window
DISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)
pygame.display.set_caption('Animation')
WHITE = (255, 255, 255)
#catImg = pygame.image.load('cat.png')
catImg = game_lib.load_image('cat.png')
catx = 10
caty = 10
direction = 'right'
while True: # the main game loop
DISPLAYSURF.fill(WHITE)
if direction == 'right':
catx += 5
if catx == 280:
direction = 'down'
elif direction == 'down':
caty += 5
if caty == 220:
direction = 'left'
elif direction == 'left':
catx -= 5
if catx == 10:
direction = 'up'
elif direction == 'up':
caty -= 5
if caty == 10:
direction = 'right'
DISPLAYSURF.blit(catImg, (catx, caty))
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
pygame.display.update()
fpsClock.tick(FPS)
| {
"content_hash": "07652a4f3683a51e43c284df85956969",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 72,
"avg_line_length": 21.61764705882353,
"alnum_prop": 0.5965986394557823,
"repo_name": "CospanDesign/python",
"id": "b2b86758cb9a74e8561cf91ef10f8e99ef33cda9",
"size": "1494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/animation/animation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "124288"
},
{
"name": "C++",
"bytes": "7418"
},
{
"name": "Jupyter Notebook",
"bytes": "802"
},
{
"name": "Makefile",
"bytes": "1265"
},
{
"name": "Python",
"bytes": "907010"
},
{
"name": "SWIG",
"bytes": "321"
},
{
"name": "Shell",
"bytes": "8831"
},
{
"name": "Verilog",
"bytes": "267332"
}
],
"symlink_target": ""
} |
import mock
from oslo_serialization import jsonutils
from heat.common import identifier
from heat.common import template_format
from heat.engine.clients.os import glance
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine.resources.aws.cfn.wait_condition_handle import (
WaitConditionHandle)
from heat.engine.resources.aws.ec2 import instance
from heat.engine.resources.openstack.nova import server
from heat.engine import scheduler
from heat.engine import service
from heat.engine import stack as stk
from heat.engine import template as tmpl
from heat.tests import common
from heat.tests import utils
TEST_TEMPLATE_METADATA = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"S1": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"AWS::CloudFormation::Init" : {
"config" : {
"files" : {
"/tmp/random_file" : {
"content" : { "Fn::Join" : ["", [
"s2-ip=", {"Fn::GetAtt": ["S2", "PublicIp"]}
]]},
"mode" : "000400",
"owner" : "root",
"group" : "root"
}
}
}
}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
},
"S2": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
}
}
}
'''
TEST_TEMPLATE_WAIT_CONDITION = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Just a WaitCondition.",
"Parameters" : {
"KeyName" : {"Type" : "String", "Default": "mine" },
},
"Resources" : {
"WH" : {
"Type" : "AWS::CloudFormation::WaitConditionHandle"
},
"S1": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : { "Fn::Join" : [ "", [ "#!/bin/bash -v\n",
"echo ",
{ "Ref" : "WH" },
"\n" ] ] }
}
},
"WC" : {
"Type" : "AWS::CloudFormation::WaitCondition",
"DependsOn": "S1",
"Properties" : {
"Handle" : {"Ref" : "WH"},
"Timeout" : "5"
}
},
"S2": {
"Type": "AWS::EC2::Instance",
"Metadata" : {
"test" : {"Fn::GetAtt": ["WC", "Data"]}
},
"Properties": {
"ImageId" : "a",
"InstanceType" : "m1.large",
"KeyName" : { "Ref" : "KeyName" },
"UserData" : "#!/bin/bash -v\n"
}
}
}
}
'''
TEST_TEMPLATE_SERVER = '''
heat_template_version: 2013-05-23
resources:
instance1:
type: OS::Nova::Server
metadata: {"template_data": {get_attr: [instance2, networks]}}
properties:
image: cirros-0.3.2-x86_64-disk
flavor: m1.small
key_name: stack_key
instance2:
type: OS::Nova::Server
metadata: {'apples': 'pears'}
properties:
image: cirros-0.3.2-x86_64-disk
flavor: m1.small
key_name: stack_key
'''
class MetadataRefreshTest(common.HeatTestCase):
@mock.patch.object(nova.NovaClientPlugin, 'find_flavor_by_name_or_id')
@mock.patch.object(glance.GlanceClientPlugin, 'find_image_by_name_or_id')
@mock.patch.object(instance.Instance, 'handle_create')
@mock.patch.object(instance.Instance, 'check_create_complete')
@mock.patch.object(instance.Instance, 'FnGetAtt')
def test_FnGetAtt_metadata_updated(self, mock_get, mock_check,
mock_handle, *args):
"""Tests that metadata gets updated when FnGetAtt return changes."""
# Setup
temp = template_format.parse(TEST_TEMPLATE_METADATA)
template = tmpl.Template(temp,
env=environment.Environment({}))
ctx = utils.dummy_context()
stack = stk.Stack(ctx, 'test_stack', template, disable_rollback=True)
stack.store()
self.stub_KeypairConstraint_validate()
# Configure FnGetAtt to return different values on subsequent calls
mock_get.side_effect = [
'10.0.0.1',
'10.0.0.2',
]
# Initial resolution of the metadata
stack.create()
# Sanity check on S2
s2 = stack['S2']
self.assertEqual((s2.CREATE, s2.COMPLETE), s2.state)
# Verify S1 is using the initial value from S2
s1 = stack['S1']
content = self._get_metadata_content(s1.metadata_get())
self.assertEqual('s2-ip=10.0.0.1', content)
# Run metadata update to pick up the new value from S2
s1.metadata_update()
s2.metadata_update()
# Verify the updated value is correct in S1
content = self._get_metadata_content(s1.metadata_get())
self.assertEqual('s2-ip=10.0.0.2', content)
# Verify outgoing calls
mock_get.assert_has_calls([
mock.call('PublicIp'),
mock.call('PublicIp')])
self.assertEqual(2, mock_handle.call_count)
self.assertEqual(2, mock_check.call_count)
@staticmethod
def _get_metadata_content(m):
tmp = m['AWS::CloudFormation::Init']['config']['files']
return tmp['/tmp/random_file']['content']
class WaitConditionMetadataUpdateTest(common.HeatTestCase):
def setUp(self):
super(WaitConditionMetadataUpdateTest, self).setUp()
self.man = service.EngineService('a-host', 'a-topic')
self.man.create_periodic_tasks()
@mock.patch.object(nova.NovaClientPlugin, 'find_flavor_by_name_or_id')
@mock.patch.object(glance.GlanceClientPlugin, 'find_image_by_name_or_id')
@mock.patch.object(instance.Instance, 'handle_create')
@mock.patch.object(instance.Instance, 'check_create_complete')
@mock.patch.object(instance.Instance, 'is_service_available')
@mock.patch.object(scheduler.TaskRunner, '_sleep')
@mock.patch.object(WaitConditionHandle, 'identifier')
def test_wait_metadata(self, mock_identifier, mock_sleep, mock_available,
mock_check, mock_handle, *args):
"""Tests a wait condition metadata update after a signal call."""
mock_available.return_value = (True, None)
# Setup Stack
temp = template_format.parse(TEST_TEMPLATE_WAIT_CONDITION)
template = tmpl.Template(temp)
ctx = utils.dummy_context()
stack = stk.Stack(ctx, 'test-stack', template, disable_rollback=True)
stack.store()
self.stub_KeypairConstraint_validate()
res_id = identifier.ResourceIdentifier('test_tenant_id', stack.name,
stack.id, '', 'WH')
mock_identifier.return_value = res_id
watch = stack['WC']
inst = stack['S2']
# Setup Sleep Behavior
self.run_empty = True
def check_empty(sleep_time):
self.assertEqual('{}', watch.FnGetAtt('Data'))
self.assertIsNone(inst.metadata_get()['test'])
def update_metadata(unique_id, data, reason):
self.man.resource_signal(ctx,
dict(stack.identifier()),
'WH',
{'Data': data, 'Reason': reason,
'Status': 'SUCCESS',
'UniqueId': unique_id},
sync_call=True)
def post_success(sleep_time):
update_metadata('123', 'foo', 'bar')
def side_effect_popper(sleep_time):
if self.run_empty:
self.run_empty = False
check_empty(sleep_time)
else:
post_success(sleep_time)
mock_sleep.side_effect = side_effect_popper
# Test Initial Creation
stack.create()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
self.assertEqual('{"123": "foo"}', watch.FnGetAtt('Data'))
self.assertEqual('{"123": "foo"}', inst.metadata_get()['test'])
# Test Update
update_metadata('456', 'blarg', 'wibble')
self.assertEqual({'123': 'foo', '456': 'blarg'},
jsonutils.loads(watch.FnGetAtt('Data')))
self.assertEqual('{"123": "foo"}',
inst.metadata_get()['test'])
self.assertEqual(
{'123': 'foo', '456': 'blarg'},
jsonutils.loads(inst.metadata_get(refresh=True)['test']))
# Verify outgoing calls
self.assertTrue(mock_available.call_count > 0)
self.assertEqual(2, mock_handle.call_count)
self.assertEqual(2, mock_check.call_count)
class MetadataRefreshServerTest(common.HeatTestCase):
@mock.patch.object(nova.NovaClientPlugin, 'find_flavor_by_name_or_id',
return_value=1)
@mock.patch.object(glance.GlanceClientPlugin, 'find_image_by_name_or_id',
return_value=1)
@mock.patch.object(server.Server, 'handle_create')
@mock.patch.object(server.Server, 'check_create_complete')
@mock.patch.object(server.Server, 'FnGetAtt')
def test_FnGetAtt_metadata_update(self, mock_get, mock_check,
mock_handle, *args):
temp = template_format.parse(TEST_TEMPLATE_SERVER)
template = tmpl.Template(temp,
env=environment.Environment({}))
ctx = utils.dummy_context()
stack = stk.Stack(ctx, 'test-stack', template, disable_rollback=True)
stack.store()
self.stub_KeypairConstraint_validate()
# Note dummy addresses are from TEST-NET-1 ref rfc5737
mock_get.side_effect = ['192.0.2.1', '192.0.2.2', '192.0.2.2']
# Test
stack.create()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
s1 = stack['instance1']
md = s1.metadata_get()
self.assertEqual({u'template_data': '192.0.2.1'}, md)
# Now set some metadata via the resource, like is done by
# _populate_deployments_metadata. This should be persisted over
# calls to metadata_update()
new_md = {u'template_data': '192.0.2.2', 'set_by_rsrc': 'orange'}
s1.metadata_set(new_md)
md = s1.metadata_get(refresh=True)
self.assertEqual(new_md, md)
s1.metadata_update()
md = s1.metadata_get(refresh=True)
self.assertEqual(new_md, md)
# Verify outgoing calls
mock_get.assert_has_calls([
mock.call('networks'),
mock.call('networks')])
self.assertEqual(2, mock_handle.call_count)
self.assertEqual(2, mock_check.call_count)
| {
"content_hash": "a8aea653f76dc9828202aedb92e1eaae",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 77,
"avg_line_length": 34.21406727828746,
"alnum_prop": 0.5538076510547014,
"repo_name": "cwolferh/heat-scratch",
"id": "4fdf2d6c049014cb5c7f0ef981de379cf78ad3f7",
"size": "11729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_metadata_refresh.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
} |
"""
A minimal front end to the Docutils Publisher, producing HTML slides using
the S5 template system.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates S5 (X)HTML slideshow documents from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='s5', description=description)
| {
"content_hash": "0f246df0adf935d34030cc47b1e2ef19",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 25.38888888888889,
"alnum_prop": 0.7308533916849015,
"repo_name": "santisiri/popego",
"id": "3cb6123ec5d402149e15c78bdc9824e4106d72ac",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/docutils-0.4-py2.5.egg/EGG-INFO/scripts/rst2s5.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
} |
import hyperion, time, math, random
randomCenter = bool(hyperion.args.get('random-center', False))
centerX = float(hyperion.args.get('center_x', -0.15))
centerY = float(hyperion.args.get('center_y', -0.25))
rotationTime = float(hyperion.args.get('rotation_time', 90))
colors = hyperion.args.get('colors', ((255,0,0),(255,255,0),(0,255,0),(0,255,255),(0,0,255),(255,0,255)))
reverse = bool(hyperion.args.get('reverse', False))
reverseTime = int(hyperion.args.get('reverse_time', 0))
#rotate = bool(hyperion.args.get('rotate', True))
positions = []
# calc center if random
if randomCenter:
centerX = random.uniform(0.0, 1.0)
centerY = random.uniform(0.0, 1.0)
rCenterX = int(round(float(hyperion.imageWidth())*centerX))
rCenterY = int(round(float(hyperion.imageHeight())*centerY))
#calc interval
sleepTime = max(1/(255/rotationTime), 0.016)
#calc diagonal
if centerX < 0.5:
cX = 1.0-centerX
else:
cX = 0.0+centerX
if centerY < 0.5:
cY = 1.0-centerY
else:
cY = 0.0+centerY
diag = int(round(math.sqrt(((cX*hyperion.imageWidth())**2)+((cY*hyperion.imageHeight())**2))))
# some diagonal overhead
diag = int(diag*1.3)
# calc positions
pos = 0
step = int(255/len(colors))
for _ in colors:
positions.append(pos)
pos += step
# target time
targetTime = time.time()+float(reverseTime)
#hyperion.imageCOffset(int(hyperion.imageWidth()/2), int(hyperion.imageHeight()/2))
while not hyperion.abort():
    # if reverseTime is set, reverse direction after a random interval between reverseTime and reverseTime*2
if reverseTime >= 1:
now = time.time()
if now > targetTime:
reverse = not reverse
targetTime = time.time()+random.uniform(float(reverseTime), float(reverseTime*2.0))
# apply rotate
#if rotate:
# hyperion.imageCRotate(1)
# prepare bytearray with colors and positions
gradientBa = bytearray()
it = 0
for color in colors:
gradientBa += bytearray((positions[it],color[0],color[1],color[2]))
it += 1
hyperion.imageRadialGradient(rCenterX,rCenterY, diag, gradientBa,0)
# increment positions
for i, pos in enumerate(positions):
if reverse:
positions[i] = pos - 1 if pos >= 1 else 255
else:
positions[i] = pos + 1 if pos <= 254 else 0
hyperion.imageShow()
time.sleep(sleepTime)
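# Standalone sketch of the gradient layout built above (the colors mentioned in
# the trailing comment are illustrative): each color contributes four bytes
# (position, red, green, blue) to gradientBa, positions are spaced
# int(255/len(colors)) apart and shift by one step per frame, and sleepTime is
# chosen so one full 255-step cycle takes roughly rotation_time seconds, with a
# floor of about 16 ms per frame (roughly 60 fps).
def _demo_gradient_bytes(demo_colors):
    step = int(255 / len(demo_colors))
    demo_positions = [i * step for i in range(len(demo_colors))]
    ba = bytearray()
    for p, (r, g, b) in zip(demo_positions, demo_colors):
        ba += bytearray((p, r, g, b))
    return demo_positions, list(ba)
# For ((255,0,0), (0,255,0), (0,0,255)) this yields positions [0, 85, 170] and
# bytes [0,255,0,0, 85,0,255,0, 170,0,0,255].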
| {
"content_hash": "410d75d9f07aa7166fb07074abdce9ef",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 111,
"avg_line_length": 29.064935064935064,
"alnum_prop": 0.6921358355674709,
"repo_name": "hyperion-project/hyperion.ng",
"id": "05299dcc070acbd5cf7a763c5d12cffcefa6d63b",
"size": "2238",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "effects/waves.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "2316"
},
{
"name": "C",
"bytes": "35637"
},
{
"name": "C++",
"bytes": "1855655"
},
{
"name": "CMake",
"bytes": "164022"
},
{
"name": "CSS",
"bytes": "43584"
},
{
"name": "HTML",
"bytes": "108839"
},
{
"name": "JavaScript",
"bytes": "698740"
},
{
"name": "Python",
"bytes": "105194"
},
{
"name": "Shell",
"bytes": "30879"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import os
import shutil
class HardForkDetectionTest(BitcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
self.nodes = []
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
with open(self.alert_filename, 'w') as f:
pass # Just open then close to create zero-length file
self.nodes.append(start_node(0, self.options.tmpdir,
["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
def assert_safemode_off(self):
self.nodes[0].getbalance()
def assert_safemode_on(self, requiredMessage):
errorString = ""
try:
self.nodes[0].getbalance()
        except JSONRPCException as e:
errorString = e.error['message']
assert_equal("Safe mode:" in errorString, True)
assert_equal(requiredMessage in errorString, True)
def run_test(self):
        # Generate 100 blocks
self.nodes[0].generate(100)
# Invalidate all of them.
for block_height in range(100, 0, -1):
block_hash = self.nodes[0].getblockhash(block_height)
self.nodes[0].invalidateblock(block_hash)
# Check that safe mode is on.
self.assert_safemode_on("We do not appear to fully agree with our peers!")
# Check that an -alertnotify was triggered.
with open(self.alert_filename, 'r') as f:
alert_text = f.read()
if len(alert_text) == 0:
raise AssertionError("-alertnotify did not warn of detected hard fork")
# If our chain keeps growing, but the hard forking chain remains longer,
# safe mode should stay on.
self.nodes[0].generate(50)
self.assert_safemode_on("We do not appear to fully agree with our peers!")
# If we're on the longer side of the hard fork, safe mode should get
# turned off.
self.nodes[0].generate(50)
self.assert_safemode_off()
if __name__ == '__main__':
HardForkDetectionTest().main()
| {
"content_hash": "e71f7064b501f098dbb83d1c8936b108",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 108,
"avg_line_length": 35.12903225806452,
"alnum_prop": 0.6221303948576676,
"repo_name": "nginnever/zogminer",
"id": "d399dc9648b935c5cb3ebd6dfe97923c5f982a76",
"size": "2234",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/hardforkdetection.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1610"
},
{
"name": "C",
"bytes": "915842"
},
{
"name": "C++",
"bytes": "116344247"
},
{
"name": "CMake",
"bytes": "4640"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "19797"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "144169"
},
{
"name": "Makefile",
"bytes": "87774"
},
{
"name": "Objective-C",
"bytes": "3277"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Perl",
"bytes": "6275"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "442535"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Shell",
"bytes": "79271"
}
],
"symlink_target": ""
} |
"""Conditional Gradient optimizer."""
import tensorflow as tf
from tensorflow_addons.optimizers import KerasLegacyOptimizer
from tensorflow_addons.utils.types import FloatTensorLike
from typeguard import typechecked
from typing import Union, Callable
@tf.keras.utils.register_keras_serializable(package="Addons")
class ConditionalGradient(KerasLegacyOptimizer):
"""Optimizer that implements the Conditional Gradient optimization.
    This optimizer handles constrained optimization problems; currently only
    the Frobenius norm constraint and the nuclear norm constraint are
    supported.
See https://arxiv.org/pdf/1803.06453.pdf
```
variable -= (1-learning_rate) * (variable + lambda_ * gradient
/ (frobenius_norm(gradient) + epsilon))
```
Note that `lambda_` here refers to the constraint "lambda" in
the paper. `epsilon` is constant with tiny value as compared to
the value of frobenius norm of gradient. The purpose of `epsilon`
here is to avoid the case that the value of frobenius norm of
gradient is 0.
In this implementation, `epsilon` defaults to $10^{-7}$.
    For the nuclear norm constraint, the formula is as follows:
```
variable -= (1-learning_rate) * (variable
+ lambda_ * top_singular_vector(gradient))
```
"""
@typechecked
def __init__(
self,
learning_rate: Union[FloatTensorLike, Callable],
lambda_: Union[FloatTensorLike, Callable] = 0.01,
epsilon: FloatTensorLike = 1e-7,
ord: str = "fro",
name: str = "ConditionalGradient",
**kwargs,
):
"""Construct a new conditional gradient optimizer.
Args:
            learning_rate: A `Tensor`, a floating point value, or a schedule
                that is a `tf.keras.optimizers.schedules.LearningRateSchedule`.
                The learning rate.
lambda_: A `Tensor` or a floating point value. The constraint.
epsilon: A `Tensor` or a floating point value. A small constant
for numerical stability when handling the case of norm of
gradient to be zero.
ord: Order of the norm. Supported values are `'fro'`
and `'nuclear'`. Default is `'fro'`, which is frobenius norm.
name: Optional name prefix for the operations created when
applying gradients. Defaults to 'ConditionalGradient'.
**kwargs: keyword arguments. Allowed to be {`clipnorm`,
`clipvalue`, `lr`, `decay`}. `clipnorm` is clip gradients
by norm; `clipvalue` is clip gradients by value, `decay` is
included for backward compatibility to allow time inverse
decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
"""
super().__init__(name=name, **kwargs)
self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
self._set_hyper("lambda_", lambda_)
self.epsilon = epsilon or tf.keras.backend.epsilon()
supported_norms = ["fro", "nuclear"]
if ord not in supported_norms:
raise ValueError(
"'ord' must be a supported matrix norm in %s, got '%s' instead"
% (supported_norms, ord)
)
self.ord = ord
def get_config(self):
config = {
"learning_rate": self._serialize_hyperparameter("learning_rate"),
"lambda_": self._serialize_hyperparameter("lambda_"),
"epsilon": self.epsilon,
"ord": self.ord,
}
base_config = super().get_config()
return {**base_config, **config}
def _create_slots(self, var_list):
for v in var_list:
self.add_slot(v, "conditional_gradient")
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)]["learning_rate"] = tf.identity(
self._get_hyper("learning_rate", var_dtype)
)
apply_state[(var_device, var_dtype)]["lambda_"] = tf.identity(
self._get_hyper("lambda_", var_dtype)
)
apply_state[(var_device, var_dtype)]["epsilon"] = tf.convert_to_tensor(
self.epsilon, var_dtype
)
@staticmethod
def _frobenius_norm(m):
return tf.reduce_sum(m**2) ** 0.5
@staticmethod
def _top_singular_vector(m):
# handle the case where m is a tensor of rank 0 or rank 1.
# Example:
# scalar (rank 0) a, shape []=> [[a]], shape [1,1]
# vector (rank 1) [a,b], shape [2] => [[a,b]], shape [1,2]
original_rank = tf.rank(m)
shape = tf.shape(m)
first_pad = tf.cast(tf.less(original_rank, 2), dtype=tf.int32)
second_pad = tf.cast(tf.equal(original_rank, 0), dtype=tf.int32)
new_shape = tf.concat(
[
tf.ones(shape=first_pad, dtype=tf.int32),
tf.ones(shape=second_pad, dtype=tf.int32),
shape,
],
axis=0,
)
n = tf.reshape(m, new_shape)
st, ut, vt = tf.linalg.svd(n, full_matrices=False)
n_size = tf.shape(n)
ut = tf.reshape(ut[:, 0], [n_size[0], 1])
vt = tf.reshape(vt[:, 0], [n_size[1], 1])
st = tf.matmul(ut, tf.transpose(vt))
# when we return the top singular vector, we have to remove the
# dimension we have added on
st_shape = tf.shape(st)
begin = tf.cast(tf.less(original_rank, 2), dtype=tf.int32)
end = 2 - tf.cast(tf.equal(original_rank, 0), dtype=tf.int32)
new_shape = st_shape[begin:end]
return tf.reshape(st, new_shape)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
lr = coefficients["learning_rate"]
lambda_ = coefficients["lambda_"]
epsilon = coefficients["epsilon"]
if self.ord == "fro":
norm = tf.convert_to_tensor(
self._frobenius_norm(grad), name="norm", dtype=var.dtype.base_dtype
)
s = grad / (norm + epsilon)
else:
top_singular_vector = tf.convert_to_tensor(
self._top_singular_vector(grad),
name="top_singular_vector",
dtype=var.dtype.base_dtype,
)
s = top_singular_vector
var_update = tf.math.multiply(var, lr) - (1 - lr) * lambda_ * s
return var.assign(var_update, use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = (apply_state or {}).get(
(var_device, var_dtype)
) or self._fallback_apply_state(var_device, var_dtype)
lr = coefficients["learning_rate"]
lambda_ = coefficients["lambda_"]
epsilon = coefficients["epsilon"]
var_slice = tf.gather(var, indices)
if self.ord == "fro":
norm = tf.convert_to_tensor(
self._frobenius_norm(grad), name="norm", dtype=var.dtype.base_dtype
)
s = grad / (norm + epsilon)
else:
top_singular_vector = tf.convert_to_tensor(
self._top_singular_vector(grad),
name="top_singular_vector",
dtype=var.dtype.base_dtype,
)
s = top_singular_vector
var_update_value = tf.math.multiply(var_slice, lr) - (1 - lr) * lambda_ * s
var_update_op = self._resource_scatter_update(var, indices, var_update_value)
return var_update_op
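# Worked sketch of the Frobenius-norm update documented above (the variable,
# gradient, and hyperparameter values are made up for illustration). It spells
# out the dense update rule
#     new_var = lr * var - (1 - lr) * lambda_ * grad / (||grad||_F + epsilon)
# by hand; applying ConditionalGradient(learning_rate=lr, lambda_=lambda_,
# epsilon=epsilon).apply_gradients([(grad, var)]) to a tf.Variable holding the
# same values is expected to yield the same result.
if __name__ == "__main__":
    var = tf.constant([[1.0, 2.0], [3.0, 4.0]])
    grad = tf.constant([[0.1, 0.2], [0.3, 0.4]])
    lr, lambda_, epsilon = 0.9, 0.01, 1e-7
    norm = tf.reduce_sum(grad**2) ** 0.5  # matches _frobenius_norm above
    new_var = var * lr - (1 - lr) * lambda_ * grad / (norm + epsilon)
    print(new_var.numpy())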
| {
"content_hash": "24b53536c86844828269c0ecaaf30f19",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 85,
"avg_line_length": 40.59487179487179,
"alnum_prop": 0.5852703385548257,
"repo_name": "tensorflow/addons",
"id": "6b796267103aacf312f9ac8724941f897fefc9dd",
"size": "8605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_addons/optimizers/conditional_gradient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "192270"
},
{
"name": "Dockerfile",
"bytes": "8576"
},
{
"name": "Python",
"bytes": "1788046"
},
{
"name": "Shell",
"bytes": "14386"
},
{
"name": "Smarty",
"bytes": "99722"
},
{
"name": "Starlark",
"bytes": "22515"
}
],
"symlink_target": ""
} |
"""
Functions for applying functions that act on arrays to xarray's labeled data.
"""
import functools
import itertools
import operator
import typing
from collections import Counter, OrderedDict
from distutils.version import LooseVersion
from typing import (
AbstractSet, Any, Callable, Iterable, List, Mapping, Optional, Sequence,
Tuple, Union)
import numpy as np
from . import duck_array_ops, utils
from .alignment import deep_align
from .merge import expand_and_merge_variables
from .pycompat import TYPE_CHECKING, dask_array_type
from .utils import is_dict_like
from .variable import Variable
if TYPE_CHECKING:
from .dataset import Dataset
_DEFAULT_FROZEN_SET = frozenset() # type: frozenset
_NO_FILL_VALUE = utils.ReprObject('<no-fill-value>')
_DEFAULT_NAME = utils.ReprObject('<default-name>')
_JOINS_WITHOUT_FILL_VALUES = frozenset({'inner', 'exact'})
class _UFuncSignature(object):
"""Core dimensions signature for a given function.
Based on the signature provided by generalized ufuncs in NumPy.
Attributes
----------
input_core_dims : tuple[tuple]
Core dimension names on each input variable.
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
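    Examples
    --------
    A minimal, illustrative signature: two inputs (one with core dimensions
    ``x`` and ``y``, one with none) and a single output with core dimension
    ``y``.
    >>> sig = _UFuncSignature([('x', 'y'), ()], [('y',)])
    >>> str(sig)
    '(x,y),()->(y)'
    >>> sorted(sig.all_core_dims)
    ['x', 'y']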
"""
def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
self._all_input_core_dims = None
self._all_output_core_dims = None
self._all_core_dims = None
@property
def all_input_core_dims(self):
if self._all_input_core_dims is None:
self._all_input_core_dims = frozenset(
dim for dims in self.input_core_dims for dim in dims)
return self._all_input_core_dims
@property
def all_output_core_dims(self):
if self._all_output_core_dims is None:
self._all_output_core_dims = frozenset(
dim for dims in self.output_core_dims for dim in dims)
return self._all_output_core_dims
@property
def all_core_dims(self):
if self._all_core_dims is None:
self._all_core_dims = (self.all_input_core_dims |
self.all_output_core_dims)
return self._all_core_dims
@property
def num_inputs(self):
return len(self.input_core_dims)
@property
def num_outputs(self):
return len(self.output_core_dims)
def __eq__(self, other):
try:
return (self.input_core_dims == other.input_core_dims and
self.output_core_dims == other.output_core_dims)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return ('%s(%r, %r)'
% (type(self).__name__,
list(self.input_core_dims),
list(self.output_core_dims)))
def __str__(self):
lhs = ','.join('({})'.format(','.join(dims))
for dims in self.input_core_dims)
rhs = ','.join('({})'.format(','.join(dims))
for dims in self.output_core_dims)
return '{}->{}'.format(lhs, rhs)
def to_gufunc_string(self):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
"""
all_dims = self.all_core_dims
dims_map = dict(zip(sorted(all_dims), range(len(all_dims))))
input_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
for core_dims in self.input_core_dims]
output_core_dims = [['dim%d' % dims_map[dim] for dim in core_dims]
for core_dims in self.output_core_dims]
alt_signature = type(self)(input_core_dims, output_core_dims)
return str(alt_signature)
def result_name(objects: list) -> Any:
# use the same naming heuristics as pandas:
# https://github.com/blaze/blaze/issues/458#issuecomment-51936356
names = {getattr(obj, 'name', _DEFAULT_NAME) for obj in objects}
names.discard(_DEFAULT_NAME)
if len(names) == 1:
name, = names
else:
name = None
return name
def _get_coord_variables(args):
input_coords = []
for arg in args:
try:
coords = arg.coords
except AttributeError:
pass # skip this argument
else:
coord_vars = getattr(coords, 'variables', coords)
input_coords.append(coord_vars)
return input_coords
def build_output_coords(
args: list,
signature: _UFuncSignature,
exclude_dims: AbstractSet = frozenset(),
) -> 'List[OrderedDict[Any, Variable]]':
"""Build output coordinates for an operation.
Parameters
----------
args : list
List of raw operation arguments. Any valid types for xarray operations
are OK, e.g., scalars, Variable, DataArray, Dataset.
signature : _UfuncSignature
Core dimensions signature for the operation.
exclude_dims : optional set
Dimensions excluded from the operation. Coordinates along these
dimensions are dropped.
Returns
-------
OrderedDict of Variable objects with merged coordinates.
"""
input_coords = _get_coord_variables(args)
if exclude_dims:
input_coords = [OrderedDict((k, v) for k, v in coord_vars.items()
if exclude_dims.isdisjoint(v.dims))
for coord_vars in input_coords]
if len(input_coords) == 1:
# we can skip the expensive merge
unpacked_input_coords, = input_coords
merged = OrderedDict(unpacked_input_coords)
else:
merged = expand_and_merge_variables(input_coords)
output_coords = []
for output_dims in signature.output_core_dims:
dropped_dims = signature.all_input_core_dims - set(output_dims)
if dropped_dims:
filtered = OrderedDict((k, v) for k, v in merged.items()
if dropped_dims.isdisjoint(v.dims))
else:
filtered = merged
output_coords.append(filtered)
return output_coords
def apply_dataarray_vfunc(
func,
*args,
signature,
join='inner',
exclude_dims=frozenset(),
keep_attrs=False
):
"""Apply a variable level function over DataArray, Variable and/or ndarray
objects.
"""
from .dataarray import DataArray
if len(args) > 1:
args = deep_align(args, join=join, copy=False, exclude=exclude_dims,
raise_on_invalid=False)
if keep_attrs and hasattr(args[0], 'name'):
name = args[0].name
else:
name = result_name(args)
result_coords = build_output_coords(args, signature, exclude_dims)
data_vars = [getattr(a, 'variable', a) for a in args]
result_var = func(*data_vars)
if signature.num_outputs > 1:
out = tuple(DataArray(variable, coords, name=name, fastpath=True)
for variable, coords in zip(result_var, result_coords))
else:
coords, = result_coords
out = DataArray(result_var, coords, name=name, fastpath=True)
return out
def ordered_set_union(all_keys: List[Iterable]) -> Iterable:
result_dict = OrderedDict() # type: OrderedDict[Any, None]
for keys in all_keys:
for key in keys:
result_dict[key] = None
return result_dict.keys()
def ordered_set_intersection(all_keys: List[Iterable]) -> Iterable:
intersection = set(all_keys[0])
for keys in all_keys[1:]:
intersection.intersection_update(keys)
return [key for key in all_keys[0] if key in intersection]
def assert_and_return_exact_match(all_keys):
first_keys = all_keys[0]
for keys in all_keys[1:]:
if keys != first_keys:
raise ValueError(
'exact match required for all data variable names, '
'but %r != %r' % (keys, first_keys))
return first_keys
_JOINERS = {
'inner': ordered_set_intersection,
'outer': ordered_set_union,
'left': operator.itemgetter(0),
'right': operator.itemgetter(-1),
'exact': assert_and_return_exact_match,
}
def join_dict_keys(
objects: Iterable[Union[Mapping, Any]], how: str = 'inner',
) -> Iterable:
joiner = _JOINERS[how]
all_keys = [obj.keys() for obj in objects if hasattr(obj, 'keys')]
return joiner(all_keys)
def collect_dict_values(
objects: Iterable[Union[Mapping, Any]],
keys: Iterable,
fill_value: object = None,
) -> List[list]:
return [[obj.get(key, fill_value)
if is_dict_like(obj)
else obj
for obj in objects]
for key in keys]
def _as_variables_or_variable(arg):
try:
return arg.variables
except AttributeError:
try:
return arg.variable
except AttributeError:
return arg
def _unpack_dict_tuples(
result_vars: Mapping[Any, Tuple[Variable]],
num_outputs: int,
) -> 'Tuple[OrderedDict[Any, Variable], ...]':
out = tuple(OrderedDict() for _ in range(num_outputs)) # type: ignore
for name, values in result_vars.items():
for value, results_dict in zip(values, out):
results_dict[name] = value
return out
def apply_dict_of_variables_vfunc(
func, *args, signature, join='inner', fill_value=None
):
"""Apply a variable level function over dicts of DataArray, DataArray,
Variable and ndarray objects.
"""
args = [_as_variables_or_variable(arg) for arg in args]
names = join_dict_keys(args, how=join)
grouped_by_name = collect_dict_values(args, names, fill_value)
result_vars = OrderedDict()
for name, variable_args in zip(names, grouped_by_name):
result_vars[name] = func(*variable_args)
if signature.num_outputs > 1:
return _unpack_dict_tuples(result_vars, signature.num_outputs)
else:
return result_vars
def _fast_dataset(
variables: 'OrderedDict[Any, Variable]',
coord_variables: Mapping[Any, Variable],
) -> 'Dataset':
"""Create a dataset as quickly as possible.
Beware: the `variables` OrderedDict is modified INPLACE.
"""
from .dataset import Dataset
variables.update(coord_variables)
coord_names = set(coord_variables)
return Dataset._from_vars_and_coord_names(variables, coord_names)
def apply_dataset_vfunc(
func,
*args,
signature,
join='inner',
dataset_join='exact',
fill_value=_NO_FILL_VALUE,
exclude_dims=frozenset(),
keep_attrs=False
):
"""Apply a variable level function over Dataset, dict of DataArray,
DataArray, Variable and/or ndarray objects.
"""
from .dataset import Dataset
first_obj = args[0] # we'll copy attrs from this in case keep_attrs=True
if (dataset_join not in _JOINS_WITHOUT_FILL_VALUES and
fill_value is _NO_FILL_VALUE):
raise TypeError('to apply an operation to datasets with different '
'data variables with apply_ufunc, you must supply the '
'dataset_fill_value argument.')
if len(args) > 1:
args = deep_align(args, join=join, copy=False, exclude=exclude_dims,
raise_on_invalid=False)
list_of_coords = build_output_coords(args, signature, exclude_dims)
args = [getattr(arg, 'data_vars', arg) for arg in args]
result_vars = apply_dict_of_variables_vfunc(
func, *args, signature=signature, join=dataset_join,
fill_value=fill_value)
if signature.num_outputs > 1:
out = tuple(_fast_dataset(*args)
for args in zip(result_vars, list_of_coords))
else:
coord_vars, = list_of_coords
out = _fast_dataset(result_vars, coord_vars)
if keep_attrs and isinstance(first_obj, Dataset):
if isinstance(out, tuple):
out = tuple(ds._copy_attrs_from(first_obj) for ds in out)
else:
out._copy_attrs_from(first_obj)
return out
def _iter_over_selections(obj, dim, values):
"""Iterate over selections of an xarray object in the provided order."""
from .groupby import _dummy_copy
dummy = None
for value in values:
try:
obj_sel = obj.sel(**{dim: value})
except (KeyError, IndexError):
if dummy is None:
dummy = _dummy_copy(obj)
obj_sel = dummy
yield obj_sel
def apply_groupby_func(func, *args):
"""Apply a dataset or datarray level function over GroupBy, Dataset,
DataArray, Variable and/or ndarray objects.
"""
from .groupby import GroupBy, peek_at
from .variable import Variable
groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
assert groupbys, 'must have at least one groupby to iterate over'
first_groupby = groupbys[0]
if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):
raise ValueError('apply_ufunc can only perform operations over '
                         'multiple GroupBy objects at once if they are all '
'grouped the same way')
grouped_dim = first_groupby._group.name
unique_values = first_groupby._unique_coord.values
iterators = []
for arg in args:
if isinstance(arg, GroupBy):
iterator = (value for _, value in arg)
elif hasattr(arg, 'dims') and grouped_dim in arg.dims:
if isinstance(arg, Variable):
raise ValueError(
'groupby operations cannot be performed with '
'xarray.Variable objects that share a dimension with '
'the grouped dimension')
iterator = _iter_over_selections(arg, grouped_dim, unique_values)
else:
iterator = itertools.repeat(arg)
iterators.append(iterator)
applied = (func(*zipped_args) for zipped_args in zip(*iterators))
applied_example, applied = peek_at(applied)
combine = first_groupby._combine
if isinstance(applied_example, tuple):
combined = tuple(combine(output) for output in zip(*applied))
else:
combined = combine(applied)
return combined
def unified_dim_sizes(
variables: Iterable[Variable],
exclude_dims: AbstractSet = frozenset()
) -> 'OrderedDict[Any, int]':
dim_sizes = OrderedDict() # type: OrderedDict[Any, int]
for var in variables:
if len(set(var.dims)) < len(var.dims):
raise ValueError('broadcasting cannot handle duplicate '
'dimensions on a variable: %r' % list(var.dims))
for dim, size in zip(var.dims, var.shape):
if dim not in exclude_dims:
if dim not in dim_sizes:
dim_sizes[dim] = size
elif dim_sizes[dim] != size:
raise ValueError('operands cannot be broadcast together '
'with mismatched lengths for dimension '
'%r: %s vs %s'
% (dim, dim_sizes[dim], size))
return dim_sizes
SLICE_NONE = slice(None)
def broadcast_compat_data(variable, broadcast_dims, core_dims):
# type: (Variable, tuple, tuple) -> Any
data = variable.data
old_dims = variable.dims
new_dims = broadcast_dims + core_dims
if new_dims == old_dims:
# optimize for the typical case
return data
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError(
'operand to apply_ufunc has required core dimensions %r, but '
'some of these are missing on the input variable: %r'
% (list(core_dims), missing_core_dims))
set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
if unexpected_dims:
raise ValueError('operand to apply_ufunc encountered unexpected '
'dimensions %r on an input variable: these are core '
'dimensions on other input or output variables'
% unexpected_dims)
# for consistency with numpy, keep broadcast dimensions to the left
old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
reordered_dims = old_broadcast_dims + core_dims
if reordered_dims != old_dims:
order = tuple(old_dims.index(d) for d in reordered_dims)
data = duck_array_ops.transpose(data, order)
if new_dims != reordered_dims:
key_parts = []
for dim in new_dims:
if dim in set_old_dims:
key_parts.append(SLICE_NONE)
elif key_parts:
# no need to insert new axes at the beginning that are already
# handled by broadcasting
key_parts.append(np.newaxis)
data = data[tuple(key_parts)]
return data
def apply_variable_ufunc(
func,
*args,
signature,
exclude_dims=frozenset(),
dask='forbidden',
output_dtypes=None,
output_sizes=None,
keep_attrs=False
):
"""Apply a ndarray level function over Variable and/or ndarray objects.
"""
from .variable import Variable, as_compatible_data
dim_sizes = unified_dim_sizes((a for a in args if hasattr(a, 'dims')),
exclude_dims=exclude_dims)
broadcast_dims = tuple(dim for dim in dim_sizes
if dim not in signature.all_core_dims)
output_dims = [broadcast_dims + out for out in signature.output_core_dims]
input_data = [broadcast_compat_data(arg, broadcast_dims, core_dims)
if isinstance(arg, Variable)
else arg
for arg, core_dims in zip(args, signature.input_core_dims)]
if any(isinstance(array, dask_array_type) for array in input_data):
if dask == 'forbidden':
raise ValueError('apply_ufunc encountered a dask array on an '
'argument, but handling for dask arrays has not '
'been enabled. Either set the ``dask`` argument '
'or load your data into memory first with '
'``.load()`` or ``.compute()``')
elif dask == 'parallelized':
input_dims = [broadcast_dims + dims
for dims in signature.input_core_dims]
numpy_func = func
def func(*arrays):
return _apply_with_dask_atop(
numpy_func, arrays, input_dims, output_dims,
signature, output_dtypes, output_sizes)
elif dask == 'allowed':
pass
else:
raise ValueError('unknown setting for dask array handling in '
'apply_ufunc: {}'.format(dask))
result_data = func(*input_data)
if signature.num_outputs == 1:
result_data = (result_data,)
elif (not isinstance(result_data, tuple) or
len(result_data) != signature.num_outputs):
raise ValueError('applied function does not have the number of '
'outputs specified in the ufunc signature. '
'Result is not a tuple of {} elements: {!r}'
.format(signature.num_outputs, result_data))
output = []
for dims, data in zip(output_dims, result_data):
data = as_compatible_data(data)
if data.ndim != len(dims):
raise ValueError(
'applied function returned data with unexpected '
'number of dimensions: {} vs {}, for dimensions {}'
.format(data.ndim, len(dims), dims))
var = Variable(dims, data, fastpath=True)
for dim, new_size in var.sizes.items():
if dim in dim_sizes and new_size != dim_sizes[dim]:
raise ValueError(
'size of dimension {!r} on inputs was unexpectedly '
'changed by applied function from {} to {}. Only '
'dimensions specified in ``exclude_dims`` with '
'xarray.apply_ufunc are allowed to change size.'
.format(dim, dim_sizes[dim], new_size))
if keep_attrs and isinstance(args[0], Variable):
var.attrs.update(args[0].attrs)
output.append(var)
if signature.num_outputs == 1:
return output[0]
else:
return tuple(output)
def _apply_with_dask_atop(func, args, input_dims, output_dims, signature,
output_dtypes, output_sizes=None):
import dask.array as da
if signature.num_outputs > 1:
raise NotImplementedError('multiple outputs from apply_ufunc not yet '
"supported with dask='parallelized'")
if output_dtypes is None:
raise ValueError('output dtypes (output_dtypes) must be supplied to '
"apply_func when using dask='parallelized'")
if not isinstance(output_dtypes, list):
raise TypeError('output_dtypes must be a list of objects coercible to '
'numpy dtypes, got {}'.format(output_dtypes))
if len(output_dtypes) != signature.num_outputs:
raise ValueError('apply_ufunc arguments output_dtypes and '
'output_core_dims must have the same length: {} vs {}'
.format(len(output_dtypes), signature.num_outputs))
(dtype,) = output_dtypes
if output_sizes is None:
output_sizes = {}
new_dims = signature.all_output_core_dims - signature.all_input_core_dims
if any(dim not in output_sizes for dim in new_dims):
raise ValueError("when using dask='parallelized' with apply_ufunc, "
'output core dimensions not found on inputs must '
'have explicitly set sizes with ``output_sizes``: {}'
.format(new_dims))
for n, (data, core_dims) in enumerate(
zip(args, signature.input_core_dims)):
if isinstance(data, dask_array_type):
# core dimensions cannot span multiple chunks
for axis, dim in enumerate(core_dims, start=-len(core_dims)):
if len(data.chunks[axis]) != 1:
raise ValueError(
'dimension {!r} on {}th function argument to '
"apply_ufunc with dask='parallelized' consists of "
'multiple chunks, but is also a core dimension. To '
'fix, rechunk into a single dask array chunk along '
'this dimension, i.e., ``.rechunk({})``, but beware '
'that this may significantly increase memory usage.'
.format(dim, n, {dim: -1}))
(out_ind,) = output_dims
atop_args = []
for arg, dims in zip(args, input_dims):
# skip leading dimensions that are implicitly added by broadcasting
ndim = getattr(arg, 'ndim', 0)
trimmed_dims = dims[-ndim:] if ndim else ()
atop_args.extend([arg, trimmed_dims])
return da.atop(func, out_ind, *atop_args, dtype=dtype, concatenate=True,
new_axes=output_sizes)
def apply_array_ufunc(func, *args, dask='forbidden'):
"""Apply a ndarray level function over ndarray objects."""
if any(isinstance(arg, dask_array_type) for arg in args):
if dask == 'forbidden':
raise ValueError('apply_ufunc encountered a dask array on an '
'argument, but handling for dask arrays has not '
'been enabled. Either set the ``dask`` argument '
'or load your data into memory first with '
'``.load()`` or ``.compute()``')
elif dask == 'parallelized':
raise ValueError("cannot use dask='parallelized' for apply_ufunc "
'unless at least one input is an xarray object')
elif dask == 'allowed':
pass
else:
raise ValueError('unknown setting for dask array handling: {}'
.format(dask))
return func(*args)
def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Optional[Sequence[Sequence]] = None,
output_core_dims: Optional[Sequence[Sequence]] = ((),),
exclude_dims: AbstractSet = frozenset(),
vectorize: bool = False,
join: str = 'exact',
dataset_join: str = 'exact',
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool = False,
kwargs: Mapping = None,
dask: str = 'forbidden',
output_dtypes: Optional[Sequence] = None,
output_sizes: Optional[Mapping[Any, int]] = None
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, GroupBy, Variable, numpy/dask arrays or scalars
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : Sequence[Sequence], optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : List[tuple], optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {'outer', 'inner', 'left', 'right', 'exact'}, optional
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs: boolean, Optional
Whether to copy attributes from the first argument to the output.
kwargs: dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask: 'forbidden', 'allowed' or 'parallelized', optional
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array. If used, the ``output_dtypes`` argument must
also be provided. Multiple output arguments are not yet supported.
output_dtypes : list of dtypes, optional
Optional list of output dtypes. Only used if dask='parallelized'.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x ** 2 + y ** 2)
... return xr.apply_ufunc(func, a, b)
You can now apply ``magnitude()`` to ``xr.DataArray`` and ``xr.Dataset``
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[('x', [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)>
array([1.414214, 2.828427, 4.242641])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Plain scalars, numpy arrays and a mix of these with xarray objects is also
supported:
>>> magnitude(4, 5)
5.0
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)>
array([1., 2., 3.])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension::
def mean(obj, dim):
# note: apply always moves core dimensions to the end
return apply_ufunc(np.mean, obj,
input_core_dims=[[dim]],
kwargs={'axis': -1})
Inner product over a specific dimension (like ``xr.dot``)::
def _inner(x, y):
result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
return result[..., 0, 0]
def inner_product(a, b, dim):
return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
Stack objects along a new dimension (like ``xr.concat``)::
def stack(objects, dim, new_coord):
# note: this version does not stack coordinates
func = lambda *x: np.stack(x, axis=-1)
result = apply_ufunc(func, *objects,
output_core_dims=[[dim]],
join='outer',
dataset_fill_value=np.nan)
result[dim] = new_coord
return result
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors::
import scipy.stats
def earth_mover_distance(first_samples,
second_samples,
dim='ensemble'):
return apply_ufunc(scipy.stats.wasserstein_distance,
first_samples, second_samples,
input_core_dims=[[dim], [dim]],
vectorize=True)
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in `apply`. You may find helper functions such as
numpy.broadcast_arrays helpful in writing your function. `apply_ufunc` also
    works well with numba's vectorize and guvectorize. Further explanation
    and examples are provided in the xarray documentation [3].
See also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
References
----------
.. [1] http://docs.scipy.org/doc/numpy/reference/ufuncs.html
.. [2] http://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
.. [3] http://xarray.pydata.org/en/stable/computation.html#wrapping-custom-computation
""" # noqa: E501 # don't error on that URL one line up
from .groupby import GroupBy
from .dataarray import DataArray
from .variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
            'input_core_dims must be None or a tuple with the same length as '
'the number of arguments. Given input_core_dims: {}, '
'number of args: {}.'.format(input_core_dims, len(args)))
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims and not exclude_dims <= signature.all_core_dims:
raise ValueError('each dimension in `exclude_dims` must also be a '
'core dimension in the function signature')
if kwargs:
func = functools.partial(func, **kwargs)
if vectorize:
if signature.all_core_dims:
# we need the signature argument
if LooseVersion(np.__version__) < '1.12': # pragma: no cover
raise NotImplementedError(
'numpy 1.12 or newer required when using vectorize=True '
'in xarray.apply_ufunc with non-scalar output core '
'dimensions.')
func = np.vectorize(func,
otypes=output_dtypes,
signature=signature.to_gufunc_string())
else:
func = np.vectorize(func, otypes=output_dtypes)
variables_vfunc = functools.partial(apply_variable_ufunc, func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
output_dtypes=output_dtypes,
output_sizes=output_sizes)
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(apply_ufunc, func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask)
return apply_groupby_func(this_apply, *args)
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(variables_vfunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs)
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(variables_vfunc, *args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs)
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
return apply_array_ufunc(func, *args, dask=dask)
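# --- Illustrative sketch, not part of the original module ------------------
# A minimal example of the dask='parallelized' path described above; it
# assumes numpy, xarray and dask.array are importable under their usual
# names and that the wrapped callable is a plain element-wise function:
#
#     import numpy as np
#     import xarray as xr
#     import dask.array as dsk
#
#     lazy = xr.DataArray(dsk.ones((4, 4), chunks=2), dims=['x', 'y'])
#     doubled = xr.apply_ufunc(lambda a: a * 2, lazy,
#                              dask='parallelized',
#                              output_dtypes=[lazy.dtype])
#     doubled.compute()  # evaluation happens only here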
def dot(*arrays, dims=None, **kwargs):
"""Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
arrays: DataArray (or Variable) objects
Arrays to compute.
dims: str or tuple of strings, optional
Which dimensions to sum over.
        If not specified, then all the common dimensions are summed over.
**kwargs: dict
Additional keyword arguments passed to numpy.einsum or
dask.array.einsum
Returns
-------
dot: DataArray
Examples
--------
>>> da_a = xr.DataArray(np.arange(3 * 4).reshape(3, 4), dims=['a', 'b'])
>>> da_b = xr.DataArray(np.arange(3 * 4 * 5).reshape(3, 4, 5),
    ...                     dims=['a', 'b', 'c'])
>>> da_c = xr.DataArray(np.arange(5 * 6).reshape(5, 6), dims=['c', 'd'])
>>>
>>> xr.dot(da_a, da_b, dims=['a', 'b']).dims
('c', )
>>> xr.dot(da_a, da_b, dims=['a']).dims
('b', 'c')
>>> xr.dot(da_a, da_b, da_c, dims=['b', 'c']).dims
('a', 'd')
"""
from .dataarray import DataArray
from .variable import Variable
if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
        raise TypeError('Only xr.DataArray and xr.Variable are supported. '
'Given {}.'.format([type(arr) for arr in arrays]))
if len(arrays) == 0:
raise TypeError('At least one array should be given.')
if isinstance(dims, str):
dims = (dims, )
common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = 'abcdefghijklmnopqrstuvwxyz'
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dims is None:
        # find dimensions that occur more than once
dim_counts = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dims = tuple(d for d, c in dim_counts.items() if c > 1)
dims = tuple(dims) # make dims a tuple
# dimensions to be parallelized
broadcast_dims = tuple(d for d in all_dims
if d in common_dims and d not in dims)
input_core_dims = [[d for d in arr.dims if d not in broadcast_dims]
for arr in arrays]
output_core_dims = [tuple(d for d in all_dims if d not in
dims + broadcast_dims)]
    # for dask older than 0.17.4, use tensordot if possible.
if isinstance(arr.data, dask_array_type):
import dask
if LooseVersion(dask.__version__) < LooseVersion('0.17.4'):
if len(broadcast_dims) == 0 and len(arrays) == 2:
axes = [[arr.get_axis_num(d) for d in arr.dims if d in dims]
for arr in arrays]
return apply_ufunc(duck_array_ops.tensordot, *arrays,
dask='allowed',
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
kwargs={'axes': axes})
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = ['...' + ''.join([dim_map[d] for d in ds]) for ds
in input_core_dims]
subscripts = ','.join(subscripts_list)
subscripts += '->...' + ''.join([dim_map[d] for d in output_core_dims[0]])
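    # e.g. for the docstring example ``xr.dot(da_a, da_b, dims=['a', 'b'])``
    # above (da_a with dims ('a', 'b'), da_b with dims ('a', 'b', 'c')) this
    # builds subscripts == '...ab,...abc->...c'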
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(func, *arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
dask='allowed')
return result.transpose(*[d for d in all_dims if d in result.dims])
def where(cond, x, y):
"""Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset with boolean dtype
When True, return values from `x`, otherwise returns values from `y`.
x, y : scalar, array, Variable, DataArray or Dataset
Values from which to choose. All dimension coordinates on these objects
must be aligned with each other and with `cond`.
Returns
-------
In priority order: Dataset, DataArray, Variable or array, whichever
type appears as an input argument.
Examples
--------
>>> cond = xr.DataArray([True, False], dims=['x'])
>>> x = xr.DataArray([1, 2], dims=['y'])
>>> xr.where(cond, x, 0)
<xarray.DataArray (x: 2, y: 2)>
array([[1, 2],
[0, 0]])
Dimensions without coordinates: x, y
See also
--------
numpy.where : corresponding numpy function
Dataset.where, DataArray.where : equivalent methods
"""
# alignment for three arguments is complicated, so don't support it yet
return apply_ufunc(duck_array_ops.where,
cond, x, y,
join='exact',
dataset_join='exact',
dask='allowed')
| {
"content_hash": "c368deaf76d387ef16a8899e603dd02a",
"timestamp": "",
"source": "github",
"line_count": 1117,
"max_line_length": 90,
"avg_line_length": 38.81915846016115,
"alnum_prop": 0.5910380295657388,
"repo_name": "chunweiyuan/xarray",
"id": "451d95ee542e5207d7ed7d5ae7c12f0b56937cad",
"size": "43361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xarray/core/computation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3150"
},
{
"name": "Python",
"bytes": "2336715"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myresume', '0011_delete_mycontent'),
]
operations = [
migrations.CreateModel(
name='MyContent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('slug', models.SlugField(max_length=255)),
('image_primary', models.CharField(max_length=200)),
('image_secondary', models.CharField(max_length=200)),
('h1', models.CharField(max_length=200)),
('h2', models.CharField(max_length=200)),
('body', models.TextField(max_length=5000)),
('h1_it', models.CharField(max_length=200)),
('h2_it', models.CharField(max_length=200)),
('body_it', models.TextField(max_length=5000)),
('layout_class', models.CharField(max_length=50)),
],
),
]
| {
"content_hash": "c9eb910b183c22af5b637cbddae7f532",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 114,
"avg_line_length": 37.172413793103445,
"alnum_prop": 0.5510204081632653,
"repo_name": "italomandara/mysite",
"id": "d16c5f6d9052fdba3fd51ae96054dc5df9a0d586",
"size": "1149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myresume/migrations/0012_mycontent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "261372"
},
{
"name": "HTML",
"bytes": "75306"
},
{
"name": "JavaScript",
"bytes": "47944"
},
{
"name": "Python",
"bytes": "64240"
}
],
"symlink_target": ""
} |
"""
pych - The Python/Chapel interoperability module.
"""
import logging
import pych.configuration
import pych.runtime
RT = None
CONFIG = None
logging.basicConfig( # Setup default logging
level=logging.ERROR,
format="%(levelname)s:%(module)s:%(funcName)s: %(message)s"
)
try:
CONFIG = pych.configuration.Configuration() # Load configuration
logging.basicConfig( # Setup logging
level=CONFIG["log_level"],
format="%(levelname)s:%(module)s:%(funcName)s: %(message)s"
)
RT = pych.runtime.Runtime(CONFIG) # Setup runtime
except ValueError as exc:
logging.error("Looks like there is an error in your configuration file.")
except IOError as exc:
logging.error("If you are installing just now, "
"then ignore this error message: "
"Could not find config-file(pych.json), "
"check your installation or try re-installing.")
| {
"content_hash": "553b45be055a2deb16b7db1a9d6c915e",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 34.3448275862069,
"alnum_prop": 0.6174698795180723,
"repo_name": "safl/pychapel",
"id": "ee1300ccb3dee75b3fa96df6b596f1a37daf130e",
"size": "996",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "module/pych/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "873471"
},
{
"name": "C++",
"bytes": "2871"
},
{
"name": "Chapel",
"bytes": "17023"
},
{
"name": "Makefile",
"bytes": "9821"
},
{
"name": "Python",
"bytes": "114586"
},
{
"name": "Shell",
"bytes": "9595"
}
],
"symlink_target": ""
} |
""" Base classes for all Hadoop Modules """
from init import configuration
from vistrails.core.modules.basic_modules import File, String
from vistrails.gui.modules.python_source_configure import \
PythonSourceConfigurationWidget
from vistrails.core.modules.config import ModuleSettings, IPort, OPort
from vistrails.core.modules.vistrails_module import Module, NotCacheable, \
ModuleError, ModuleSuspended
from remoteq.core.stack import select_machine, end_machine, use_machine, \
current_machine
from remoteq.batch.commandline import Subshell
from remoteq.batch.directories import CreateDirectory
import urllib
import xml.etree.cElementTree as ET
from init import RQModule
################################################################################
class HadoopBaseModule(RQModule):
"""
    The base class for all modules in the Hadoop package. It provides some
    basic functionality, such as locating the Hadoop installation directory
    and executing Hadoop commands, for subclasses to use.
"""
_settings = ModuleSettings(abstract=True)
hadoop_configuration = None
def __init__(self):
Module.__init__(self)
def get_hadoop_home(self, machine):
HADOOP_HOME = machine.remote.send_command("echo $HADOOP_HOME").strip()
if HADOOP_HOME == '':
# raise ModuleError(self, 'HADOOP_HOME has to be defined')
# it does not actually. This means it is part of the system.
pass
return HADOOP_HOME
def read_site_config(self, machine):
config = HadoopBaseModule.hadoop_configuration
# For AWS
core_site = config['home']+'/conf/core-site.xml'
# for NYU/CUSP
#core_site = config['home']+'/etc/hadoop/conf/core-site.xml'
site_string = machine.remote.cat(core_site)
root = ET.fromstring(site_string)
for node in root:
name = node.find('name').text
value = node.find('value').text
config[name] = value
def get_hadoop_config(self, machine):
        if HadoopBaseModule.hadoop_configuration is None:
hadoop_home = self.get_hadoop_home(machine)
# paths to try in order
streaming_paths = ['/share/hadoop/tools/lib/', # AWS
'/usr/lib/hadoop-mapreduce/', # NYU/CUSP
'/contrib/streaming/']
for path in streaming_paths:
hs = hadoop_home + path
command = ("python -c \"import os, os.path; print '' if not "
"os.path.exists('{0}') else ''.join([i for i in "
"os.listdir('{0}') if 'streaming' in i][-1:])\""
).format(hs)
streamingjar = machine.remote.send_command(command).strip()
if streamingjar:
break
if not streamingjar:
raise ModuleError(self,
'hadoop-streaming.jar not found. Please add '
'its directory to list of supported paths.')
hadoop = (hadoop_home + '/bin/hadoop') if hadoop_home else 'hadoop'
hdfs = (hadoop_home + '/bin/hdfs') if hadoop_home else 'hdfs'
if not machine.remote.command_exists(hdfs):
hdfs = hadoop
config = {'home': hadoop_home,
'hadoop': hadoop,
'hdfs': hdfs,
'streaming.jar': hs + streamingjar}
HadoopBaseModule.hadoop_configuration = config
            # reading configuration files is error-prone
#self.read_site_config(machine)
config['fs.defaultFS'] = ''
# can access config only if hdfs command exists
if hadoop != hdfs:
config['fs.defaultFS'] = \
self.call_hdfs('getconf -confKey fs.defaultFS', machine)
return HadoopBaseModule.hadoop_configuration
def add_prefix(self, path, machine):
aliases = []
if configuration.check('uris') and configuration.uris:
aliases = dict([(uri.split('#')[1], uri.split('#')[0])
for uri in configuration.uris.split(';')])
if path in aliases:
return aliases[path]
if configuration.check('defaultFS'):
prefix = configuration.defaultFS
return prefix + path
else:
prefix = self.get_hadoop_config(machine)['fs.defaultFS']
return prefix + '/' + path
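    # Illustrative note, not part of the original source: with the package
    # option ``uris`` set to e.g. "hdfs://namenode:8020/user/me#home", the
    # alias map above becomes {'home': 'hdfs://namenode:8020/user/me'}, so
    # add_prefix('home', machine) returns that URI directly, while any other
    # path is simply prefixed with the configured/derived fs.defaultFS.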
def call_hadoop(self, arguments, workdir, identifier, machine):
self.is_cacheable = lambda *args, **kwargs: False
config = self.get_hadoop_config(machine)
argList = [config['hadoop']]
if type(arguments) in [str, unicode]:
argList += arguments.split(' ')
elif type(arguments)==list:
argList += arguments
else:
raise ModuleError(self, 'Invalid argument types to hadoop')
# 1. this version returns when finished
#return subprocess.call(argList)
# 2. this version reads the results incrementally
# expect = machine.remote._expect_token
# machine.remote.push_expect(None) # Do not wait for call to finish
# result = machine.remote.send_command(" ".join(argList)).strip()
# machine.remote.pop_expect() # restore expect
# # We could show the output in a gui
# print "**** hadoop streaming running ****"
# print result,
# while not expect in result:
# output = machine.remote.consume_output()
# if output:
# print output,
# result += output
# 3. The final version should detach the process on the server
use_machine(machine)
cdir = CreateDirectory("remote", workdir)
job = Subshell("remote", command=" ".join(argList),
working_directory=workdir, identifier=identifier,
dependencies=[cdir])
job.run()
finished = job.finished()
if not finished:
status = job.status()
# The Subshell class provides the JobHandle interface, i.e.
# finished()
raise ModuleSuspended(self, '%s' % status, handle=job)
self.is_cacheable = lambda *args, **kwargs: True
return job.standard_error()
def call_hdfs(self, arguments, machine):
config = self.get_hadoop_config(machine)
argList = [config['hdfs']]
if type(arguments) in [str, unicode]:
argList += arguments.split(' ')
elif type(arguments)==list:
argList += arguments
else:
raise ModuleError(self, 'Invalid argument types to hdfs: %s'%type(arguments))
result = machine.remote.send_command(" ".join(argList)).strip()
return result
#return subprocess.call(argList)
################################################################################
class PythonSourceToFileConfigurationWidget(PythonSourceConfigurationWidget):
"""
A simple python source configuration widget, i.e. hiding all of
the input/output port table
"""
def __init__(self, module, controller, parent=None):
PythonSourceConfigurationWidget.__init__(self, module,
controller, parent)
self.inputPortTable.hide()
self.outputPortTable.hide()
self.setWindowTitle('Python Source Editor')
################################################################################
class PythonSourceToFile(Module):
"""
This is the class for specifying a python code snippet for running
with Hadoop Streaming, it will take its contents and output to a
temporary Python file. The code will not be passed around.
"""
_settings = ModuleSettings(namespace='hadoop',
configure_widget=PythonSourceToFileConfigurationWidget)
_input_ports = [IPort('Input File', File),
IPort('source', String, optional=True)]
_output_ports = [OPort('Temporary File', File)]
def compute(self):
inputFile = self.force_get_input('Input File')
if inputFile!=None:
# tempFile = file_pool.make_local_copy(inputFile.name)
tempFile = inputFile
else:
source = urllib.unquote(self.force_get_input('source', ''))
tempFile = self.interpreter.filePool.create_file()
f = open(tempFile.name, 'w')
f.write(source)
f.close()
self.set_output('Temporary File', tempFile)
################################################################################
def register():
return [HadoopBaseModule, PythonSourceToFile]
| {
"content_hash": "4457a705af58d0552693178b36623c91",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 89,
"avg_line_length": 42.52358490566038,
"alnum_prop": 0.5591791458679978,
"repo_name": "celiafish/VisTrails",
"id": "1f2468f8d16cf8b20c6c1efe84618f7e8eeda66c",
"size": "10895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/packages/RemoteQ/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
"""
WSGI config for hebrew_order_david project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hebrew_order_david.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application())
| {
"content_hash": "c03822b90a89f3e7c3004871550f46e4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 27.9375,
"alnum_prop": 0.7762863534675615,
"repo_name": "dhosterman/hebrew_order_david",
"id": "64b49056318d9235d928d125d1ec9262365a5436",
"size": "447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hebrew_order_david/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1206"
},
{
"name": "CoffeeScript",
"bytes": "412"
},
{
"name": "HTML",
"bytes": "20575"
},
{
"name": "JavaScript",
"bytes": "11814"
},
{
"name": "Python",
"bytes": "92479"
}
],
"symlink_target": ""
} |
"""
regressiontests/serializers/models.py
Created by Maximillian Dornseif on 2009-08-17.
Copyright (c) 2009 HUDORA. All rights reserved.
"""
from django.db import models
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
last_visit = models.DateTimeField()
def __unicode__(self):
return u"%s the place" % self.name
def get_absolute_url(self):
return 'http://testserver/place/%d/' % self.id
| {
"content_hash": "2e55f3ae25b6b9b18b4db9dade8169ac",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 54,
"avg_line_length": 26.105263157894736,
"alnum_prop": 0.6774193548387096,
"repo_name": "hudora/huDjango",
"id": "90ed8c6586b5e36bb8a7905335fde58cf2456c4d",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/regressiontests/serializers/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "102049"
}
],
"symlink_target": ""
} |
"""
Created on Tue Sep 26 13:39:12 2017
@author: ekrupczak on PVAquire
Quickly display in console all figures in directory
"""
import os
import numpy as np
from PIL import Image
import pylab as plt
def view_tiffs(tiffs, path):
"""
Display tiffs in console
"""
# tiffs = [file for file in os.listdir(path) if ".TIF" in file]
for f in tiffs:
plt.figure()
plt.title(f)
plt.imshow(np.array(Image.open(path+f)))
def rescale_tiffs(tiffs, path, savepath, bits = 10):
"""
Rescale tiffs from X bits to 16 bits
Save as new images
"""
# tiffs = [file for file in os.listdir(path) if ".TIF" in file]
for f in tiffs:
img = np.array(Image.open(path+f))
imgrescale = img*2**(16-bits)
result = Image.fromarray(imgrescale)
result.save(savepath+"rescaled_"+f)
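# Worked example (illustrative): with bits=10 every pixel value is multiplied
# by 2 ** (16 - 10) == 64, so a full-scale 10-bit value of 1023 becomes
# 1023 * 64 == 65472, close to the 16-bit maximum of 65535.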
if __name__ == "__main__":
# path = "E:\\FiberLineTest\\A34_EO\\corrupted_frames_log\\"
# path = "E:\\PierJitterTest\\20170923_pierEO\\corrupted_frames_log\\"
# tiffs = [file for file in os.listdir(path) if ".TIF" in file]
# rescale_tiffs(tiffs, path, bits = 10)
# print("Number of corrupted frames: ", len(tiffs))
path = "E:\\PierJitterTest\\20170923_pierEO\\"
tiffs = [file for file in os.listdir(path) if ".TIF" in file]
tifflist = [t for t in tiffs if t[-9:-4] in [str(frame) for frame in range(14428, 14441)]]
rescale_tiffs(tifflist, path, path+"corrupted_frames_log\\14428_14440\\", bits = 10) | {
"content_hash": "48a9add0134c66956d4d5b031572c9c4",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 94,
"avg_line_length": 29.096153846153847,
"alnum_prop": 0.6186384666226041,
"repo_name": "emmettk/pvrsex",
"id": "cd85e7ebac3c5da13b72e4aba601a35009152ecb",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quick_tiff_viewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108485"
}
],
"symlink_target": ""
} |
from __future__ import annotations
"""
This module implements a parser and serializer for the CSV SPARQL result
formats
http://www.w3.org/TR/sparql11-results-csv-tsv/
"""
import codecs
import csv
from typing import IO, Dict, List, Optional, Union
from rdflib.plugins.sparql.processor import SPARQLResult
from rdflib.query import Result, ResultParser, ResultSerializer
from rdflib.term import BNode, Identifier, Literal, URIRef, Variable
class CSVResultParser(ResultParser):
def __init__(self):
self.delim = ","
# type error: Signature of "parse" incompatible with supertype "ResultParser"
def parse(self, source: IO, content_type: Optional[str] = None) -> Result: # type: ignore[override]
r = Result("SELECT")
# type error: Incompatible types in assignment (expression has type "StreamReader", variable has type "IO[Any]")
if isinstance(source.read(0), bytes):
# if reading from source returns bytes do utf-8 decoding
# type error: Incompatible types in assignment (expression has type "StreamReader", variable has type "IO[Any]")
source = codecs.getreader("utf-8")(source) # type: ignore[assignment]
reader = csv.reader(source, delimiter=self.delim)
r.vars = [Variable(x) for x in next(reader)]
r.bindings = []
for row in reader:
r.bindings.append(self.parseRow(row, r.vars))
return r
def parseRow(
self, row: List[str], v: List[Variable]
) -> Dict[Variable, Union[BNode, URIRef, Literal]]:
return dict(
(var, val)
for var, val in zip(v, [self.convertTerm(t) for t in row])
if val is not None
)
def convertTerm(self, t: str) -> Optional[Union[BNode, URIRef, Literal]]:
if t == "":
return None
if t.startswith("_:"):
return BNode(t) # or generate new IDs?
if t.startswith("http://") or t.startswith("https://"): # TODO: more?
return URIRef(t)
return Literal(t)
class CSVResultSerializer(ResultSerializer):
def __init__(self, result: SPARQLResult):
ResultSerializer.__init__(self, result)
self.delim = ","
if result.type != "SELECT":
raise Exception("CSVSerializer can only serialize select query results")
def serialize(self, stream: IO, encoding: str = "utf-8", **kwargs) -> None:
# the serialiser writes bytes in the given encoding
# in py3 csv.writer is unicode aware and writes STRINGS,
# so we encode afterwards
import codecs
stream = codecs.getwriter(encoding)(stream) # type: ignore[assignment]
out = csv.writer(stream, delimiter=self.delim)
vs = [self.serializeTerm(v, encoding) for v in self.result.vars] # type: ignore[union-attr]
out.writerow(vs)
for row in self.result.bindings:
out.writerow(
[self.serializeTerm(row.get(v), encoding) for v in self.result.vars] # type: ignore[union-attr]
)
def serializeTerm(
self, term: Optional[Identifier], encoding: str
) -> Union[str, Identifier]:
if term is None:
return ""
elif isinstance(term, BNode):
return f"_:{term}"
else:
return term
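# --- Illustrative usage sketch, not part of the rdflib source --------------
# Round-tripping a SELECT result through the CSV format, assuming rdflib is
# installed and ``g`` is an rdflib.Graph with some triples in it:
#
#     from io import BytesIO
#     result = g.query("SELECT ?s ?p ?o WHERE { ?s ?p ?o } LIMIT 5")
#     buf = BytesIO()
#     CSVResultSerializer(result).serialize(buf)
#     parsed = CSVResultParser().parse(BytesIO(buf.getvalue()))
#     assert parsed.vars == result.vars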
| {
"content_hash": "3a02585562a9484c525ac55610273d0a",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 124,
"avg_line_length": 33.63636363636363,
"alnum_prop": 0.6216216216216216,
"repo_name": "RDFLib/rdflib",
"id": "73f319773ea78c7c3dfbc9629d43ced7d1775705",
"size": "3330",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "rdflib/plugins/sparql/results/csvresults.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "635"
},
{
"name": "HTML",
"bytes": "41303"
},
{
"name": "Python",
"bytes": "2828721"
},
{
"name": "Ruby",
"bytes": "31777"
},
{
"name": "Shell",
"bytes": "6030"
},
{
"name": "XSLT",
"bytes": "1588"
}
],
"symlink_target": ""
} |
class VButton(object):
ATTR_DISABLE_ON_CLICK = "dc"
| {
"content_hash": "4d26c438eb20dd6db93fa8fba7dcce8f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 19,
"alnum_prop": 0.6666666666666666,
"repo_name": "rwl/muntjac",
"id": "e42dcc8ecf60000f171590f545413c362f455145",
"size": "101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "muntjac/terminal/gwt/client/ui/v_button.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8602"
},
{
"name": "Java",
"bytes": "2243"
},
{
"name": "JavaScript",
"bytes": "32438"
},
{
"name": "Python",
"bytes": "3212361"
}
],
"symlink_target": ""
} |
"""
Click is a simple Python module inspired by the stdlib optparse to make
writing command line scripts fun. Unlike other modules, it's based
around a simple API that does not come with too much magic and is
composable.
"""
from .core import Argument
from .core import BaseCommand
from .core import Command
from .core import CommandCollection
from .core import Context
from .core import Group
from .core import MultiCommand
from .core import Option
from .core import Parameter
from .decorators import argument
from .decorators import command
from .decorators import confirmation_option
from .decorators import group
from .decorators import help_option
from .decorators import make_pass_decorator
from .decorators import option
from .decorators import pass_context
from .decorators import pass_obj
from .decorators import password_option
from .decorators import version_option
from .exceptions import Abort
from .exceptions import BadArgumentUsage
from .exceptions import BadOptionUsage
from .exceptions import BadParameter
from .exceptions import ClickException
from .exceptions import FileError
from .exceptions import MissingParameter
from .exceptions import NoSuchOption
from .exceptions import UsageError
from .formatting import HelpFormatter
from .formatting import wrap_text
from .globals import get_current_context
from .parser import OptionParser
from .termui import clear
from .termui import confirm
from .termui import echo_via_pager
from .termui import edit
from .termui import get_terminal_size
from .termui import getchar
from .termui import launch
from .termui import pause
from .termui import progressbar
from .termui import prompt
from .termui import secho
from .termui import style
from .termui import unstyle
from .types import BOOL
from .types import Choice
from .types import DateTime
from .types import File
from .types import FLOAT
from .types import FloatRange
from .types import INT
from .types import IntRange
from .types import ParamType
from .types import Path
from .types import STRING
from .types import Tuple
from .types import UNPROCESSED
from .types import UUID
from .utils import echo
from .utils import format_filename
from .utils import get_app_dir
from .utils import get_binary_stream
from .utils import get_os_args
from .utils import get_text_stream
from .utils import open_file
# Controls if click should emit the warning about the use of unicode
# literals.
disable_unicode_literals_warning = False
__version__ = "7.1.2"
| {
"content_hash": "56889fa9f08d5f15504e1c66326f8e65",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 71,
"avg_line_length": 31.17721518987342,
"alnum_prop": 0.8177019894437678,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "2b6008f2dd4176d819f06e0d7e92e43b142d30b0",
"size": "2463",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/click/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
} |
from typing import Optional, Iterator, Tuple, List
from parso.python.tokenize import tokenize
from parso.utils import parse_version_string
from parso.python.token import PythonTokenTypes
class NFAArc:
def __init__(self, next_: 'NFAState', nonterminal_or_string: Optional[str]):
self.next: NFAState = next_
self.nonterminal_or_string: Optional[str] = nonterminal_or_string
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.nonterminal_or_string)
class NFAState:
def __init__(self, from_rule: str):
self.from_rule: str = from_rule
self.arcs: List[NFAArc] = []
def add_arc(self, next_, nonterminal_or_string=None):
assert nonterminal_or_string is None or isinstance(nonterminal_or_string, str)
assert isinstance(next_, NFAState)
self.arcs.append(NFAArc(next_, nonterminal_or_string))
def __repr__(self):
return '<%s: from %s>' % (self.__class__.__name__, self.from_rule)
class GrammarParser:
"""
The parser for Python grammar files.
"""
def __init__(self, bnf_grammar: str):
self._bnf_grammar = bnf_grammar
self.generator = tokenize(
bnf_grammar,
version_info=parse_version_string('3.9')
)
self._gettoken() # Initialize lookahead
def parse(self) -> Iterator[Tuple[NFAState, NFAState]]:
# grammar: (NEWLINE | rule)* ENDMARKER
while self.type != PythonTokenTypes.ENDMARKER:
while self.type == PythonTokenTypes.NEWLINE:
self._gettoken()
# rule: NAME ':' rhs NEWLINE
self._current_rule_name = self._expect(PythonTokenTypes.NAME)
self._expect(PythonTokenTypes.OP, ':')
a, z = self._parse_rhs()
self._expect(PythonTokenTypes.NEWLINE)
yield a, z
def _parse_rhs(self):
# rhs: items ('|' items)*
a, z = self._parse_items()
if self.value != "|":
return a, z
else:
aa = NFAState(self._current_rule_name)
zz = NFAState(self._current_rule_name)
while True:
# Add the possibility to go into the state of a and come back
# to finish.
aa.add_arc(a)
z.add_arc(zz)
if self.value != "|":
break
self._gettoken()
a, z = self._parse_items()
return aa, zz
def _parse_items(self):
# items: item+
a, b = self._parse_item()
while self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING) \
or self.value in ('(', '['):
c, d = self._parse_item()
# Need to end on the next item.
b.add_arc(c)
b = d
return a, b
def _parse_item(self):
# item: '[' rhs ']' | atom ['+' | '*']
if self.value == "[":
self._gettoken()
a, z = self._parse_rhs()
self._expect(PythonTokenTypes.OP, ']')
# Make it also possible that there is no token and change the
# state.
a.add_arc(z)
return a, z
else:
a, z = self._parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self._gettoken()
# Make it clear that we can go back to the old state and repeat.
z.add_arc(a)
if value == "+":
return a, z
else:
# The end state is the same as the beginning, nothing must
# change.
return a, a
def _parse_atom(self):
# atom: '(' rhs ')' | NAME | STRING
if self.value == "(":
self._gettoken()
a, z = self._parse_rhs()
self._expect(PythonTokenTypes.OP, ')')
return a, z
elif self.type in (PythonTokenTypes.NAME, PythonTokenTypes.STRING):
a = NFAState(self._current_rule_name)
z = NFAState(self._current_rule_name)
# Make it clear that the state transition requires that value.
a.add_arc(z, self.value)
self._gettoken()
return a, z
else:
self._raise_error("expected (...) or NAME or STRING, got %s/%s",
self.type, self.value)
def _expect(self, type_, value=None):
if self.type != type_:
self._raise_error("expected %s, got %s [%s]",
type_, self.type, self.value)
if value is not None and self.value != value:
self._raise_error("expected %s, got %s", value, self.value)
value = self.value
self._gettoken()
return value
def _gettoken(self):
tup = next(self.generator)
self.type, self.value, self.begin, prefix = tup
def _raise_error(self, msg, *args):
if args:
try:
msg = msg % args
except:
msg = " ".join([msg] + list(map(str, args)))
line = self._bnf_grammar.splitlines()[self.begin[0] - 1]
raise SyntaxError(msg, ('<grammar>', self.begin[0],
self.begin[1], line))
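# --- Illustrative usage sketch, not part of the original parso source ------
# Driving the parser over a tiny two-rule grammar; each yielded pair is the
# start and end NFAState of one rule:
#
#     bnf = ("single_input: NEWLINE | simple_stmt\n"
#            "simple_stmt: NAME NEWLINE\n")
#     for start, end in GrammarParser(bnf).parse():
#         print(start.from_rule, len(start.arcs))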
| {
"content_hash": "d3e30e5734f60b952ebd641df96aab97",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 86,
"avg_line_length": 34.35064935064935,
"alnum_prop": 0.5156899810964083,
"repo_name": "glenngillen/dotfiles",
"id": "582efb43acef0caa81e8bddea8c4fb4e2c03ed62",
"size": "5515",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": ".vscode/extensions/ms-python.python-2022.2.1924087327/pythonFiles/lib/jedilsp/parso/pgen2/grammar_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ruby",
"bytes": "3634"
},
{
"name": "Shell",
"bytes": "4225"
},
{
"name": "Vim script",
"bytes": "16306"
}
],
"symlink_target": ""
} |
import os
import gc
import logging
class Diskspace(object):
def __init__(self):
gc.enable()
@staticmethod
def get_kind():
"""
return sensor kind
"""
return "mpdiskspace"
@staticmethod
def get_sensordef():
"""
Definition of the sensor and data to be shown in the PRTG WebGUI
"""
sensordefinition = {
"kind": Diskspace.get_kind(),
"name": "Disk space",
"description": "Monitors disk space on the system the mini probe is running on",
"default": "yes",
"help": "Monitors disk space on the system the mini probe is running on",
"tag": "spdiskspacesensor",
"fields": [],
"groups": []
}
return sensordefinition
@staticmethod
def get_data(data, out_queue):
diskspace = Diskspace()
try:
disk = diskspace.read_disk()
logging.debug("Running sensor: %s" % diskspace.get_kind())
except Exception as e:
logging.error("Ooops Something went wrong with '%s' sensor %s. Error: %s" % (diskspace.get_kind(),
data['sensorid'], e))
data = {
"sensorid": int(data['sensorid']),
"error": "Exception",
"code": 1,
"message": "Disk Space Sensor failed. See log for details"
}
out_queue.put(data)
return 1
channels = disk
data = {
"sensorid": int(data['sensorid']),
"message": "OK",
"channel": channels
}
del diskspace
gc.collect()
out_queue.put(data)
return 0
def read_disk(self):
disks = []
channel_list = []
for line in os.popen("df -k"):
if line.startswith("/"):
disks.append(line.rstrip().split())
for line in disks:
channel1 = {"name": "Total Bytes " + str(line[0]),
"mode": "integer",
"kind": "BytesDisk",
"value": int(line[1]) * 1024}
channel2 = {"name": "Used Bytes" + str(line[0]),
"mode": "integer",
"kind": "BytesDisk",
"value": int(line[2]) * 1024}
channel3 = {"name": "Free Bytes " + str(line[0]),
"mode": "integer",
"kind": "BytesDisk",
"value": int(line[3]) * 1024}
total = float(line[2]) + float(line[3])
used = float(line[2]) / total
free = float(line[3]) / total
channel4 = {"name": "Free Space " + str(line[0]),
"mode": "float",
"kind": "Percent",
"value": free * 100}
channel5 = {"name": "Used Space" + str(line[0]),
"mode": "float",
"kind": "Percent",
"value": used * 100}
channel_list.append(channel1)
channel_list.append(channel2)
channel_list.append(channel3)
channel_list.append(channel4)
channel_list.append(channel5)
return channel_list
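    # Illustrative note, not part of the original source: a typical
    # ``df -k`` line such as
    #     /dev/sda1  10255636  5021692  4693232  52% /
    # yields five channels for /dev/sda1: total, used and free bytes (the
    # KiB columns multiplied by 1024) plus used/free space as percentages
    # of used + free.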
| {
"content_hash": "bd1ffd6b33d77e2c19ee1c151fad7964",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 110,
"avg_line_length": 34.66326530612245,
"alnum_prop": 0.4450986164262585,
"repo_name": "eagle00789/PythonMiniProbe",
"id": "17fe3302c52616810e6edc10929168fb430ab340",
"size": "4957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miniprobe/sensors/diskspace.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "203383"
},
{
"name": "Shell",
"bytes": "3052"
},
{
"name": "Smarty",
"bytes": "105"
}
],
"symlink_target": ""
} |
import textwrap
import pytest
from pants.backend.helm.dependency_inference.unittest import (
HelmUnitTestChartDependencyInferenceFieldSet,
InferHelmUnitTestChartDependencyRequest,
)
from pants.backend.helm.dependency_inference.unittest import rules as infer_deps_rules
from pants.backend.helm.target_types import (
HelmChartTarget,
HelmUnitTestTestsGeneratorTarget,
HelmUnitTestTestTarget,
)
from pants.backend.helm.target_types import rules as target_types_rules
from pants.backend.helm.testutil import (
HELM_CHART_FILE,
HELM_VALUES_FILE,
K8S_SERVICE_TEMPLATE,
gen_chart_file,
)
from pants.build_graph.address import Address
from pants.engine.rules import QueryRule
from pants.engine.target import InferredDependencies
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
return RuleRunner(
target_types=[HelmChartTarget, HelmUnitTestTestsGeneratorTarget, HelmUnitTestTestTarget],
rules=[
*target_types_rules(),
*infer_deps_rules(),
QueryRule(InferredDependencies, (InferHelmUnitTestChartDependencyRequest,)),
],
)
def test_infers_single_chart(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": textwrap.dedent(
"""\
helm_chart(name="foo")
"""
),
"Chart.yaml": HELM_CHART_FILE,
"values.yaml": HELM_VALUES_FILE,
"templates/service.yaml": K8S_SERVICE_TEMPLATE,
"tests/BUILD": textwrap.dedent(
"""\
helm_unittest_tests(name="foo_tests", sources=["*_test.yaml"])
"""
),
"tests/service_test.yaml": "",
}
)
chart_tgt = rule_runner.get_target(Address("", target_name="foo"))
unittest_tgt = rule_runner.get_target(Address("tests", target_name="foo_tests"))
inferred_deps = rule_runner.request(
InferredDependencies,
[
InferHelmUnitTestChartDependencyRequest(
HelmUnitTestChartDependencyInferenceFieldSet.create(unittest_tgt)
)
],
)
assert inferred_deps == InferredDependencies([chart_tgt.address])
def test_injects_parent_chart(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"src/chart1/BUILD": """helm_chart()""",
"src/chart1/Chart.yaml": gen_chart_file("chart1", version="0.1.0"),
"src/chart1/values.yaml": HELM_VALUES_FILE,
"src/chart1/templates/service.yaml": K8S_SERVICE_TEMPLATE,
"src/chart1/tests/BUILD": """helm_unittest_tests(sources=["*_test.yaml"])""",
"src/chart1/tests/service_test.yaml": "",
"src/chart2/BUILD": """helm_chart()""",
"src/chart2/Chart.yaml": gen_chart_file("chart2", version="0.1.0"),
"src/chart2/values.yaml": HELM_VALUES_FILE,
"src/chart2/templates/service.yaml": K8S_SERVICE_TEMPLATE,
"src/chart2/tests/BUILD": """helm_unittest_tests(sources=["*_test.yaml"])""",
"src/chart2/tests/service_test.yaml": "",
}
)
chart1_tgt = rule_runner.get_target(Address("src/chart1", target_name="chart1"))
chart1_unittest_tgt = rule_runner.get_target(Address("src/chart1/tests", target_name="tests"))
chart2_tgt = rule_runner.get_target(Address("src/chart2", target_name="chart2"))
chart2_unittest_tgt = rule_runner.get_target(Address("src/chart2/tests", target_name="tests"))
chart1_inferred_deps = rule_runner.request(
InferredDependencies,
[
InferHelmUnitTestChartDependencyRequest(
HelmUnitTestChartDependencyInferenceFieldSet.create(chart1_unittest_tgt)
)
],
)
chart2_inferred_deps = rule_runner.request(
InferredDependencies,
[
InferHelmUnitTestChartDependencyRequest(
HelmUnitTestChartDependencyInferenceFieldSet.create(chart2_unittest_tgt)
)
],
)
assert chart1_inferred_deps == InferredDependencies([chart1_tgt.address])
assert chart2_inferred_deps == InferredDependencies([chart2_tgt.address])
| {
"content_hash": "b67e17b95d22f19a28c359bb9da0bf39",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 98,
"avg_line_length": 36.60344827586207,
"alnum_prop": 0.6380122468205369,
"repo_name": "benjyw/pants",
"id": "80e6e1e67be8e5d0d36cf82e1cfb721d7dd3c3ed",
"size": "4378",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/helm/dependency_inference/unittest_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
} |
from django import forms
from wagtail.admin.staticfiles import versioned_static
class FilteredSelect(forms.Select):
"""
A select box where the options are shown and hidden dynamically in response to another
form field whose HTML `id` is specified in `filter_field`.
The `choices` list accepts entries of the form `(value, label, filter_values)` in addition
to the standard `(value, label)` tuples, where `filter_values` is a list of values;
whenever `filter_field` is set to a non-empty value, only the items with that value in their
`filter_values` list are shown.
filter_field and filter_values are inserted as 'data-' attributes on the rendered HTML, where
they are picked up by the JavaScript behaviour code -
see wagtailadmin/js/filtered-select.js for an example of how these attributes are configured.
"""
def __init__(self, attrs=None, choices=(), filter_field=""):
super().__init__(attrs, choices)
self.filter_field = filter_field
def build_attrs(self, base_attrs, extra_attrs=None):
my_attrs = {
"data-widget": "filtered-select",
"data-filter-field": self.filter_field,
}
if extra_attrs:
my_attrs.update(extra_attrs)
return super().build_attrs(base_attrs, my_attrs)
def optgroups(self, name, value, attrs=None):
# copy of Django's Select.optgroups, modified to accept filter_value as a
# third item in the tuple and expose that as a data-filter-value attribute
# on the final <option>
groups = []
has_selected = False
for index, choice in enumerate(self.choices):
try:
(option_value, option_label, filter_value) = choice
except ValueError:
# *ChoiceField will still output blank options as a 2-tuple,
# so need to handle that too
(option_value, option_label) = choice
filter_value = None
if option_value is None:
option_value = ""
subgroup = []
if isinstance(option_label, (list, tuple)):
# this is an optgroup - we will iterate over the list in the second item of
# the tuple (which has been assigned to option_label)
group_name = option_value
subindex = 0
choices = option_label
else:
# this is a top-level choice; put it in its own group with no name
group_name = None
subindex = None
choices = [(option_value, option_label, filter_value)]
groups.append((group_name, subgroup, index))
for choice in choices:
try:
(subvalue, sublabel, filter_value) = choice
except ValueError:
(subvalue, sublabel) = choice
filter_value = None
selected = str(subvalue) in value and (
not has_selected or self.allow_multiple_selected
)
has_selected |= selected
subgroup.append(
self.create_option(
name,
subvalue,
sublabel,
selected,
index,
subindex=subindex,
filter_value=filter_value,
)
)
if subindex is not None:
subindex += 1
return groups
def create_option(
self,
name,
value,
label,
selected,
index,
subindex=None,
attrs=None,
filter_value=None,
):
option = super().create_option(
name, value, label, selected, index, subindex=subindex, attrs=attrs
)
if filter_value is not None:
option["attrs"]["data-filter-value"] = ",".join(
[str(val) for val in filter_value]
)
return option
@property
def media(self):
return forms.Media(
js=[
versioned_static("wagtailadmin/js/filtered-select.js"),
]
)
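# --- Illustrative usage sketch, not part of the original Wagtail source ----
# Rendering a select whose options are filtered by the value of another
# field (the ``id_continent`` element and the choice values below are made
# up for the example); requires a configured Django project:
#
#     widget = FilteredSelect(
#         filter_field="id_continent",
#         choices=[
#             ("", "---------"),
#             ("fr", "France", ["eu"]),
#             ("jp", "Japan", ["asia"]),
#         ],
#     )
#     html = widget.render("country", None)
#     # each 3-tuple option gains a data-filter-value="eu" / "asia" attribute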
| {
"content_hash": "272d440bc8d45d29b80d8ba9beb3a134",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 97,
"avg_line_length": 35.28688524590164,
"alnum_prop": 0.5396051103368177,
"repo_name": "thenewguy/wagtail",
"id": "a2e4db7332223b789f2f7451d0f7592547233564",
"size": "4305",
"binary": false,
"copies": "4",
"ref": "refs/heads/tng_master",
"path": "wagtail/admin/widgets/filtered_select.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2522"
},
{
"name": "Dockerfile",
"bytes": "2041"
},
{
"name": "HTML",
"bytes": "593033"
},
{
"name": "JavaScript",
"bytes": "615631"
},
{
"name": "Makefile",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "6571572"
},
{
"name": "SCSS",
"bytes": "219986"
},
{
"name": "Shell",
"bytes": "6845"
},
{
"name": "TypeScript",
"bytes": "288325"
}
],
"symlink_target": ""
} |
import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib.urlencode(
[(smart_str(k),
isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
    Returns a floating point number expressed in seconds since the epoch, in
UTC.
"""
# emails.Util.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int.
if value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
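# Illustrative round-trip, not part of the original source:
#
#     int_to_base36(86400)     # -> '1uo0'
#     base36_to_int('1uo0')    # -> 86400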
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.decode('string_escape') for e in etags]
return etags
def quote_etag(etag):
"""
    Wraps a string in double quotes, escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
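# Illustrative usage (not part of the original module): quote_etag adds the
# surrounding double quotes (escaping quotes and backslashes) that parse_etags
# strips again, e.g. quote_etag('abc') -> '"abc"'.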
if sys.version_info >= (2, 6):
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
else:
# Python 2.5 compatibility. This actually works for Python 2.6 and above,
# but the above definition is much more obviously correct and so is
# preferred going forward.
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return p1[0:2] == p2[0:2]
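# Illustrative usage (not part of the original module): both definitions agree
# on plain scheme/host comparisons:
#   same_origin('http://example.com/a', 'http://example.com/b')   ->  True
#   same_origin('http://example.com/a', 'https://example.com/a')  ->  False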
| {
"content_hash": "8d4a6e731eafea797652b0613b876d26",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 85,
"avg_line_length": 34.377990430622006,
"alnum_prop": 0.6261656228253305,
"repo_name": "mixman/djangodev",
"id": "af44ff498ca239ab65f6a4ea97353b1ddf4284b0",
"size": "7185",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/utils/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "88362"
},
{
"name": "Python",
"bytes": "7834206"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
import sublime, sublime_plugin
history = []
menuitems = []
history_index = 0
def getClipboardData():
try:# win32
import win32clipboard
win32clipboard.OpenClipboard()
data = win32clipboard.GetClipboardData()
win32clipboard.CloseClipboard()
except:
pass
try:# windows7
import ctypes
ctypes.windll.user32.OpenClipboard(None)
pc = ctypes.windll.user32.GetClipboardData(1)
data = ctypes.c_char_p(pc).value.decode()
ctypes.windll.user32.CloseClipboard()
except:
pass
try:# mac
import subprocess
p = subprocess.Popen(['pbpaste'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
try:# xclip
import subprocess
p = subprocess.Popen(['xclip', '-o'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
try:# xsel
import subprocess
        p = subprocess.Popen(['xsel', '-bo'], stdout=subprocess.PIPE)
retcode = p.wait()
clip = p.stdout.read()
data = clip.decode()
except:
pass
if not 'data' in locals():
return False
else:
return data
def setClipboardHistory():
global history_index, menuitems, history
data = getClipboardData()
if data == False:
return None
elif data in history:
return None
elif data == '':
return None
settings = sublime.load_settings('Sublime-clipboardRound.sublime-settings')
limit = settings.get('limit')
if not history or history[0] != data:
history.insert(0, data)
history_index = 0
menuitems = history
if limit < len(history):
for i in range(len(history) - limit):
history.pop()
menuitems.pop()
return None
def pasteClipboardHistory(self, text):
self.view.run_command('undo')
self.view.run_command('paste')
sublime.set_clipboard(text)
class Clip_round_showCommand(sublime_plugin.TextCommand):
def on_chosen(self, index):
global flag
if index == -1:
return
sublime.set_clipboard(menuitems[index])
self.view.run_command('paste')
flag = True
def run(self, edit):
global menuitems
if menuitems == []:
return None
self.view.window().show_quick_panel(menuitems, self.on_chosen, sublime.MONOSPACE_FONT)
class Clip_round_prevCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history_index
if history:
clip = sublime.get_clipboard()
history_index = min(history_index + 1, len(history) - 1)
sublime.set_clipboard(history[history_index])
sublime.set_timeout(lambda:
pasteClipboardHistory(self, clip), 0)
class Clip_round_nextCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history_index
if history:
clip = sublime.get_clipboard()
history_index = max(history_index - 1, 0)
sublime.set_clipboard(history[history_index])
sublime.set_timeout(lambda:
pasteClipboardHistory(self, clip), 0)
class Clip_round_clearCommand(sublime_plugin.TextCommand):
def run(self, edit):
global history, history_index, menuitems, data
del menuitems[:]
del history[:]
history_index = 0
sublime.set_clipboard('')
        print('clipboardRound: cleared clipboard history.')
class ClipboardRoundListener(sublime_plugin.EventListener):
def on_query_context(self, view, *args):
sublime.set_timeout(lambda:
setClipboardHistory(), 0)
return None
def on_text_command(self, view, command, *args):
sublime.set_timeout(lambda:
setClipboardHistory(), 0)
| {
"content_hash": "7c2622e7b1508c48b3cdc93f25a2d4f4",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 94,
"avg_line_length": 27.143835616438356,
"alnum_prop": 0.6040878122634368,
"repo_name": "tgfjt/Sublime-clipboardRound",
"id": "94ea7fb3d06a55469b6ea49030af399cea86466d",
"size": "3963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "clipboardround.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3963"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('build', '0003_auto_20190525_2355'),
]
operations = [
migrations.AlterField(
model_name='build',
name='part',
field=models.ForeignKey(help_text='Select part to build', limit_choices_to={'active': True, 'buildable': True, 'is_template': False}, on_delete=django.db.models.deletion.CASCADE, related_name='builds', to='part.Part'),
),
]
| {
"content_hash": "3c2f25544e1bb13e0c72af32b0d877a0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 230,
"avg_line_length": 32.05882352941177,
"alnum_prop": 0.634862385321101,
"repo_name": "inventree/InvenTree",
"id": "1a43e5bfc8ec305fbcc23faec624a0877490c5cf",
"size": "592",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "InvenTree/build/migrations/0004_auto_20190525_2356.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246444"
},
{
"name": "Dockerfile",
"bytes": "7169"
},
{
"name": "HTML",
"bytes": "586821"
},
{
"name": "JavaScript",
"bytes": "1970070"
},
{
"name": "Procfile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2606104"
},
{
"name": "Shell",
"bytes": "27115"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Photo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nasa_id', models.IntegerField()),
('sol', models.IntegerField()),
('earth_date', models.DateField()),
],
),
]
| {
"content_hash": "4622afea98cee8bc90410445ad80073f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 24.73913043478261,
"alnum_prop": 0.5430579964850615,
"repo_name": "WillWeatherford/mars-rover",
"id": "f072c0ede26196c73b08135c4a264baec8670f99",
"size": "642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "photos/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "124777"
},
{
"name": "HTML",
"bytes": "18272"
},
{
"name": "JavaScript",
"bytes": "14348"
},
{
"name": "Python",
"bytes": "29583"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'LuckyBit API'
copyright = u'2014, LuckyBit Online Games'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'LuckyBitAPIdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'LuckyBitAPI.tex', u'LuckyBit API',
u'LuckyBit Support', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'luckybitapidocumentation', u'LuckyBit API',
[u'LuckyBit Support'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'LuckyBitAPI', u'LuckyBit API',
u'LuckyBit Support', 'LuckyBitAPI', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "562f34cc2d71c503fa8e4384fa0b9637",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.03493449781659,
"alnum_prop": 0.7018811341330425,
"repo_name": "LuckyBit/api",
"id": "b3f64e1f1fbd288f531872790c058f0f45ebabcd",
"size": "7759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5588"
},
{
"name": "Python",
"bytes": "7759"
}
],
"symlink_target": ""
} |
import uuid
import mock
import mox
from oslo.config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import db
from nova import exception
from nova.image import glance
from nova import objects
from nova.openstack.common import jsonutils
from nova.openstack.common import uuidutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_block_device
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
CONF.import_opt('password_length', 'nova.utils')
FAKE_UUID = fakes.FAKE_UUID
INSTANCE_IDS = {FAKE_UUID: 1}
def return_server_not_found(*arg, **kwarg):
raise exception.InstanceNotFound(instance_id='42')
def instance_update_and_get_original(context, instance_uuid, values,
update_cells=True,
columns_to_join=None,
):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
inst = dict(inst, **values)
return (inst, inst)
def instance_update(context, instance_uuid, kwargs, update_cells=True):
inst = fakes.stub_instance(INSTANCE_IDS[instance_uuid], host='fake_host')
return inst
class MockSetAdminPassword(object):
def __init__(self):
self.instance_id = None
self.password = None
def __call__(self, context, instance, password):
self.instance_id = instance['uuid']
self.password = password
class ServerActionsControllerTest(test.TestCase):
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
image_href = 'http://localhost/v3/images/%s' % image_uuid
def setUp(self):
super(ServerActionsControllerTest, self).setUp()
CONF.set_override('host', 'localhost', group='glance')
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
host='fake_host'))
self.stubs.Set(db, 'instance_update_and_get_original',
instance_update_and_get_original)
fakes.stub_out_nw_api(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
fake.stub_out_image_service(self.stubs)
self.flags(allow_instance_snapshots=True,
enable_instance_password=True)
self.uuid = FAKE_UUID
self.url = '/servers/%s/action' % self.uuid
self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
self.compute_api = self.controller.compute_api
self.context = context.RequestContext('fake', 'fake')
self.app = fakes.wsgi_app_v3(init_only=('servers',),
fake_auth_context=self.context)
def _make_request(self, url, body):
req = webob.Request.blank('/v3' + url)
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.content_type = 'application/json'
return req.get_response(self.app)
def _stub_instance_get(self, uuid=None):
self.mox.StubOutWithMock(compute_api.API, 'get')
if uuid is None:
uuid = uuidutils.generate_uuid()
instance = fake_instance.fake_db_instance(
id=1, uuid=uuid, vm_state=vm_states.ACTIVE, task_state=None)
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), instance)
self.compute_api.get(self.context, uuid, want_objects=True,
expected_attrs=['pci_devices']).AndReturn(instance)
return instance
def _test_locked_instance(self, action, method=None, body_map=None,
compute_api_args_map=None):
if method is None:
method = action
if body_map is None:
body_map = {}
if compute_api_args_map is None:
compute_api_args_map = {}
instance = self._stub_instance_get()
args, kwargs = compute_api_args_map.get(action, ((), {}))
getattr(compute_api.API, method)(self.context, instance,
*args, **kwargs).AndRaise(
exception.InstanceIsLocked(instance_uuid=instance['uuid']))
self.mox.ReplayAll()
res = self._make_request('/servers/%s/action' % instance['uuid'],
{action: body_map.get(action)})
self.assertEqual(409, res.status_int)
# Do these here instead of tearDown because this method is called
# more than once for the same test case
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_actions_with_locked_instance(self):
actions = ['resize', 'confirm_resize', 'revert_resize', 'reboot',
'rebuild']
body_map = {'resize': {'flavor_ref': '2'},
'reboot': {'type': 'HARD'},
'rebuild': {'image_ref': self.image_uuid,
'admin_password': 'TNc53Dr8s7vw'}}
args_map = {'resize': (('2'), {}),
'confirm_resize': ((), {}),
'reboot': (('HARD',), {}),
'rebuild': ((self.image_uuid, 'TNc53Dr8s7vw'), {})}
for action in actions:
self.mox.StubOutWithMock(compute_api.API, action)
self._test_locked_instance(action, method=None,
body_map=body_map,
compute_api_args_map=args_map)
def test_reboot_hard(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequestV3.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_soft(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequestV3.blank(self.url)
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_incorrect_type(self):
body = dict(reboot=dict(type="NOT_A_TYPE"))
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_missing_type(self):
body = dict(reboot=dict())
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_none(self):
body = dict(reboot=dict(type=None))
req = fakes.HTTPRequest.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_not_found(self):
self.stubs.Set(db, 'instance_get_by_uuid',
return_server_not_found)
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_reboot,
req, str(uuid.uuid4()), body)
def test_reboot_raises_conflict_on_invalid_state(self):
body = dict(reboot=dict(type="HARD"))
def fake_reboot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'reboot', fake_reboot)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_soft_with_soft_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="SOFT"))
req = fakes.HTTPRequestV3.blank(self.url)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_reboot_hard_with_soft_in_progress_does_not_raise(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequestV3.blank(self.url)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING))
self.controller._action_reboot(req, FAKE_UUID, body)
def test_reboot_hard_with_hard_in_progress_raises_conflict(self):
body = dict(reboot=dict(type="HARD"))
req = fakes.HTTPRequestV3.blank(self.url)
self.stubs.Set(db, 'instance_get_by_uuid',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
task_state=task_states.REBOOTING_HARD))
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_reboot,
req, FAKE_UUID, body)
def test_rebuild_accepted_minimum(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"image_ref": self._image_href,
},
}
req = fakes.HTTPRequestV3.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(len(body['server']['admin_password']),
CONF.password_length)
self.assertEqual(robj['location'], self_href)
def test_rebuild_instance_with_image_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v3/'
body = {
'rebuild': {
'image_ref': self.image_uuid,
},
}
req = fakes.HTTPRequestV3.blank('/v3/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body=body)
self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_instance_with_image_href_uses_uuid(self):
info = dict(image_href_in_call=None)
def rebuild(self2, context, instance, image_href, *args, **kwargs):
info['image_href_in_call'] = image_href
self.stubs.Set(db, 'instance_get',
fakes.fake_instance_get(vm_state=vm_states.ACTIVE))
self.stubs.Set(compute_api.API, 'rebuild', rebuild)
# proper local hrefs must start with 'http://localhost/v3/'
body = {
'rebuild': {
'image_ref': self.image_href,
},
}
req = fakes.HTTPRequestV3.blank('/v3/servers/a/action')
self.controller._action_rebuild(req, FAKE_UUID, body=body)
self.assertEqual(info['image_href_in_call'], self.image_uuid)
def test_rebuild_accepted_minimum_pass_disabled(self):
# run with enable_instance_password disabled to verify admin_password
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
self_href = 'http://localhost/v3/servers/%s' % FAKE_UUID
body = {
"rebuild": {
"image_ref": self._image_href,
},
}
req = fakes.HTTPRequestV3.blank(self.url)
robj = self.controller._action_rebuild(req, FAKE_UUID, body=body)
body = robj.obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn("admin_password", body['server'])
self.assertEqual(robj['location'], self_href)
def test_rebuild_raises_conflict_on_invalid_state(self):
body = {
"rebuild": {
"image_ref": self._image_href,
},
}
def fake_rebuild(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'rebuild', fake_rebuild)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_rebuild,
req, FAKE_UUID, body=body)
def test_rebuild_accepted_with_metadata(self):
metadata = {'new': 'metadata'}
return_server = fakes.fake_instance_get(metadata=metadata,
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"image_ref": self._image_href,
"metadata": metadata,
},
}
req = fakes.HTTPRequestV3.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
self.assertEqual(body['server']['metadata'], metadata)
def test_rebuild_accepted_with_bad_metadata(self):
body = {
"rebuild": {
"image_ref": self._image_href,
"metadata": "stack",
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body=body)
def test_rebuild_with_too_large_metadata(self):
body = {
"rebuild": {
"image_ref": self._image_href,
"metadata": {
256 * "k": "value"
}
}
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_rebuild, req,
FAKE_UUID, body=body)
def test_rebuild_bad_entity(self):
body = {
"rebuild": {
"imageId": self._image_href,
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body=body)
def test_rebuild_admin_password(self):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"image_ref": self._image_href,
"admin_password": "asdf",
},
}
req = fakes.HTTPRequestV3.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertEqual(body['server']['admin_password'], 'asdf')
def test_rebuild_admin_password_pass_disabled(self):
# run with enable_instance_password disabled to verify admin_password
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE, host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"image_ref": self._image_href,
"admin_password": "asdf",
},
}
req = fakes.HTTPRequestV3.blank(self.url)
body = self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
self.assertEqual(body['server']['image']['id'], '2')
self.assertNotIn('admin_password', body['server'])
def test_rebuild_server_not_found(self):
def server_not_found(self, instance_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(db, 'instance_get_by_uuid', server_not_found)
body = {
"rebuild": {
"image_ref": self._image_href,
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_rebuild,
req, FAKE_UUID, body=body)
def test_rebuild_with_bad_image(self):
body = {
"rebuild": {
"image_ref": "foo",
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body=body)
def test_rebuild_when_kernel_not_exists(self):
def return_image_meta(*args, **kwargs):
image_meta_table = {
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
body = {
"rebuild": {
"image_ref": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_rebuild,
req, FAKE_UUID, body=body)
def test_rebuild_proper_kernel_ram(self):
instance_meta = {'kernel_id': None, 'ramdisk_id': None}
orig_get = compute_api.API.get
def wrap_get(*args, **kwargs):
inst = orig_get(*args, **kwargs)
instance_meta['instance'] = inst
return inst
def fake_save(context, **kwargs):
instance = instance_meta['instance']
for key in instance_meta.keys():
if key in instance.obj_what_changed():
instance_meta[key] = instance[key]
def return_image_meta(*args, **kwargs):
image_meta_table = {
'1': {'id': 1, 'status': 'active', 'container_format': 'aki'},
'2': {'id': 2, 'status': 'active', 'container_format': 'ari'},
'155d900f-4e14-4e4c-a73d-069cbf4541e6':
{'id': 3, 'status': 'active', 'container_format': 'raw',
'properties': {'kernel_id': 1, 'ramdisk_id': 2}},
}
image_id = args[2]
try:
image_meta = image_meta_table[str(image_id)]
except KeyError:
raise exception.ImageNotFound(image_id=image_id)
return image_meta
self.stubs.Set(fake._FakeImageService, 'show', return_image_meta)
self.stubs.Set(compute_api.API, 'get', wrap_get)
self.stubs.Set(objects.Instance, 'save', fake_save)
body = {
"rebuild": {
"image_ref": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.controller._action_rebuild(req, FAKE_UUID, body=body).obj
self.assertEqual(instance_meta['kernel_id'], '1')
self.assertEqual(instance_meta['ramdisk_id'], '2')
def _test_rebuild_preserve_ephemeral(self, value=None):
return_server = fakes.fake_instance_get(image_ref='2',
vm_state=vm_states.ACTIVE,
host='fake_host')
self.stubs.Set(db, 'instance_get_by_uuid', return_server)
body = {
"rebuild": {
"image_ref": self._image_href,
},
}
if value is not None:
body['rebuild']['preserve_ephemeral'] = value
req = fakes.HTTPRequestV3.blank(self.url)
context = req.environ['nova.context']
self.mox.StubOutWithMock(compute_api.API, 'rebuild')
if value is not None:
compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
mox.IgnoreArg(), preserve_ephemeral=value)
else:
compute_api.API.rebuild(context, mox.IgnoreArg(), self._image_href,
mox.IgnoreArg())
self.mox.ReplayAll()
self.controller._action_rebuild(req, FAKE_UUID, body=body)
def test_rebuild_preserve_ephemeral_true(self):
self._test_rebuild_preserve_ephemeral(True)
def test_rebuild_preserve_ephemeral_false(self):
self._test_rebuild_preserve_ephemeral(False)
def test_rebuild_preserve_ephemeral_default(self):
self._test_rebuild_preserve_ephemeral()
def test_resize_server(self):
body = dict(resize=dict(flavor_ref="http://localhost/3"))
self.resize_called = False
def resize_mock(*args):
self.resize_called = True
self.stubs.Set(compute_api.API, 'resize', resize_mock)
req = fakes.HTTPRequestV3.blank(self.url)
body = self.controller._action_resize(req, FAKE_UUID, body)
self.assertEqual(self.resize_called, True)
def test_resize_server_no_flavor(self):
body = dict(resize=dict())
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_server_no_flavor_ref(self):
body = dict(resize=dict(flavor_ref=None))
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_with_server_not_found(self):
body = dict(resize=dict(flavor_ref="http://localhost/3"))
self.stubs.Set(compute_api.API, 'get', return_server_not_found)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_with_image_exceptions(self):
body = dict(resize=dict(flavor_ref="http://localhost/3"))
self.resize_called = 0
image_id = 'fake_image_id'
exceptions = [
(exception.ImageNotAuthorized(image_id=image_id),
webob.exc.HTTPUnauthorized),
(exception.ImageNotFound(image_id=image_id),
webob.exc.HTTPBadRequest),
(exception.Invalid, webob.exc.HTTPBadRequest),
]
raised, expected = map(iter, zip(*exceptions))
def _fake_resize(obj, context, instance, flavor_id):
self.resize_called += 1
raise raised.next()
self.stubs.Set(compute_api.API, 'resize', _fake_resize)
for call_no in range(len(exceptions)):
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(expected.next(),
self.controller._action_resize,
req, FAKE_UUID, body)
self.assertEqual(self.resize_called, call_no + 1)
def test_resize_with_too_many_instances(self):
body = dict(resize=dict(flavor_ref="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.TooManyInstances(message="TooManyInstance")
self.stubs.Set(compute_api.API, 'resize', fake_resize)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller._action_resize,
req, FAKE_UUID, body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.CannotResizeDisk(reason=''))
def test_resize_raises_cannot_resize_disk(self, mock_resize):
body = dict(resize=dict(flavor_ref="http://localhost/3"))
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
@mock.patch('nova.compute.api.API.resize',
side_effect=exception.FlavorNotFound(reason='',
flavor_id='fake_id'))
def test_resize_raises_flavor_not_found(self, mock_resize):
body = dict(resize=dict(flavor_ref="http://localhost/3"))
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_resize_raises_conflict_on_invalid_state(self):
body = dict(resize=dict(flavor_ref="http://localhost/3"))
def fake_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'resize', fake_resize)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_resize,
req, FAKE_UUID, body)
def test_confirm_resize_server(self):
body = dict(confirm_resize=None)
self.confirm_resize_called = False
def cr_mock(*args):
self.confirm_resize_called = True
self.stubs.Set(compute_api.API, 'confirm_resize', cr_mock)
req = fakes.HTTPRequestV3.blank(self.url)
body = self.controller._action_confirm_resize(req, FAKE_UUID, body)
self.assertEqual(self.confirm_resize_called, True)
def test_confirm_resize_migration_not_found(self):
body = dict(confirm_resize=None)
def confirm_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'confirm_resize',
confirm_resize_mock)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_confirm_resize_raises_conflict_on_invalid_state(self):
body = dict(confirm_resize=None)
def fake_confirm_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'confirm_resize',
fake_confirm_resize)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_confirm_resize,
req, FAKE_UUID, body)
def test_revert_resize_migration_not_found(self):
body = dict(revertResize=None)
def revert_resize_mock(*args):
raise exception.MigrationNotFoundByStatus(instance_id=1,
status='finished')
self.stubs.Set(compute_api.API,
'revert_resize',
revert_resize_mock)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_revert_resize_server_not_found(self):
body = dict(revertResize=None)
req = fakes.HTTPRequestV3.blank(self.url)
        self.assertRaises(webob.exc.HTTPNotFound,
self.controller._action_revert_resize,
req, "bad_server_id", body)
def test_revert_resize_server(self):
body = dict(revertResize=None)
self.revert_resize_called = False
def revert_mock(*args):
self.revert_resize_called = True
self.stubs.Set(compute_api.API, 'revert_resize', revert_mock)
req = fakes.HTTPRequestV3.blank(self.url)
body = self.controller._action_revert_resize(req, FAKE_UUID, body)
self.assertEqual(self.revert_resize_called, True)
def test_revert_resize_raises_conflict_on_invalid_state(self):
body = dict(revertResize=None)
def fake_revert_resize(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'revert_resize',
fake_revert_resize)
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_revert_resize,
req, FAKE_UUID, body)
def test_create_image(self):
body = {
'create_image': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequestV3.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual(glance.generate_image_url('123'), location)
def test_create_image_name_too_long(self):
long_name = 'a' * 260
body = {
'create_image': {
'name': long_name,
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image, req,
FAKE_UUID, body)
def _do_test_create_volume_backed_image(self, extra_properties):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(create_image=dict(name='snapshot_of_volume_backed'))
if extra_properties:
body['create_image']['metadata'] = extra_properties
image_service = glance.get_default_image_service()
bdm = [dict(volume_id=_fake_id('a'),
volume_size=1,
device_name='vda',
delete_on_termination=False)]
props = dict(kernel_id=_fake_id('b'),
ramdisk_id=_fake_id('c'),
root_device_name='/dev/vda',
block_device_mapping=bdm)
original_image = dict(properties=props,
container_format='ami',
status='active',
is_public=True)
image_service.create(None, original_image)
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(image_ref=original_image['id'],
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda')
self.stubs.Set(db, 'instance_get_by_uuid', instance)
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake')
snapshot = dict(id=_fake_id('d'))
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
self.mox.ReplayAll()
req = fakes.HTTPRequestV3.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
image_id = location.replace(glance.generate_image_url(''), '')
image = image_service.show(None, image_id)
self.assertEqual(image['name'], 'snapshot_of_volume_backed')
properties = image['properties']
self.assertEqual(properties['kernel_id'], _fake_id('b'))
self.assertEqual(properties['ramdisk_id'], _fake_id('c'))
self.assertEqual(properties['root_device_name'], '/dev/vda')
self.assertEqual(properties['bdm_v2'], True)
bdms = properties['block_device_mapping']
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['boot_index'], 0)
self.assertEqual(bdms[0]['source_type'], 'snapshot')
self.assertEqual(bdms[0]['destination_type'], 'volume')
self.assertEqual(bdms[0]['snapshot_id'], snapshot['id'])
for fld in ('connection_info', 'id',
'instance_uuid', 'device_name'):
self.assertNotIn(fld, bdms[0])
for k in extra_properties.keys():
self.assertEqual(properties[k], extra_properties[k])
def test_create_volume_backed_image_no_metadata(self):
self._do_test_create_volume_backed_image({})
def test_create_volume_backed_image_with_metadata(self):
self._do_test_create_volume_backed_image(dict(ImageType='Gold',
ImageVersion='2.0'))
def _test_create_volume_backed_image_with_metadata_from_volume(
self, extra_metadata=None):
def _fake_id(x):
return '%s-%s-%s-%s' % (x * 8, x * 4, x * 4, x * 12)
body = dict(create_image=dict(name='snapshot_of_volume_backed'))
if extra_metadata:
body['create_image']['metadata'] = extra_metadata
image_service = glance.get_default_image_service()
def fake_block_device_mapping_get_all_by_instance(context, inst_id,
use_slave=False):
return [fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': _fake_id('a'),
'source_type': 'snapshot',
'destination_type': 'volume',
'volume_size': 1,
'device_name': 'vda',
'snapshot_id': 1,
'boot_index': 0,
'delete_on_termination': False,
'no_device': None})]
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
fake_block_device_mapping_get_all_by_instance)
instance = fakes.fake_instance_get(image_ref='',
vm_state=vm_states.ACTIVE,
root_device_name='/dev/vda')
self.stubs.Set(db, 'instance_get_by_uuid', instance)
fake_metadata = {'test_key1': 'test_value1',
'test_key2': 'test_value2'}
volume = dict(id=_fake_id('a'),
size=1,
host='fake',
display_description='fake',
volume_image_metadata=fake_metadata)
snapshot = dict(id=_fake_id('d'))
self.mox.StubOutWithMock(self.controller.compute_api, 'volume_api')
volume_api = self.controller.compute_api.volume_api
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.get(mox.IgnoreArg(), volume['id']).AndReturn(volume)
volume_api.create_snapshot_force(mox.IgnoreArg(), volume['id'],
mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(snapshot)
req = fakes.HTTPRequestV3.blank(self.url)
self.mox.ReplayAll()
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
image_id = location.replace('http://localhost:9292/images/', '')
image = image_service.show(None, image_id)
properties = image['properties']
self.assertEqual(properties['test_key1'], 'test_value1')
self.assertEqual(properties['test_key2'], 'test_value2')
if extra_metadata:
for key, val in extra_metadata.items():
self.assertEqual(properties[key], val)
def test_create_vol_backed_img_with_meta_from_vol_without_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume()
def test_create_vol_backed_img_with_meta_from_vol_with_extra_meta(self):
self._test_create_volume_backed_image_with_metadata_from_volume(
extra_metadata={'a': 'b'})
def test_create_image_snapshots_disabled(self):
"""Don't permit a snapshot if the allow_instance_snapshots flag is
False
"""
self.flags(allow_instance_snapshots=False)
body = {
'create_image': {
'name': 'Snapshot 1',
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_with_metadata(self):
body = {
'create_image': {
'name': 'Snapshot 1',
'metadata': {'key': 'asdf'},
},
}
req = fakes.HTTPRequestV3.blank(self.url)
response = self.controller._action_create_image(req, FAKE_UUID, body)
location = response.headers['Location']
self.assertEqual(glance.generate_image_url('123'), location)
def test_create_image_with_too_much_metadata(self):
body = {
'create_image': {
'name': 'Snapshot 1',
'metadata': {},
},
}
for num in range(CONF.quota_metadata_items + 1):
body['create_image']['metadata']['foo%i' % num] = "bar"
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_no_name(self):
body = {
'create_image': {},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_blank_name(self):
body = {
'create_image': {
'name': '',
}
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_bad_metadata(self):
body = {
'create_image': {
'name': 'geoff',
'metadata': 'henry',
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller._action_create_image,
req, FAKE_UUID, body)
def test_create_image_raises_conflict_on_invalid_state(self):
def snapshot(*args, **kwargs):
raise exception.InstanceInvalidState(attr='fake_attr',
state='fake_state', method='fake_method',
instance_uuid='fake')
self.stubs.Set(compute_api.API, 'snapshot', snapshot)
body = {
"create_image": {
"name": "test_snapshot",
},
}
req = fakes.HTTPRequestV3.blank(self.url)
self.assertRaises(webob.exc.HTTPConflict,
self.controller._action_create_image,
req, FAKE_UUID, body)
| {
"content_hash": "a1a729242f3f9886f49dbc2d64918580",
"timestamp": "",
"source": "github",
"line_count": 1085,
"max_line_length": 79,
"avg_line_length": 38.747465437788016,
"alnum_prop": 0.5553864085059823,
"repo_name": "viggates/nova",
"id": "7b1e98638c5fccb70eba78be0ae9d3b053f2d9e4",
"size": "42677",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/api/openstack/compute/plugins/v3/test_server_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14822788"
},
{
"name": "Shell",
"bytes": "18352"
}
],
"symlink_target": ""
} |
from robot import model, utils
from .message import Message
class Keyword(model.Keyword):
"""Results of a single keyword."""
__slots__ = ['kwname', 'libname', 'status', 'starttime', 'endtime', 'message']
message_class = Message
def __init__(self, kwname='', libname='', doc='', args=(), assign=(),
timeout='', type='kw', status='FAIL', starttime=None,
endtime=None):
model.Keyword.__init__(self, '', doc, args, assign, timeout, type)
#: Name of the keyword without library or resource name.
self.kwname = kwname
#: Name of library or resource containing this keyword.
self.libname = libname
#: String 'PASS' or 'FAIL'.
self.status = status
#: Keyword execution start time in format ``%Y%m%d %H:%M:%S.%f``.
self.starttime = starttime
#: Keyword execution end time in format ``%Y%m%d %H:%M:%S.%f``.
self.endtime = endtime
#: Keyword status message. Used only with suite teardowns.
self.message = ''
@property
def elapsedtime(self):
"""Elapsed execution time of the keyword in milliseconds."""
return utils.get_elapsed_time(self.starttime, self.endtime)
@property
def name(self):
if not self.libname:
return self.kwname
return '%s.%s' % (self.libname, self.kwname)
@property
def passed(self):
"""``True`` if the keyword did pass, ``False`` otherwise."""
return self.status == 'PASS'
| {
"content_hash": "bf439ef9f6a81286cad242d9aa5be028",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 82,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.5884665792922673,
"repo_name": "kyle1986/robortframe",
"id": "9f4fd31e60b429c126b77758b9b412a2fb594407",
"size": "2134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/result/keyword.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "210"
},
{
"name": "CSS",
"bytes": "22850"
},
{
"name": "HTML",
"bytes": "137580"
},
{
"name": "Java",
"bytes": "59216"
},
{
"name": "JavaScript",
"bytes": "160117"
},
{
"name": "Python",
"bytes": "2072305"
},
{
"name": "RobotFramework",
"bytes": "1929991"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
} |
"""
Is my mongo exposed?
"""
import os
import sys
import urwid
from .version import __version__
from .cards import Cards
from .palette import PALETTE
from .tools import check_version
class App(object):
def __init__(self):
self.name = 'mongoaudit'
self.version = __version__
check_version(self.version)
urwid.set_encoding("UTF-8")
self.cards = Cards(self)
self.setup_view()
self.main()
def setup_view(self):
placeholder = urwid.SolidFill()
self.loop = urwid.MainLoop(
placeholder, PALETTE, unhandled_input=self.key_handler)
self.loop.widget = urwid.AttrMap(placeholder, 'bg')
#self.loop.widget._command_map['tab'] = 'cursor down'
#self.loop.widget._command_map['shift tab'] = 'cursor up'
self.loop.screen.set_terminal_properties(colors=256)
self.cards.welcome()
def render(self, card):
div = urwid.Divider()
rdiv = urwid.AttrMap(div, 'header')
header = urwid.Filler(urwid.Pile(
[rdiv, rdiv, rdiv, rdiv, rdiv]), valign='top')
h1_text = urwid.Text(('h1', self.name))
h2_text = urwid.Text(('h2', 'v' + self.version), align='right')
hg_text = urwid.AttrMap(urwid.Padding(urwid.Columns(
[h1_text, h2_text]), left=2, right=2, align='center'), 'header')
body = urwid.Pile([hg_text, rdiv, card, div])
widget = urwid.Overlay(body, header, 'center', 76, 'top', 'pack', top=1)
self.loop.widget.original_widget = widget
@staticmethod
def key_handler(key):
if key in ('q', 'Q', 'esc'):
raise urwid.ExitMainLoop()
elif key == 'ctrl r':
python = sys.executable
os.execl(python, python, *sys.argv)
def main(self):
try:
self.loop.run()
except KeyboardInterrupt:
return 0
def main():
App().main()
if __name__ == "__main__":
main()
| {
"content_hash": "aa598d2f1b5293f4d6c3e12f1202efad",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 29.78787878787879,
"alnum_prop": 0.5783316378433367,
"repo_name": "stampery/mongoaudit",
"id": "094916c4b9c9f3e3576c9408792d908eb3a8b916",
"size": "1991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongoaudit/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "260"
},
{
"name": "Python",
"bytes": "56366"
}
],
"symlink_target": ""
} |
import json
import logging
from google.appengine.api import urlfetch
from google.appengine.runtime import apiproxy_errors
from base import constants
def BuildUrl(master_name, url, use_cbe=False):
base = constants.BUILDBOT_BASE_URL
if use_cbe:
base = constants.CBE_BASE_URL
url += '?json=1'
return '%s/%s/%s' % (base, master_name, url)
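# Illustrative usage (not part of the original module): with the configured
# BUILDBOT_BASE_URL this yields a buildbot JSON URL, e.g.
#   BuildUrl('chromium.perf', 'json/builders')
#     -> '<BUILDBOT_BASE_URL>/chromium.perf/json/builders?json=1'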
def FetchData(url):
try:
return json.loads(FetchText(url))
except ValueError:
logging.warning('Data is corrupt: %s', url)
raise
def FetchText(url):
logging.debug('Retrieving %s', url)
try:
return urlfetch.fetch(url).content
except (apiproxy_errors.DeadlineExceededError, urlfetch.DownloadError,
urlfetch.InternalTransientError):
# Could be intermittent; try again.
try:
return urlfetch.fetch(url).content
except:
logging.error('Error retrieving URL: %s', url)
raise
except:
logging.error('Error retrieving URL: %s', url)
raise
| {
"content_hash": "19c047d53be12525899befd4f119c804",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 23.4390243902439,
"alnum_prop": 0.6940686784599376,
"repo_name": "sahiljain/catapult",
"id": "aac6838cae7b010167dfd6661e9cbaff8f9cd2f6",
"size": "1124",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "firefighter/update/common/buildbot/network.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3598"
},
{
"name": "C++",
"bytes": "6390"
},
{
"name": "CSS",
"bytes": "24751"
},
{
"name": "HTML",
"bytes": "14570791"
},
{
"name": "JavaScript",
"bytes": "511007"
},
{
"name": "Python",
"bytes": "5842419"
},
{
"name": "Shell",
"bytes": "2834"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from app import startup_app
from app.model.pgadapter import PgAdapter
class BaseApiTestCase(TestCase):
# constants
CONTENT_TYPE = 'application/json'
def setUp(self):
self.db = PgAdapter()
self.endpoint = ''
self.teardown_query = ''
def tearDown(self):
if self.teardown_query:
self.db.execute(self.teardown_query)
self.db.commit()
self.db = None
@classmethod
def setUpClass(cls):
app = startup_app()
cls.client = app.test_client()
@classmethod
def tearDownClass(cls):
cls.client = None
def init_data(self):
raise NotImplementedError()
| {
"content_hash": "f458bdca3da286773eb25f2f4c2bc75b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 48,
"avg_line_length": 22.677419354838708,
"alnum_prop": 0.6173541963015647,
"repo_name": "KentaYamada/Siphon",
"id": "0a83d33c71b6f7020e537dd0fa0d6c87098e2deb",
"size": "703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tests/controller/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "97"
},
{
"name": "C#",
"bytes": "54055"
},
{
"name": "CSS",
"bytes": "322"
},
{
"name": "JavaScript",
"bytes": "4968"
},
{
"name": "PLSQL",
"bytes": "140"
},
{
"name": "SQLPL",
"bytes": "2028"
}
],
"symlink_target": ""
} |
"""Base Manager class.
Managers are responsible for a certain aspect of the system. It is a logical
grouping of code relating to a portion of the system. In general other
components should be using the manager to make changes to the components that
it is responsible for.
For example, other components that need to deal with volumes in some way,
should do so by calling methods on the VolumeManager instead of directly
changing fields in the database. This allows us to keep all of the code
relating to volumes in the same place.
We have adopted a basic strategy of Smart managers and dumb data, which means
rather than attaching methods to data objects, components should call manager
methods that act on the data.
Methods on managers that can be executed locally should be called directly. If
a particular method must execute on a remote host, this should be done via rpc
to the service that wraps the manager
Managers should be responsible for most of the db access, and
non-implementation specific data. Anything implementation specific that can't
be generalized should be done by the Driver.
In general, we prefer to have one manager with multiple drivers for different
implementations, but sometimes it makes sense to have multiple managers. You
can think of it this way: Abstract different overall strategies at the manager
level(FlatNetwork vs VlanNetwork), and different implementations at the driver
level(LinuxNetDriver vs CiscoNetDriver).
Managers will often provide methods for initial setup of a host or periodic
tasks to a wrapping service.
This module provides Manager, a base class for managers.
"""
from oslo_service import periodic_task
import nova.conf
import nova.db.main.api
from nova import profiler
from nova import rpc
CONF = nova.conf.CONF
class PeriodicTasks(periodic_task.PeriodicTasks):
def __init__(self):
super(PeriodicTasks, self).__init__(CONF)
class ManagerMeta(profiler.get_traced_meta(), type(PeriodicTasks)):
"""Metaclass to trace all children of a specific class.
This metaclass wraps every public method (not starting with _ or __)
of the class using it. All children classes of the class using ManagerMeta
will be profiled as well.
Adding this metaclass requires that the __trace_args__ attribute be added
to the class we want to modify. That attribute is a dictionary
with one mandatory key: "name". "name" defines the name
of the action to be traced (for example, wsgi, rpc, db).
    The OSprofiler-based tracing, however, will only happen if a profiler
    instance was initiated earlier in the thread, which in turn can only happen
    if profiling is enabled in nova.conf and the API call to the Nova API
    contained specific headers.
"""
class Manager(PeriodicTasks, metaclass=ManagerMeta):
__trace_args__ = {"name": "rpc"}
def __init__(self, host=None, service_name='undefined'):
if not host:
host = CONF.host
self.host = host
self.backdoor_port = None
self.service_name = service_name
self.notifier = rpc.get_notifier(self.service_name, self.host)
self.additional_endpoints = []
super(Manager, self).__init__()
def periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
def init_host(self):
"""Hook to do additional manager initialization when one requests
the service be started. This is called before any service record
is created.
Child classes should override this method.
"""
pass
def cleanup_host(self):
"""Hook to do cleanup work when the service shuts down.
Child classes should override this method.
"""
pass
def pre_start_hook(self):
"""Hook to provide the manager the ability to do additional
start-up work before any RPC queues/consumers are created. This is
called after other initialization has succeeded and a service
record is created.
Child classes should override this method.
"""
pass
def post_start_hook(self):
"""Hook to provide the manager the ability to do additional
start-up work immediately after a service creates RPC consumers
and starts 'running'.
Child classes should override this method.
"""
pass
def reset(self):
"""Hook called on SIGHUP to signal the manager to re-read any
dynamic configuration or do any reconfiguration tasks.
"""
pass
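# Illustrative sketch (added; not part of nova): a hypothetical child manager.
# The names ExampleManager and _heartbeat are assumptions for illustration only;
# they show how a service-specific manager might override init_host and register
# a periodic task through the oslo_service machinery inherited above.
class ExampleManager(Manager):
    def init_host(self):
        # One-time setup, run before any service record is created.
        self.seen_hosts = set()

    @periodic_task.periodic_task(spacing=60)
    def _heartbeat(self, context):
        # Runs roughly every 60 seconds once the wrapping service calls
        # periodic_tasks(context).
        self.seen_hosts.add(self.host)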
| {
"content_hash": "88eaacb4407135f46459c6154653644f",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 79,
"avg_line_length": 36.054263565891475,
"alnum_prop": 0.7153300365512792,
"repo_name": "mahak/nova",
"id": "9c00401b96e166c815c36bcf6d88721b66e53529",
"size": "5383",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
} |
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
ops.NotDifferentiable("TensorListConcat")
ops.NotDifferentiable("TensorListPushBackBatch")
@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
return gen_list_ops.tensor_list_pop_back(
dresult, element_dtype=op.get_attr("element_dtype"))
@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
if dlist is None:
dlist = gen_list_ops.empty_tensor_list(
element_dtype=delement.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
return gen_list_ops.tensor_list_push_back(dlist, delement)
@ops.RegisterGradient("TensorListStack")
def _TensorListStackGrad(unused_op, dtensor):
return gen_list_ops.tensor_list_from_tensor(dtensor,
element_shape=dtensor.shape[1:])
@ops.RegisterGradient("TensorListFromTensor")
def _TensorListFromTensorGrad(op, dlist):
"""Gradient for TensorListFromTensor."""
if op.inputs[0].shape.dims[0].value is not None:
num_elements = op.inputs[0].shape.dims[0].value
else:
num_elements = None
if dlist is None:
dlist = gen_list_ops.empty_tensor_list(
element_dtype=op.inputs[0].dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
tensor_grad = gen_list_ops.tensor_list_stack(
dlist, element_dtype=op.inputs[0].dtype, num_elements=num_elements)
shape_grad = None
return tensor_grad, shape_grad
@ops.RegisterGradient("TensorListGetItem")
def _TensorListGetItemGrad(op, ditem):
"""Gradient for TensorListGetItem."""
list_size = gen_list_ops.tensor_list_length(op.inputs[0])
list_grad = gen_list_ops.tensor_list_set_item(
gen_list_ops.tensor_list_reserve(
gen_list_ops.tensor_list_element_shape(op.inputs[0],
shape_type=dtypes.int32),
list_size, element_dtype=ditem.dtype),
index=op.inputs[1],
item=ditem)
index_grad = None
return list_grad, index_grad
@ops.RegisterGradient("TensorListSetItem")
def _TensorListSetItemGrad(op, dlist):
_, index, item = op.inputs
list_grad = gen_list_ops.tensor_list_set_item(
dlist, index=index, item=array_ops.zeros_like(item))
index_grad = None
element_grad = gen_list_ops.tensor_list_get_item(
dlist, index, element_dtype=item.dtype)
return list_grad, index_grad, element_grad
@ops.RegisterGradient("TensorListGather")
def _TensorListGatherGrad(op, dtensor):
_, indices = op.inputs
return gen_list_ops.tensor_list_scatter(
tensor=dtensor, indices=indices,
element_shape=ops.convert_to_tensor(-1, dtype=dtypes.int32)), None
@ops.RegisterGradient("TensorListScatter")
def _TensorListScatterGrad(op, dlist):
t, indices, _ = op.inputs
return gen_list_ops.tensor_list_gather(
dlist, indices, element_dtype=t.dtype), None
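# Note (added): each registration above pairs a list op with its adjoint, e.g.
# the gradient of TensorListStack flows back through tensor_list_from_tensor,
# and the gradient of TensorListFromTensor flows back through tensor_list_stack;
# the None entries mark non-differentiable inputs such as indices and shapes.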
| {
"content_hash": "3c9fb0107a0cd47cf82e4e08a5166b64",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 34.35,
"alnum_prop": 0.7082969432314411,
"repo_name": "alshedivat/tensorflow",
"id": "386626e6a951b189f5e77bdf7b9a308b60c1c842",
"size": "4124",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/list_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "439824"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50398044"
},
{
"name": "CMake",
"bytes": "199209"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1276639"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "871083"
},
{
"name": "Jupyter Notebook",
"bytes": "2604347"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "61311"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40038696"
},
{
"name": "RobotFramework",
"bytes": "890"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "486609"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
"""
Asynchronous Shared-Memory Scheduler for Dask Graphs.
This scheduler coordinates several workers to execute tasks in a dask graph in
parallel. It depends on an apply_async function as would be found in thread or
process Pools and a corresponding Queue for worker-to-scheduler communication.
It tries to execute tasks in an order which maintains a small memory footprint
throughout execution. It does this by running tasks that allow us to release
data resources.
Task Selection Policy
=====================
When we complete a task we add more data in to our set of available data; this
new data makes new tasks available. We preferentially choose tasks that were
just made available in a last-in-first-out fashion. We implement this as a
simple stack. This results in depth-first rather than breadth-first
behavior, which encourages us to process batches of data to completion before
starting in on new data when possible.
When the addition of new data readies multiple tasks simultaneously we add
tasks to the stack in sorted order so that tasks with greater keynames are run
first. This can be handy to break ties in a predictable fashion.
State
=====
Many functions pass around a ``state`` variable that holds the current state of
the computation. This variable consists of several other dictionaries and
sets, explained below.
Constant state
--------------
1. dependencies: {x: [a, b ,c]} a,b,c, must be run before x
2. dependents: {a: [x, y]} a must run before x or y
Changing state
--------------
### Data
1. cache: available concrete data. {key: actual-data}
2. released: data that we've seen, used, and released because it is no longer
needed
### Jobs
1. ready: A fifo stack of ready-to-run tasks
2. running: A set of tasks currently in execution
3. finished: A set of finished tasks
4. waiting: which tasks are still waiting on others :: {key: {keys}}
Real-time equivalent of dependencies
5. waiting_data: available data to yet-to-be-run-tasks :: {key: {keys}}
Real-time equivalent of dependents
Examples
--------
>>> import pprint
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> pprint.pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'ready': ['z'],
'released': set([]),
'running': set([]),
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
Optimizations
=============
We build this scheduler with out-of-core array operations in mind. To this end
we have encoded some particular optimizations.
Compute to release data
-----------------------
When we choose a new task to execute we often have many options. Policies at
this stage are cheap and can significantly impact performance. One could
imagine policies that expose parallelism, drive towards a particular output,
etc..
Our current policy is to run tasks that were most recently made available.
Inlining computations
---------------------
We hold on to intermediate computations either in memory or on disk.
For very cheap computations that may emit new copies of the data, like
``np.transpose`` or possibly even ``x + 1``, we choose not to store these as
separate pieces of data / tasks. Instead we combine them with the computations
that require them. This may result in repeated computation but saves
significantly on space and computation complexity.
See the function ``inline_functions`` for more information.
"""
from __future__ import absolute_import, division, print_function
from operator import add
import sys
import traceback
from .core import (istask, flatten, reverse_dict, get_dependencies, ishashable,
_deps)
from .context import _globals
from .order import order
from .callbacks import unpack_callbacks
from .optimize import cull
def inc(x):
return x + 1
DEBUG = False
def start_state_from_dask(dsk, cache=None, sortkey=None):
""" Start state from a dask
Examples
--------
>>> dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}
>>> from pprint import pprint
>>> pprint(start_state_from_dask(dsk)) # doctest: +NORMALIZE_WHITESPACE
{'cache': {'x': 1, 'y': 2},
'dependencies': {'w': set(['y', 'z']),
'x': set([]),
'y': set([]),
'z': set(['x'])},
'dependents': {'w': set([]),
'x': set(['z']),
'y': set(['w']),
'z': set(['w'])},
'finished': set([]),
'ready': ['z'],
'released': set([]),
'running': set([]),
'waiting': {'w': set(['z'])},
'waiting_data': {'x': set(['z']),
'y': set(['w']),
'z': set(['w'])}}
"""
if sortkey is None:
sortkey = order(dsk).get
if cache is None:
cache = _globals['cache']
if cache is None:
cache = dict()
data_keys = set()
for k, v in dsk.items():
if not (istask(v) or _deps(dsk, v)):
cache[k] = v
data_keys.add(k)
dsk2 = dsk.copy()
dsk2.update(cache)
dependencies = dict((k, get_dependencies(dsk2, k)) for k in dsk)
waiting = dict((k, v.copy()) for k, v in dependencies.items()
if k not in data_keys)
dependents = reverse_dict(dependencies)
for a in cache:
for b in dependents.get(a, ()):
waiting[b].remove(a)
waiting_data = dict((k, v.copy()) for k, v in dependents.items() if v)
ready_set = set([k for k, v in waiting.items() if not v])
ready = sorted(ready_set, key=sortkey, reverse=True)
waiting = dict((k, v) for k, v in waiting.items() if v)
state = {'dependencies': dependencies,
'dependents': dependents,
'waiting': waiting,
'waiting_data': waiting_data,
'cache': cache,
'ready': ready,
'running': set(),
'finished': set(),
'released': set()}
return state
'''
Running tasks
-------------
When we execute tasks we both
1. Perform the actual work of collecting the appropriate data and calling the function
2. Manage administrative state to coordinate with the scheduler
'''
def _execute_task(arg, cache, dsk=None):
""" Do the actual work of collecting data and executing a function
Examples
--------
>>> cache = {'x': 1, 'y': 2}
Compute tasks against a cache
>>> _execute_task((add, 'x', 1), cache) # Compute task in naive manner
2
>>> _execute_task((add, (inc, 'x'), 1), cache) # Support nested computation
3
Also grab data from cache
>>> _execute_task('x', cache)
1
Support nested lists
>>> list(_execute_task(['x', 'y'], cache))
[1, 2]
>>> list(map(list, _execute_task([['x', 'y'], ['y', 'x']], cache)))
[[1, 2], [2, 1]]
>>> _execute_task('foo', cache) # Passes through on non-keys
'foo'
"""
if isinstance(arg, list):
return [_execute_task(a, cache) for a in arg]
elif istask(arg):
func, args = arg[0], arg[1:]
args2 = [_execute_task(a, cache) for a in args]
return func(*args2)
elif not ishashable(arg):
return arg
elif arg in cache:
return cache[arg]
else:
return arg
def execute_task(key, task, data, queue, get_id, raise_on_exception=False):
"""
Compute task and handle all administration
See Also
--------
_execute_task - actually execute task
"""
try:
result = _execute_task(task, data)
id = get_id()
result = key, result, None, id
except Exception as e:
if raise_on_exception:
raise
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
result = key, e, tb, None
try:
queue.put(result)
except Exception as e:
if raise_on_exception:
raise
exc_type, exc_value, exc_traceback = sys.exc_info()
tb = ''.join(traceback.format_tb(exc_traceback))
queue.put((key, e, tb, None))
def release_data(key, state, delete=True):
""" Remove data from temporary storage
See Also
finish_task
"""
if key in state['waiting_data']:
assert not state['waiting_data'][key]
del state['waiting_data'][key]
state['released'].add(key)
if delete:
del state['cache'][key]
def finish_task(dsk, key, state, results, sortkey, delete=True,
release_data=release_data):
"""
Update execution state after a task finishes
Mutates. This should run atomically (with a lock).
"""
for dep in sorted(state['dependents'][key], key=sortkey, reverse=True):
s = state['waiting'][dep]
s.remove(key)
if not s:
del state['waiting'][dep]
state['ready'].append(dep)
for dep in state['dependencies'][key]:
if dep in state['waiting_data']:
s = state['waiting_data'][dep]
s.remove(key)
if not s and dep not in results:
if DEBUG:
from chest.core import nbytes
print("Key: %s\tDep: %s\t NBytes: %.2f\t Release" % (key, dep,
                        sum(map(nbytes, state['cache'].values())) / 1e6))
release_data(dep, state, delete=delete)
elif delete and dep not in results:
release_data(dep, state, delete=delete)
state['finished'].add(key)
state['running'].remove(key)
return state
def nested_get(ind, coll):
""" Get nested index from collection
Examples
--------
>>> nested_get(1, 'abc')
'b'
>>> nested_get([1, 0], 'abc')
('b', 'a')
>>> nested_get([[1, 0], [0, 1]], 'abc')
(('b', 'a'), ('a', 'b'))
"""
if isinstance(ind, list):
return tuple([nested_get(i, coll) for i in ind])
else:
return coll[ind]
def default_get_id():
"""Default get_id"""
return None
'''
Task Selection
--------------
We often have a choice among many tasks to run next. This choice is both
cheap and can significantly impact performance.
We currently select tasks that were most recently made ready. We hope that
this last-in-first-out policy reduces the memory footprint.
'''
'''
`get`
-----
The main function of the scheduler. Get is the main entry point.
'''
def get_async(apply_async, num_workers, dsk, result, cache=None,
queue=None, get_id=default_get_id, raise_on_exception=False,
rerun_exceptions_locally=None, callbacks=None, **kwargs):
""" Asynchronous get function
This is a general version of various asynchronous schedulers for dask. It
    takes an apply_async function as found on Pool objects to form a more
specific ``get`` method that walks through the dask array with parallel
workers, avoiding repeat computation and minimizing memory use.
Parameters
----------
apply_async : function
Asynchronous apply function as found on Pool or ThreadPool
num_workers : int
The number of active tasks we should have at any one time
dsk: dict
A dask dictionary specifying a workflow
result : key or list of keys
Keys corresponding to desired data
cache : dict-like, optional
Temporary storage of results
get_id : callable, optional
Function to return the worker id, takes no arguments. Examples are
`threading.current_thread` and `multiprocessing.current_process`.
rerun_exceptions_locally : bool, optional
Whether to rerun failing tasks in local process to enable debugging
(False by default)
callbacks : tuple or list of tuples, optional
Callbacks are passed in as tuples of length 4. Multiple sets of
callbacks may be passed in as a list of tuples. For more information,
see the dask.diagnostics documentation.
See Also
--------
threaded.get
"""
assert queue
if callbacks is None:
callbacks = _globals['callbacks']
start_cbs, start_state_cbs, pretask_cbs, posttask_cbs, finish_cbs = unpack_callbacks(callbacks)
if isinstance(result, list):
result_flat = set(flatten(result))
else:
result_flat = set([result])
results = set(result_flat)
dsk = dsk.copy()
for f in start_cbs:
f(dsk)
dsk, dependencies = cull(dsk, list(results))
keyorder = order(dsk)
state = start_state_from_dask(dsk, cache=cache, sortkey=keyorder.get)
for f in start_state_cbs:
f(dsk, state)
if rerun_exceptions_locally is None:
rerun_exceptions_locally = _globals.get('rerun_exceptions_locally', False)
if state['waiting'] and not state['ready']:
raise ValueError("Found no accessible jobs in dask")
def fire_task():
""" Fire off a task to the thread pool """
# Choose a good task to compute
key = state['ready'].pop()
state['running'].add(key)
for f in pretask_cbs:
f(key, dsk, state)
# Prep data to send
data = dict((dep, state['cache'][dep])
for dep in get_dependencies(dsk, key))
# Submit
apply_async(execute_task, args=[key, dsk[key], data, queue,
get_id, raise_on_exception])
# Seed initial tasks into the thread pool
while state['ready'] and len(state['running']) < num_workers:
fire_task()
# Main loop, wait on tasks to finish, insert new ones
while state['waiting'] or state['ready'] or state['running']:
try:
key, res, tb, worker_id = queue.get()
except KeyboardInterrupt:
for f in finish_cbs:
f(dsk, state, True)
raise
if isinstance(res, Exception):
for f in finish_cbs:
f(dsk, state, True)
if rerun_exceptions_locally:
data = dict((dep, state['cache'][dep])
for dep in get_dependencies(dsk, key))
task = dsk[key]
_execute_task(task, data) # Re-execute locally
else:
                raise remote_exception(res, tb)
state['cache'][key] = res
finish_task(dsk, key, state, results, keyorder.get)
for f in posttask_cbs:
f(key, res, dsk, state, worker_id)
while state['ready'] and len(state['running']) < num_workers:
fire_task()
# Final reporting
while state['running'] or not queue.empty():
key, res, tb, worker_id = queue.get()
for f in finish_cbs:
f(dsk, state, False)
return nested_get(result, state['cache'])
""" Synchronous concrete version of get_async
Usually we supply a multi-core apply_async function. Here we provide a
sequential one. This is useful for debugging and for code dominated by the
GIL
"""
def apply_sync(func, args=(), kwds={}):
""" A naive synchronous version of apply_async """
return func(*args, **kwds)
def get_sync(dsk, keys, **kwargs):
from .compatibility import Queue
kwargs.pop('num_workers', None) # if num_workers present, remove it
queue = Queue()
return get_async(apply_sync, 1, dsk, keys, queue=queue,
raise_on_exception=True, **kwargs)
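# Illustrative usage sketch (added; kept as a comment so importing the module
# stays side-effect free). It assumes the toy inc/add helpers defined above:
#
#   dsk = {'x': 1, 'y': (inc, 'x'), 'z': (add, 'y', 10)}
#   get_sync(dsk, 'z')           # -> 12
#   get_sync(dsk, ['z', 'x'])    # -> (12, 1)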
def sortkey(item):
""" Sorting key function that is robust to different types
Both strings and tuples are common key types in dask graphs.
    However, in Python 3 one cannot compare strings with tuples directly.
    This function maps many types to a form where they can be compared.
Examples
--------
>>> sortkey('Hello')
('str', 'Hello')
>>> sortkey(('x', 1))
('tuple', ('x', 1))
"""
return (type(item).__name__, item)
"""
Remote Exceptions
-----------------
We want the following behaviors from remote exceptions
1. Include the original error message
2. Respond to try-except blocks with original error type
3. Include remote traceback
"""
class RemoteException(Exception):
""" Remote Exception
Contains the exception and traceback from a remotely run task
"""
def __init__(self, exception, traceback):
self.exception = exception
self.traceback = traceback
def __str__(self):
return (str(self.exception) + "\n\n"
"Traceback\n"
"---------\n" +
self.traceback)
def __dir__(self):
return sorted(set(dir(type(self)) +
list(self.__dict__) +
dir(self.exception)))
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
return getattr(self.exception, key)
exceptions = dict()
def remote_exception(exc, tb):
""" Metaclass that wraps exception type in RemoteException """
if type(exc) in exceptions:
typ = exceptions[type(exc)]
return typ(exc, tb)
else:
try:
typ = type(exc.__class__.__name__,
(RemoteException, type(exc)),
{'exception_type': type(exc)})
exceptions[type(exc)] = typ
return typ(exc, tb)
except TypeError:
return exc
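# Illustrative sketch (added): the wrapper keeps the original exception type,
# so callers can still catch it normally while also seeing the remote traceback.
#
#   exc = remote_exception(ValueError('boom'), 'remote traceback text')
#   try:
#       raise exc
#   except ValueError as e:       # still caught as the original type
#       e.traceback               # -> 'remote traceback text'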
| {
"content_hash": "884dd338f46adbb937d45ead1e0c53e0",
"timestamp": "",
"source": "github",
"line_count": 595,
"max_line_length": 99,
"avg_line_length": 29.712605042016808,
"alnum_prop": 0.5918321172011992,
"repo_name": "mikegraham/dask",
"id": "a64bc4ce6227000ecfef8d5af475df9e91f21086",
"size": "17679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/async.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1187699"
}
],
"symlink_target": ""
} |
import math
import Queue
def make(filename):
file = open(filename, "r")
linenum = 0
verts = 0
edges = 0
graph = None
for line in file:
# print ("line " + line)
values = line.split("\t")
# print ("values " + str(values))
# strip the wack n if present
try:
for i in values:
# print ("i " + i)
i = int(str(i).strip("\n"))
except Exception as ex:
print("\nError parsing the graph file. This is probably from having spaces instead of tabs.")
print("Exiting...\n")
# print(ex)
raise ex
# if first get graph verts n edges
if linenum == 0:
verts = values[0]
edges = values[1]
graph = Graph(int(verts), int(edges))
else: # else connect the verts
a = int(values[0])
b = int(values[1])
graph.connect(a, b)
linenum += 1
file.close()
return graph
class GraphException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
### not used, just messing with python overloading
class Matrix:
def __init__(self, r, c):
self.rows = r
self.cols = c
self.data = [[ 0 for x in range(self.cols)] for y in range(self.rows)]
def __getitem__(self, key):
print ("key: " + str(key))
return self.data[key]
def __setitem__(self, key, value):
print ("set key: " + str(key) + " val: " + str(value))
self.data[key] = value
def output(self):
for i in range(self.rows):
row = ""
for j in range (self.cols):
row += (str(self.data[i][j]) + " ")
print ( row + "\n")
def set(self, a, b, val):
self.data[a][b] = val
def fill(self, value):
for i in range(self.rows):
for j in range(self.cols):
self.set(i,j,value)
class Graph:
def __init__(self, vs, es):
self.verts = vs
self.edges = es
self.data = [[ 0 for x in range(self.verts)] for y in range(self.verts)]
def __getitem__(self, key):
return self.data[key]
def output(self):
for i in range(self.verts):
row = ""
for j in range (self.verts):
row += (str(self.data[i][j]) + " ")
print ( row + "\n")
def connect(self, a, b):
self.data[a][b] = 1
self.data[b][a] = 1
def remove(self, a, b):
self.data[a][b] = 0
self.data[b][a] = 0
def density(self):
if ( self.edges == 0 and self.verts == 0):
return 0
else:
top = 2 * float(self.edges)
bottom = float(self.verts) * float(self.verts - 1)
return round((top/bottom), 5)
# run a bfs
def bfs(self, start):
visited = list()
queue = Queue.Queue()
queue.put(start)
while not queue.empty():
vert = queue.get()
if ( vert not in visited ):
visited.append(vert)
for index in range(0,len(self.data[vert])) :
if ( self.data[vert][index] == 1 ):
queue.put(index)
return visited
# run a dfs
def dfs(self, start):
visited = list()
stack = list()
stack.append(start)
while len(stack):
vert = stack.pop()
if vert not in visited :
visited.append(vert)
for index in range(0,len(self.data[vert])) :
if ( self.data[vert][index] == 1 ):
stack.append(index)
return visited
def comps(self):
ret = set()
seen = set()
while ( len(seen) != len(self.data) ):
for index in range(0, len(self.data[0])):
if index not in seen:
conns = frozenset(self.dfs(index))
seen = seen | conns
ret.add(conns)
return ret
def degree(self, switch):
target = 0
if (switch == "min"):
target = self.verts - 1
if ( target < 0 ):
target = 0
for i in range(self.verts):
tmp = 0
for j in range(self.verts):
tmp += self.data[i][j]
if (switch == "max"):
if (tmp > target):
target = tmp
elif(switch == "min"):
if ( tmp < target):
target = tmp
else:
print (GraphException("Invalid switch passed to degree."))
return target
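# Illustrative usage sketch (added; not part of the original assignment file).
# The values below are hand-checked against the methods above:
#
#   g = Graph(4, 3)
#   g.connect(0, 1); g.connect(1, 2); g.connect(2, 3)
#   g.bfs(0)       # -> [0, 1, 2, 3]
#   g.density()    # -> round(2*3 / (4*3), 5) == 0.5
#   g.comps()      # -> {frozenset([0, 1, 2, 3])}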
| {
"content_hash": "85f311a6c5720eebb3e00e749f69c78a",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 106,
"avg_line_length": 29.242424242424242,
"alnum_prop": 0.4584455958549223,
"repo_name": "jeremy24/494-graph-algos",
"id": "c04e3f4c7fab428ac65dd450dcb4e15afa7850e9",
"size": "4825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/hw2/graph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "87"
},
{
"name": "C",
"bytes": "114"
},
{
"name": "Haskell",
"bytes": "1722"
},
{
"name": "Python",
"bytes": "139237"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
from django.http import Http404
from django.core import serializers
from django.shortcuts import redirect
from django.forms.models import model_to_dict
from django.urls import reverse_lazy, reverse
from django.http import HttpResponse, JsonResponse
from django.views.generic import ListView, DetailView, View, CreateView, UpdateView, DeleteView, TemplateView
from django.views.generic.edit import FormView
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin
from admin.base import settings
from admin.base.forms import ImportFileForm
from admin.institutions.forms import InstitutionForm, InstitutionalMetricsAdminRegisterForm
from django.contrib.auth.models import Group
from osf.models import Institution, Node, OSFUser
class InstitutionList(PermissionRequiredMixin, ListView):
paginate_by = 25
template_name = 'institutions/list.html'
ordering = 'name'
permission_required = 'osf.view_institution'
raise_exception = True
model = Institution
def get_queryset(self):
return Institution.objects.all().order_by(self.ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(query_set, page_size)
kwargs.setdefault('institutions', query_set)
kwargs.setdefault('page', page)
kwargs.setdefault('logohost', settings.OSF_URL)
return super(InstitutionList, self).get_context_data(**kwargs)
class InstitutionDisplay(PermissionRequiredMixin, DetailView):
model = Institution
template_name = 'institutions/detail.html'
permission_required = 'osf.view_institution'
raise_exception = True
def get_object(self, queryset=None):
return Institution.objects.get(id=self.kwargs.get('institution_id'))
def get_context_data(self, *args, **kwargs):
institution = self.get_object()
institution_dict = model_to_dict(institution)
kwargs.setdefault('page_number', self.request.GET.get('page', '1'))
kwargs['institution'] = institution_dict
kwargs['logohost'] = settings.OSF_URL
fields = institution_dict
kwargs['change_form'] = InstitutionForm(initial=fields)
kwargs['import_form'] = ImportFileForm()
kwargs['node_count'] = institution.nodes.count()
return kwargs
class InstitutionDetail(PermissionRequiredMixin, View):
permission_required = 'osf.view_institution'
raise_exception = True
def get(self, request, *args, **kwargs):
view = InstitutionDisplay.as_view()
return view(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
view = InstitutionChangeForm.as_view()
return view(request, *args, **kwargs)
class ImportInstitution(PermissionRequiredMixin, View):
permission_required = 'osf.change_institution'
raise_exception = True
def post(self, request, *args, **kwargs):
form = ImportFileForm(request.POST, request.FILES)
if form.is_valid():
file_str = self.parse_file(request.FILES['file'])
file_json = json.loads(file_str)
return JsonResponse(file_json[0]['fields'])
def parse_file(self, f):
parsed_file = ''
for chunk in f.chunks():
if isinstance(chunk, bytes):
chunk = chunk.decode()
parsed_file += chunk
return parsed_file
class InstitutionChangeForm(PermissionRequiredMixin, UpdateView):
permission_required = 'osf.change_institution'
raise_exception = True
model = Institution
form_class = InstitutionForm
def get_object(self, queryset=None):
provider_id = self.kwargs.get('institution_id')
return Institution.objects.get(id=provider_id)
def get_context_data(self, *args, **kwargs):
kwargs['import_form'] = ImportFileForm()
return super(InstitutionChangeForm, self).get_context_data(*args, **kwargs)
def get_success_url(self, *args, **kwargs):
return reverse_lazy('institutions:detail', kwargs={'institution_id': self.kwargs.get('institution_id')})
class InstitutionExport(PermissionRequiredMixin, View):
permission_required = 'osf.view_institution'
raise_exception = True
def get(self, request, *args, **kwargs):
institution = Institution.objects.get(id=self.kwargs['institution_id'])
data = serializers.serialize('json', [institution])
filename = '{}_export.json'.format(institution.name)
response = HttpResponse(data, content_type='text/json')
response['Content-Disposition'] = 'attachment; filename={}'.format(filename)
return response
class CreateInstitution(PermissionRequiredMixin, CreateView):
permission_required = 'osf.change_institution'
raise_exception = True
template_name = 'institutions/create.html'
success_url = reverse_lazy('institutions:list')
model = Institution
form_class = InstitutionForm
def get_context_data(self, *args, **kwargs):
kwargs['import_form'] = ImportFileForm()
return super(CreateInstitution, self).get_context_data(*args, **kwargs)
class InstitutionNodeList(PermissionRequiredMixin, ListView):
template_name = 'institutions/node_list.html'
paginate_by = 25
ordering = 'modified'
permission_required = 'osf.view_node'
raise_exception = True
model = Node
def get_queryset(self):
inst = self.kwargs['institution_id']
return Node.objects.filter(affiliated_institutions=inst).order_by(self.ordering)
def get_context_data(self, **kwargs):
query_set = kwargs.pop('object_list', self.object_list)
page_size = self.get_paginate_by(query_set)
paginator, page, query_set, is_paginated = self.paginate_queryset(query_set, page_size)
kwargs.setdefault('nodes', query_set)
kwargs.setdefault('institution', Institution.objects.get(id=self.kwargs['institution_id']))
kwargs.setdefault('page', page)
kwargs.setdefault('logohost', settings.OSF_URL)
return super(InstitutionNodeList, self).get_context_data(**kwargs)
class DeleteInstitution(PermissionRequiredMixin, DeleteView):
permission_required = 'osf.delete_institution'
raise_exception = True
template_name = 'institutions/confirm_delete.html'
success_url = reverse_lazy('institutions:list')
def delete(self, request, *args, **kwargs):
institution = Institution.objects.get(id=self.kwargs['institution_id'])
if institution.nodes.count() > 0:
return redirect('institutions:cannot_delete', institution_id=institution.pk)
return super(DeleteInstitution, self).delete(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
institution = Institution.objects.get(id=self.kwargs['institution_id'])
if institution.nodes.count() > 0:
return redirect('institutions:cannot_delete', institution_id=institution.pk)
return super(DeleteInstitution, self).get(request, *args, **kwargs)
def get_object(self, queryset=None):
institution = Institution.objects.get(id=self.kwargs['institution_id'])
return institution
class CannotDeleteInstitution(TemplateView):
template_name = 'institutions/cannot_delete.html'
def get_context_data(self, **kwargs):
context = super(CannotDeleteInstitution, self).get_context_data(**kwargs)
context['institution'] = Institution.objects.get(id=self.kwargs['institution_id'])
return context
class InstitutionalMetricsAdminRegister(PermissionRequiredMixin, FormView):
permission_required = 'osf.change_institution'
raise_exception = True
template_name = 'institutions/register_institutional_admin.html'
form_class = InstitutionalMetricsAdminRegisterForm
def get_form_kwargs(self):
kwargs = super(InstitutionalMetricsAdminRegister, self).get_form_kwargs()
kwargs['institution_id'] = self.kwargs['institution_id']
return kwargs
def get_context_data(self, **kwargs):
context = super(InstitutionalMetricsAdminRegister, self).get_context_data(**kwargs)
context['institution_name'] = Institution.objects.get(id=self.kwargs['institution_id']).name
return context
def form_valid(self, form):
kwargs = self.get_form_kwargs()
user_id = form.cleaned_data.get('user_id')
osf_user = OSFUser.load(user_id)
institution_id = kwargs['institution_id']
target_institution = Institution.objects.filter(id=institution_id).first()
if not osf_user:
raise Http404('OSF user with id "{}" not found. Please double check.'.format(user_id))
group = Group.objects.filter(name__startswith='institution_{}'.format(target_institution._id)).first()
group.user_set.add(osf_user)
group.save()
osf_user.save()
messages.success(self.request, 'Permissions update successful for OSF User {}!'.format(osf_user.username))
return super(InstitutionalMetricsAdminRegister, self).form_valid(form)
def get_success_url(self):
return reverse('institutions:register_metrics_admin', kwargs={'institution_id': self.kwargs['institution_id']})
| {
"content_hash": "cdf7fde202f68d360a0d5966bf0a39f6",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 119,
"avg_line_length": 39.889830508474574,
"alnum_prop": 0.6963033779477374,
"repo_name": "baylee-d/osf.io",
"id": "bdcb1e17a2fc73ef5e55950310d9566bf70f70a1",
"size": "9414",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "admin/institutions/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "5721"
},
{
"name": "HTML",
"bytes": "318459"
},
{
"name": "JavaScript",
"bytes": "1792442"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "654930"
},
{
"name": "Python",
"bytes": "10662092"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
"""
=======================================
Visualizing the stock market structure
=======================================
This example employs several unsupervised learning techniques to extract
the stock market structure from variations in historical quotes.
The quantity that we use is the daily variation in quote price: quotes
that are linked tend to cofluctuate during a day.
.. _stock_market:
Learning a graph structure
--------------------------
We use sparse inverse covariance estimation to find which quotes are
correlated conditionally on the others. Specifically, sparse inverse
covariance gives us a graph, that is, a list of connections. For each
symbol, the symbols that it is connected to are those useful to explain
its fluctuations.
Clustering
----------
We use clustering to group together quotes that behave similarly. Here,
amongst the :ref:`various clustering techniques <clustering>` available
in the scikit-learn, we use :ref:`affinity_propagation` as it does
not enforce equal-size clusters, and it can automatically choose the
number of clusters from the data.
Note that this gives us a different indication than the graph, as the
graph reflects conditional relations between variables, while the
clustering reflects marginal properties: variables clustered together can
be considered as having a similar impact at the level of the full stock
market.
Embedding in 2D space
---------------------
For visualization purposes, we need to lay out the different symbols on a
2D canvas. For this we use :ref:`manifold` techniques to retrieve 2D
embedding.
Visualization
-------------
The output of the 3 models is combined in a 2D graph where nodes
represent the stocks and edges the:
- cluster labels are used to define the color of the nodes
- the sparse covariance model is used to display the strength of the edges
- the 2D embedding is used to position the nodes in the plane
This example has a fair amount of visualization-related code, as
visualization is crucial here to display the graph. One of the challenges
is to position the labels while minimizing overlap. For this we use a
heuristic based on the direction of the nearest neighbor along each
axis.
"""
# Author: Gael Varoquaux gael.varoquaux@normalesup.org
# License: BSD 3 clause
import sys
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
import pandas as pd
from sklearn import cluster, covariance, manifold
print(__doc__)
# #############################################################################
# Retrieve the data from Internet
# The data is from 2003 - 2008. This is reasonably calm: (not too long ago so
# that we get high-tech firms, and before the 2008 crash). This kind of
# historical data can be obtained from APIs like the quandl.com and
# alphavantage.co ones.
symbol_dict = {
'TOT': 'Total',
'XOM': 'Exxon',
'CVX': 'Chevron',
'COP': 'ConocoPhillips',
'VLO': 'Valero Energy',
'MSFT': 'Microsoft',
'IBM': 'IBM',
'TWX': 'Time Warner',
'CMCSA': 'Comcast',
'CVC': 'Cablevision',
'YHOO': 'Yahoo',
'DELL': 'Dell',
'HPQ': 'HP',
'AMZN': 'Amazon',
'TM': 'Toyota',
'CAJ': 'Canon',
'SNE': 'Sony',
'F': 'Ford',
'HMC': 'Honda',
'NAV': 'Navistar',
'NOC': 'Northrop Grumman',
'BA': 'Boeing',
'KO': 'Coca Cola',
'MMM': '3M',
'MCD': 'McDonald\'s',
'PEP': 'Pepsi',
'K': 'Kellogg',
'UN': 'Unilever',
'MAR': 'Marriott',
'PG': 'Procter Gamble',
'CL': 'Colgate-Palmolive',
'GE': 'General Electrics',
'WFC': 'Wells Fargo',
'JPM': 'JPMorgan Chase',
'AIG': 'AIG',
'AXP': 'American express',
'BAC': 'Bank of America',
'GS': 'Goldman Sachs',
'AAPL': 'Apple',
'SAP': 'SAP',
'CSCO': 'Cisco',
'TXN': 'Texas Instruments',
'XRX': 'Xerox',
'WMT': 'Wal-Mart',
'HD': 'Home Depot',
'GSK': 'GlaxoSmithKline',
'PFE': 'Pfizer',
'SNY': 'Sanofi-Aventis',
'NVS': 'Novartis',
'KMB': 'Kimberly-Clark',
'R': 'Ryder',
'GD': 'General Dynamics',
'RTN': 'Raytheon',
'CVS': 'CVS',
'CAT': 'Caterpillar',
'DD': 'DuPont de Nemours'}
symbols, names = np.array(sorted(symbol_dict.items())).T
quotes = []
for symbol in symbols:
print('Fetching quote history for %r' % symbol, file=sys.stderr)
url = ('https://raw.githubusercontent.com/scikit-learn/examples-data/'
'master/financial-data/{}.csv')
quotes.append(pd.read_csv(url.format(symbol)))
close_prices = np.vstack([q['close'] for q in quotes])
open_prices = np.vstack([q['open'] for q in quotes])
# The daily variations of the quotes are what carry most information
variation = close_prices - open_prices
# #############################################################################
# Learn a graphical structure from the correlations
edge_model = covariance.GraphicalLassoCV()
# standardize the time series: using correlations rather than covariance
# is more efficient for structure recovery
X = variation.copy().T
X /= X.std(axis=0)
edge_model.fit(X)
# #############################################################################
# Cluster using affinity propagation
_, labels = cluster.affinity_propagation(edge_model.covariance_)
n_labels = labels.max()
for i in range(n_labels + 1):
print('Cluster %i: %s' % ((i + 1), ', '.join(names[labels == i])))
# #############################################################################
# Find a low-dimension embedding for visualization: find the best position of
# the nodes (the stocks) on a 2D plane
# We use a dense eigen_solver to achieve reproducibility (arpack is
# initiated with random vectors that we don't control). In addition, we
# use a large number of neighbors to capture the large-scale structure.
node_position_model = manifold.LocallyLinearEmbedding(
n_components=2, eigen_solver='dense', n_neighbors=6)
embedding = node_position_model.fit_transform(X.T).T
# #############################################################################
# Visualization
plt.figure(1, facecolor='w', figsize=(10, 8))
plt.clf()
ax = plt.axes([0., 0., 1., 1.])
plt.axis('off')
# Display a graph of the partial correlations
partial_correlations = edge_model.precision_.copy()
d = 1 / np.sqrt(np.diag(partial_correlations))
partial_correlations *= d
partial_correlations *= d[:, np.newaxis]
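# Note (added): up to sign, this normalization turns the precision matrix into
# partial correlations, rho_ij = -p_ij / sqrt(p_ii * p_jj); only absolute
# values are used below, so the sign does not affect the displayed edges.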
non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)
# Plot the nodes using the coordinates of our embedding
plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels,
cmap=plt.cm.nipy_spectral)
# Plot the edges
start_idx, end_idx = np.where(non_zero)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[embedding[:, start], embedding[:, stop]]
for start, stop in zip(start_idx, end_idx)]
values = np.abs(partial_correlations[non_zero])
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.hot_r,
norm=plt.Normalize(0, .7 * values.max()))
lc.set_array(values)
lc.set_linewidths(15 * values)
ax.add_collection(lc)
# Add a label to each node. The challenge here is that we want to
# position the labels to avoid overlap with other labels
for index, (name, label, (x, y)) in enumerate(
zip(names, labels, embedding.T)):
dx = x - embedding[0]
dx[index] = 1
dy = y - embedding[1]
dy[index] = 1
this_dx = dx[np.argmin(np.abs(dy))]
this_dy = dy[np.argmin(np.abs(dx))]
if this_dx > 0:
horizontalalignment = 'left'
x = x + .002
else:
horizontalalignment = 'right'
x = x - .002
if this_dy > 0:
verticalalignment = 'bottom'
y = y + .002
else:
verticalalignment = 'top'
y = y - .002
plt.text(x, y, name, size=10,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
bbox=dict(facecolor='w',
edgecolor=plt.cm.nipy_spectral(label / float(n_labels)),
alpha=.6))
plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),
embedding[0].max() + .10 * embedding[0].ptp(),)
plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),
embedding[1].max() + .03 * embedding[1].ptp())
plt.show()
| {
"content_hash": "f1729ff1fac1f9ce13e049f651785355",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 79,
"avg_line_length": 32.015325670498086,
"alnum_prop": 0.6242221158449018,
"repo_name": "chrsrds/scikit-learn",
"id": "f7c3569d15808490aaa140e556c6cfdd76931c6e",
"size": "8356",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "examples/applications/plot_stock_market.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5255814"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
"""
Base export interface
"""
import re
import urllib
from datetime import datetime, time, timedelta
from types import GeneratorType
import pytz
from flask import current_app, request
from indico.core.config import config
from indico.core.db import db
from indico.core.logger import Logger
from indico.core.notifications import flush_email_queue, init_email_queue
from indico.util.date_time import now_utc
from indico.web.http_api.exceptions import ArgumentParseError, LimitExceededException
from indico.web.http_api.metadata import Serializer
from indico.web.http_api.metadata.atom import AtomSerializer
from indico.web.http_api.metadata.html import HTML4Serializer
from indico.web.http_api.metadata.ical import ICalSerializer
from indico.web.http_api.metadata.jsonp import JSONPSerializer
from indico.web.http_api.responses import HTTPAPIError
from indico.web.http_api.util import get_query_parameter
class HTTPAPIHook(object):
"""This class is the hook between the query (path+params) and the generator of the results (fossil).
It is also in charge of checking the parameters and the access rights.
"""
HOOK_LIST = []
TYPES = None # abstract
PREFIX = 'export' # url prefix. must exist in indico.web.flask.blueprints.api, too! also used as function prefix
RE = None # abstract
METHOD_NAME = None # overrides method name derived from prefix+type
DEFAULT_DETAIL = None # abstract
MAX_RECORDS = {}
SERIALIZER_TYPE_MAP = {} # maps fossil type names to friendly names (useful for plugins e.g. RoomCERN --> Room)
VALID_FORMATS = None # None = all formats
GUEST_ALLOWED = True # When False, it forces authentication
COMMIT = False # commit database changes
HTTP_POST = False # require (and allow) HTTP POST
NO_CACHE = False
@classmethod
def parseRequest(cls, path, queryParams):
"""Parse a request path and return a hook and the requested data type."""
path = urllib.unquote(path)
hooks = cls.HOOK_LIST
for expCls in hooks:
Logger.get('HTTPAPIHook.parseRequest').debug(expCls)
m = expCls._matchPath(path)
if m:
gd = m.groupdict()
g = m.groups()
type = g[0]
format = g[-1]
if format not in DataFetcher.getAllowedFormats():
return None, None
elif expCls.VALID_FORMATS and format not in expCls.VALID_FORMATS:
return None, None
return expCls(queryParams, type, gd, format), format
return None, None
@staticmethod
def register(cls):
"""Register a hook.
To use it, simply decorate the hook class with this method."""
assert cls.RE is not None
HTTPAPIHook.HOOK_LIST.append(cls)
return cls
@classmethod
def _matchPath(cls, path):
if not hasattr(cls, '_RE'):
types = '|'.join(cls.TYPES)
cls._RE = re.compile(r'/' + cls.PREFIX + '/(' + types + r')' + ('/' + cls.RE).rstrip('/') + r'\.(\w+)$')
return cls._RE.match(path)
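    # Illustrative example (added; the values are assumptions, not a real hook):
    # with PREFIX = 'export', TYPES = ('event',) and RE = r'(?P<idlist>\w+)',
    # the compiled pattern is r'/export/(event)/(?P<idlist>\w+)\.(\w+)$',
    # which matches paths such as '/export/event/42.json'.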
def __init__(self, queryParams, type, pathParams, format):
self._format = format
self._queryParams = queryParams
self._type = type
self._pathParams = pathParams
def _getParams(self):
self._offset = get_query_parameter(self._queryParams, ['O', 'offset'], 0, integer=True)
if self._offset < 0:
raise HTTPAPIError('Offset must be a positive number', 400)
self._orderBy = get_query_parameter(self._queryParams, ['o', 'order'])
self._descending = get_query_parameter(self._queryParams, ['c', 'descending'], 'no') == 'yes'
self._detail = get_query_parameter(self._queryParams, ['d', 'detail'], self.DEFAULT_DETAIL)
tzName = get_query_parameter(self._queryParams, ['tz'], None)
if tzName is None:
tzName = config.DEFAULT_TIMEZONE
try:
self._tz = pytz.timezone(tzName)
except pytz.UnknownTimeZoneError as e:
raise HTTPAPIError("Bad timezone: '%s'" % e.message, 400)
max = self.MAX_RECORDS.get(self._detail, 1000)
self._userLimit = get_query_parameter(self._queryParams, ['n', 'limit'], 0, integer=True)
if self._userLimit > max:
raise HTTPAPIError("You can only request up to %d records per request with the detail level '%s'" %
(max, self._detail), 400)
self._limit = self._userLimit if self._userLimit > 0 else max
fromDT = get_query_parameter(self._queryParams, ['f', 'from'])
toDT = get_query_parameter(self._queryParams, ['t', 'to'])
dayDT = get_query_parameter(self._queryParams, ['day'])
if (fromDT or toDT) and dayDT:
raise HTTPAPIError("'day' can only be used without 'from' and 'to'", 400)
elif dayDT:
fromDT = toDT = dayDT
self._fromDT = DataFetcher._getDateTime('from', fromDT, self._tz) if fromDT else None
self._toDT = DataFetcher._getDateTime('to', toDT, self._tz, aux=self._fromDT) if toDT else None
def _has_access(self, user):
return True
@property
def serializer_args(self):
return {}
def _getMethodName(self):
if self.METHOD_NAME:
return self.METHOD_NAME
return self.PREFIX + '_' + self._type.replace('-', '_')
def _performCall(self, func, user):
resultList = []
complete = True
try:
res = func(user)
if isinstance(res, GeneratorType):
for obj in res:
resultList.append(obj)
else:
resultList = res
except LimitExceededException:
complete = (self._limit == self._userLimit)
return resultList, complete
def _perform(self, user, func, extra_func):
self._getParams()
if not self._has_access(user):
raise HTTPAPIError('Access to this resource is restricted.', 403)
resultList, complete = self._performCall(func, user)
if isinstance(resultList, current_app.response_class):
return True, resultList, None, None
extra = extra_func(user, resultList) if extra_func else None
return False, resultList, complete, extra
def __call__(self, user):
"""Perform the actual exporting"""
if self.HTTP_POST != (request.method == 'POST'):
# XXX: this should never happen, since HTTP_POST is only used within /api/,
# where the flask url rule requires POST
raise HTTPAPIError('This action requires %s' % ('POST' if self.HTTP_POST else 'GET'), 405)
if not self.GUEST_ALLOWED and not user:
raise HTTPAPIError('Guest access to this resource is forbidden.', 403)
method_name = self._getMethodName()
func = getattr(self, method_name, None)
extra_func = getattr(self, method_name + '_extra', None)
if not func:
raise NotImplementedError(method_name)
if not self.COMMIT:
is_response, resultList, complete, extra = self._perform(user, func, extra_func)
db.session.rollback()
else:
try:
init_email_queue()
is_response, resultList, complete, extra = self._perform(user, func, extra_func)
db.session.commit()
flush_email_queue()
except Exception:
db.session.rollback()
raise
if is_response:
return resultList
return resultList, extra, complete, self.SERIALIZER_TYPE_MAP
class DataFetcher(object):
_deltas = {'yesterday': timedelta(-1),
'tomorrow': timedelta(1)}
def __init__(self, user, hook):
self._user = user
self._hook = hook
@classmethod
def getAllowedFormats(cls):
return Serializer.getAllFormats()
@classmethod
def _parseDateTime(cls, dateTime, allowNegativeOffset):
"""
Accepted formats:
* ISO 8601 subset - YYYY-MM-DD[THH:MM]
* 'today', 'yesterday', 'tomorrow' and 'now'
* days in the future/past: '[+/-]DdHHhMMm'
'ctx' means that the date will change according to its function
('from' or 'to')
"""
        # if it's an "alias", return immediately
now = now_utc()
if dateTime in cls._deltas:
return ('ctx', now + cls._deltas[dateTime])
elif dateTime == 'now':
return ('abs', now)
elif dateTime == 'today':
return ('ctx', now)
m = re.match(r'^([+-])?(?:(\d{1,3})d)?(?:(\d{1,2})h)?(?:(\d{1,2})m)?$', dateTime)
if m:
mod = -1 if m.group(1) == '-' else 1
if not allowNegativeOffset and mod == -1:
raise ArgumentParseError('End date cannot be a negative offset')
atoms = list(0 if a is None else int(a) * mod for a in m.groups()[1:])
if atoms[1] > 23 or atoms[2] > 59:
raise ArgumentParseError("Invalid time!")
return ('ctx', timedelta(days=atoms[0], hours=atoms[1], minutes=atoms[2]))
else:
# iso 8601 subset
try:
return ('abs', datetime.strptime(dateTime, "%Y-%m-%dT%H:%M"))
except ValueError:
pass
try:
return ('ctx', datetime.strptime(dateTime, "%Y-%m-%d"))
except ValueError:
raise ArgumentParseError("Impossible to parse '%s'" % dateTime)
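    # Illustrative examples (added; values assumed) of how the formats above
    # are interpreted by _parseDateTime:
    #   _parseDateTime('2020-01-15T10:30', True)  -> ('abs', datetime(2020, 1, 15, 10, 30))
    #   _parseDateTime('today', True)             -> ('ctx', now_utc())
    #   _parseDateTime('-1d2h', True)             -> ('ctx', timedelta(days=-1, hours=-2))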
@classmethod
def _getDateTime(cls, ctx, dateTime, tz, aux=None):
try:
rel, value = cls._parseDateTime(dateTime, ctx == 'from')
except ArgumentParseError as e:
raise HTTPAPIError(e.message, 400)
if rel == 'abs':
return tz.localize(value) if not value.tzinfo else value
elif rel == 'ctx' and isinstance(value, timedelta):
value = now_utc() + value
# from here on, 'value' has to be a datetime
if ctx == 'from':
return tz.localize(value.combine(value.date(), time(0, 0, 0)))
else:
return tz.localize(value.combine(value.date(), time(23, 59, 59)))
class IteratedDataFetcher(DataFetcher):
def __init__(self, user, hook):
super(IteratedDataFetcher, self).__init__(user, hook)
self._tz = hook._tz
self._offset = hook._offset
self._limit = hook._limit
self._detail = hook._detail
self._orderBy = hook._orderBy
self._descending = hook._descending
self._fromDT = hook._fromDT
self._toDT = hook._toDT
Serializer.register('html', HTML4Serializer)
Serializer.register('jsonp', JSONPSerializer)
Serializer.register('ics', ICalSerializer)
Serializer.register('atom', AtomSerializer)
| {
"content_hash": "8e0ca4da2c55442f9499f45611c0a1b2",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 117,
"avg_line_length": 38.81494661921708,
"alnum_prop": 0.5974145044466856,
"repo_name": "OmeGak/indico",
"id": "f35b0bf5b1e1d575923432bee3d939ddea73223d",
"size": "11121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/web/http_api/hooks/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "547418"
},
{
"name": "HTML",
"bytes": "1366687"
},
{
"name": "JavaScript",
"bytes": "1678182"
},
{
"name": "Mako",
"bytes": "1340"
},
{
"name": "Python",
"bytes": "4488419"
},
{
"name": "Shell",
"bytes": "2724"
},
{
"name": "TeX",
"bytes": "23051"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
from bokeh.plotting import figure
from bokeh.layouts import row, column
from bokeh.models import HoverTool, ColumnDataSource, Range1d, TapTool
from bokeh.models.callbacks import CustomJS
from dashboard.models import Process, Fibermap, Job
from qlf_models import QLFModels
from bokeh.layouts import widgetbox
from bokeh.models.widgets import Button
from bokeh.resources import CDN
from bokeh.embed import file_html
import os
from astropy.io import fits
qlf_root = os.environ.get('QLF_ROOT')
spectro_redux = os.environ.get('DESI_SPECTRO_REDUX')
class Spectra():
def __init__(self, process_id, arms):
self.selected_process_id = process_id
self.process = Process.objects.get(pk=process_id)
exposure = self.process.exposure
self.exposure = exposure
self.selected_arm = arms
def data_source(self, fmap):
""" Creating data source for plots
"""
data_model = {
'fiber': [],
'color': [],
'cam': [],
'OBJ_TYPE': [],
'ra': [],
'dec': [],
}
ra_tile = fmap.fiber_ra
dec_tile = fmap.fiber_dec
otype_tile = fmap.objtype
y = []
color = []
cam_inst = []
for spec in list(range(10)):
sframe_exists = False
for arm in self.selected_arm:
cam = arm+str(spec)
process_dir = self.process.process_dir
                path_exists = os.path.isfile("{}/{}/sframe-{}-{}.fits".format(
spectro_redux,
process_dir,
cam,
process_dir.split('/')[-1],
))
if path_exists:
sframe_exists = True
if sframe_exists:
y = y + list(range(500))
color = color + ['green']*500
else:
y = y + list(range(500))
color = color + ['lightgray']*500
cam_inst = cam_inst + [cam]*500
data_model['fiber'] = y
data_model['color'] = color
data_model['cam'] = cam_inst
data_model['OBJ_TYPE'] = otype_tile
data_model['ra'] = ra_tile
data_model['dec'] = dec_tile
source = ColumnDataSource(data=data_model)
return source
def wedge_plot(self, wedge_arm, fmap, common_source=None):
ra_center = fmap.exposure.telra
dec_center = fmap.exposure.teldec
fiber_tooltip = """
<div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">FIBER: </span>
<span style="font-size: 1.1vw; color: #515151">@fiber</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">RA: </span>
<span style="font-size: 1.1vw; color: #515151;">@ra</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">DEC: </span>
<span style="font-size: 1.1vw; color: #515151;">@dec</span>
</div>
"""
hover = HoverTool(tooltips=fiber_tooltip)
source = common_source
radius = 0.017
radius_hover = 0.018
plot_space = 0.1
xrange = Range1d(
start=min(source.data['ra'])-plot_space, end=max(source.data['ra'])+plot_space)
yrange = Range1d(start=min(
source.data['dec'])-plot_space, end=max(source.data['dec'])+plot_space)
p = figure(title='FIBERS (ARM %s)' % (','.join(wedge_arm)),
x_axis_label='RA',
y_axis_label='DEC',
plot_width=600,
plot_height=600,
tools=[
hover, "box_zoom,pan,wheel_zoom,reset,lasso_select,crosshair,tap"],
active_drag="box_zoom",
sizing_mode='scale_width',
x_range=xrange,
y_range=yrange)
p.title.align = 'center'
p.circle('ra', 'dec', source=source, name="data", radius=radius,
fill_color={'field': 'color'},
line_color='black', line_width=0.4,
hover_line_color='red')
p.circle('ra', 'dec', source=source, name="data", radius=radius_hover,
hover_fill_color={'field': 'color'}, fill_color=None,
line_color=None, line_width=3, hover_line_color='red')
taptool = p.select(type=TapTool)
taptool.callback = CustomJS(args=dict(source=source), code="""
const selected = source.selected['1d']['indices'][0];
const fiber = source.data['fiber'][selected]
const camera = source.data['cam'][selected]
const color = source.data['color'][selected]
const data = {
"fiber": fiber,
"camera": camera
};
if (color !== 'lightgray')
window.parent.postMessage(data, '*');
""")
return p
def load_spectra(self):
fmap = Fibermap.objects.filter(exposure=self.exposure)[0]
src = self.data_source(fmap)
p = self.wedge_plot(self.selected_arm, fmap, common_source=src)
layout = row(p, sizing_mode='scale_width')
return file_html(layout, CDN, "Spectra")
def load_frame(self, fiber_id, arm):
try:
process_dir = self.process.process_dir
frame_path = "{}/{}/sframe-{}-{}.fits""".format(
spectro_redux,
process_dir,
arm+self.spectrograph,
process_dir.split('/')[-1],
)
frame = fits.open(frame_path)
        except Exception:
return None
flux = frame["FLUX"].data
wl = frame["WAVELENGTH"].data
otype = frame['FIBERMAP'].data['OBJTYPE']
fmap = frame["FIBERMAP"].data
return dict(
flux=flux[fiber_id],
wl=wl,
otype=otype[fiber_id],
ra=fmap['RA_OBS'][fiber_id],
dec=fmap['DEC_OBS'][fiber_id],
fid=fiber_id,
brick=fmap['BRICKNAME'][fiber_id],)
def render_spectra(self, fiber, spectrograph):
fiber = int(fiber)
self.spectrograph = spectrograph
# -----------------------
# Bokeh pre-configuration:
color = {'b': "dodgerblue", "r": "red", "z": "magenta"}
hover_color = {'b': "red", "r": "darkblue", "z": "darkblue"}
spec_tooltip = """
<div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Wavelength: </span>
<span style="font-size: 1.1vw; color: #515151">@wlength Å</span>
</div>
<div>
<span style="font-size: 1vw; font-weight: bold; color: #303030;">Counts: </span>
<span style="font-size: 1.1vw; color: #515151;">@spec</span>
</div>
</div>
"""
spec_hover = HoverTool(tooltips=spec_tooltip,
mode='vline', names=['bar'])
# -------------------
        # Saving plotting data:
fluxes = {}
ra_obs = ''
dec_obs = ''
for arm in self.selected_arm:
fl = self.load_frame(fiber, arm)
if fl:
fluxes.update({arm: fl})
ra_obs = fl['ra']
dec_obs = fl['dec']
for arm in [list(fluxes.keys())[0]]:
brick = fluxes[arm]['brick']
obj_name = fluxes[arm]['otype']
# ------------
# Bokeh Plots:
p_spec = figure(title="Brick name: "+brick+', Fiber ID: %d, RA: %.2f, DEC: %.2f' % (fiber, ra_obs, dec_obs),
x_axis_label='Wavelength (A)',
y_axis_label=obj_name+' Flux (counts)',
plot_width=800, plot_height=340,
active_drag="box_zoom",
tools=[spec_hover, "pan,box_zoom,reset,crosshair, wheel_zoom"], sizing_mode='scale_width')
for arm in fluxes.keys():
spec_source = ColumnDataSource(data={'wlength': fluxes[arm]['wl'],
'spec': fluxes[arm]['flux'],
})
p_spec.line('wlength', 'spec', color=color[arm],
source=spec_source,)
p_spec.vbar('wlength', top='spec',
width=1., source=spec_source,
name="bar",
color=None, hover_color=hover_color[arm])
# -----------
# Formatting:
font_size = "1.1vw"
for plot in [p_spec]:
plot.xaxis.major_label_text_font_size = font_size
plot.yaxis.major_label_text_font_size = font_size
plot.xaxis.axis_label_text_font_size = font_size
plot.yaxis.axis_label_text_font_size = font_size
plot.legend.label_text_font_size = font_size
plot.title.text_font_size = font_size
callback = CustomJS(code="""
data={'back': true}
window.parent.postMessage(data, '*');
""")
button = Button(label="Back", button_type="warning", callback=callback)
layout = column(p_spec, widgetbox(button), sizing_mode='scale_width')
return file_html(layout, CDN, "Spectra")
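# Hedged usage sketch (illustrative, not part of the original module): builds the
# fiber wedge plot and a single-fiber spectrum as standalone HTML. The process id,
# arms, fiber and spectrograph values below are placeholders and assume a
# configured QLF/Django environment with matching Process and Fibermap records.
if __name__ == '__main__':
    spectra = Spectra(process_id=1, arms=['b', 'r', 'z'])
    wedge_html = spectra.load_spectra()
    fiber_html = spectra.render_spectra(fiber='101', spectrograph='3')
    with open('spectra_preview.html', 'w') as preview:
        preview.write(wedge_html + fiber_html)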
| {
"content_hash": "a0fc3683f70e4c5e385e980d0972ffbc",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 117,
"avg_line_length": 36.87547169811321,
"alnum_prop": 0.482705689725747,
"repo_name": "desihub/qlf",
"id": "065eb1813c86b030c7ff3accbc3f5c76f459de41",
"size": "9772",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/framework/qlf/dashboard/bokeh/spectra/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "440"
},
{
"name": "Dockerfile",
"bytes": "2304"
},
{
"name": "HTML",
"bytes": "3969"
},
{
"name": "JavaScript",
"bytes": "390225"
},
{
"name": "Jupyter Notebook",
"bytes": "50033"
},
{
"name": "Python",
"bytes": "306541"
},
{
"name": "Shell",
"bytes": "6807"
}
],
"symlink_target": ""
} |
"""Utility classes for saving model checkpoints."""
import datetime
import os
import pickle
from typing import Any
from absl import logging
from acme import core
from acme.tf import savers as tf_savers
import jax
import numpy as np
import tree
# Internal imports.
CheckpointState = Any
_DEFAULT_CHECKPOINT_TTL = int(datetime.timedelta(days=5).total_seconds())
_ARRAY_NAME = 'array_nest'
_EXEMPLAR_NAME = 'nest_exemplar'
def restore_from_path(ckpt_dir: str) -> CheckpointState:
"""Restore the state stored in ckpt_dir."""
array_path = os.path.join(ckpt_dir, _ARRAY_NAME)
exemplar_path = os.path.join(ckpt_dir, _EXEMPLAR_NAME)
with open(exemplar_path, 'rb') as f:
exemplar = pickle.load(f)
with open(array_path, 'rb') as f:
files = np.load(f, allow_pickle=True)
flat_state = [files[key] for key in files.files]
unflattened_tree = tree.unflatten_as(exemplar, flat_state)
def maybe_convert_to_python(value, numpy):
return value if numpy else value.item()
return tree.map_structure(maybe_convert_to_python, unflattened_tree, exemplar)
def save_to_path(ckpt_dir: str, state: CheckpointState):
"""Save the state in ckpt_dir."""
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
is_numpy = lambda x: isinstance(x, (np.ndarray, jax.Array))
flat_state = tree.flatten(state)
nest_exemplar = tree.map_structure(is_numpy, state)
array_path = os.path.join(ckpt_dir, _ARRAY_NAME)
logging.info('Saving flattened array nest to %s', array_path)
def _disabled_seek(*_):
raise AttributeError('seek() is disabled on this object.')
with open(array_path, 'wb') as f:
setattr(f, 'seek', _disabled_seek)
np.savez(f, *flat_state)
exemplar_path = os.path.join(ckpt_dir, _EXEMPLAR_NAME)
logging.info('Saving nest exemplar to %s', exemplar_path)
with open(exemplar_path, 'wb') as f:
pickle.dump(nest_exemplar, f)
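# Hedged usage sketch (illustrative, not part of the original module): round-trips
# a small nested state through save_to_path/restore_from_path. The checkpoint
# directory name below is a placeholder.
def _example_checkpoint_roundtrip(ckpt_dir: str = '/tmp/acme_ckpt_example'):
  state = {'step': 3, 'params': np.arange(4)}
  save_to_path(ckpt_dir, state)
  restored = restore_from_path(ckpt_dir)
  # Plain Python values come back as Python scalars, arrays stay arrays.
  assert restored['step'] == 3
  np.testing.assert_array_equal(restored['params'], state['params'])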
# Use TF checkpointer.
class Checkpointer(tf_savers.Checkpointer):
def __init__(
self,
object_to_save: core.Saveable,
directory: str = '~/acme',
subdirectory: str = 'default',
**tf_checkpointer_kwargs):
super().__init__(dict(saveable=object_to_save),
directory=directory,
subdirectory=subdirectory,
**tf_checkpointer_kwargs)
CheckpointingRunner = tf_savers.CheckpointingRunner
| {
"content_hash": "743f6ea5955701dbafcfea783a624bf5",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 80,
"avg_line_length": 28.817073170731707,
"alnum_prop": 0.6855691917054592,
"repo_name": "deepmind/acme",
"id": "2b5d5a013a8bf28cb9798373f62c28f447be18d4",
"size": "2979",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "acme/jax/savers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2182865"
},
{
"name": "Shell",
"bytes": "2668"
}
],
"symlink_target": ""
} |
import logging
import pathlib
from email.message import Message
from typing import NamedTuple, Tuple, List, Dict
import pop3bot
import gerrit_rest
logging.basicConfig(level=logging.DEBUG)
basepath = pathlib.Path(__file__).parent
ProcessedEmail = NamedTuple('ProcessedEmail', [
('messages', List[Tuple[Message, str]]),
('gerritmails', List[Dict[str, str]]),
('changesets', List[Dict])
])
class MockGerrit(gerrit_rest.GerritREST):
def __init__(self):
super().__init__("http://localhost")
def _request(self, name, **kwargs):
raise Exception()
def get_changeset(self, changeid, o=None):
return {'commit': changeid}
def process_email(mbox: str) -> ProcessedEmail:
emails = [(basepath / "resources" / "gerrit_emails" / mbox).open('rb').read()]
messages = list(pop3bot.message_generator(emails))
gerritmails = list(pop3bot.gerritmail_generator(messages))
changesets = list(pop3bot.new_changeset_generator(MockGerrit(), gerritmails))
return ProcessedEmail(messages, gerritmails, changesets)
def test_notgerrit():
result = process_email("notgerrit.mbox")
assert len(result.messages) == 1
assert len(result.gerritmails) == 0
assert len(result.changesets) == 0
def test_new_patchset():
result = process_email("497076.mbox")
assert result.changesets == [{'commit': '9e71f2c950587a0a7a75da49c6af89001803ff17'}]
def test_merged():
result = process_email("497094-merged.mbox")
assert len(result.messages) == 1
assert len(result.gerritmails) == 1
assert len(result.changesets) == 0
| {
"content_hash": "f2b4f1410f45c936c4d8067286d710e0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 88,
"avg_line_length": 27.46551724137931,
"alnum_prop": 0.6917765222849969,
"repo_name": "valhallasw/gerrit-reviewer-bot",
"id": "8aa30be04898d0c7860d84df815efbba89cd6350",
"size": "1593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_pop3bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17329"
},
{
"name": "Shell",
"bytes": "896"
}
],
"symlink_target": ""
} |
import string, pickle, base64
from database import get, put
### INTERNAL VARIABLES ###
# Minimum acceptable name size.
minname = 2
# Maximum acceptable name size.
maxname = 32
# Characters allowed in a name. (whitelist)
allowednamechars = string.ascii_letters + string.digits + string.punctuation + string.whitespace
# Characters disallowed in a name. (blacklist)
badnamechars = "{}[]<>()|\\*,"
### Helper Functions ###
# Check if a player name is allowed.
def goodname(name):
if len(name) > maxname or len(name) < minname:
return False
for char in name:
if not char in allowednamechars or char in badnamechars:
return False
return True
# Send a message to a player.
def send(S, sender, mesg):
S.send("PRIVMSG {0} :{1}\r\n".format(sender, mesg))
# Check if player is signed in.
def online(DB, player):
rows = get(DB, "SELECT online FROM players WHERE username='{0}'".format(player))
if len(rows) and rows[0][0]:
return True
else:
return False
# Set player's online status.
def setonline(DB, player, status):
put(DB, "UPDATE players SET online='{0}' WHERE username='{1}'".format(status, player))
# Find which room a player is in.
def getroom(DB, player):
return get(DB, "SELECT room FROM players WHERE username='{0}'".format(player))[0][0]
# Get room info by ID.
def roomstat(DB, room):
name = get(DB, "SELECT name FROM rooms WHERE id='{0}'".format(room))[0][0]
desc = get(DB, "SELECT desc FROM rooms WHERE id='{0}'".format(room))[0][0]
owner = get(DB, "SELECT owner FROM rooms WHERE id='{0}'".format(room))[0][0]
exits = str2obj(get(DB, "SELECT exits FROM rooms WHERE id='{0}'".format(room))[0][0])
items = str2obj(get(DB, "SELECT items FROM rooms WHERE id='{0}'".format(room))[0][0])
locked = get(DB, "SELECT locked FROM rooms WHERE id='{0}'".format(room))[0][0]
return {"name": name, "desc": desc, "owner": owner, "exits": exits,
"items": items, "locked": locked}
# Get player info by username.
def playerstat(DB, player):
name = get(DB, "SELECT name FROM players WHERE username='{0}'".format(player))[0][0]
desc = get(DB, "SELECT desc FROM players WHERE username='{0}'".format(player))[0][0]
online = get(DB, "SELECT online FROM players WHERE username='{0}'".format(player))[0][0]
room = get(DB, "SELECT room FROM players WHERE username='{0}'".format(player))[0][0]
return {"name": name, "desc": desc, "online": online, "room": room}
# Get a list of room's occupants.
def occupants(DB, room):
occupants = []
rows = get(DB, "SELECT username FROM players WHERE online='1' AND room='{0}'".format(room))
for row in rows:
occupants.append(row[0])
return occupants
# Make player enter a room.
def enterroom(DB, room, player):
put(DB, "UPDATE players SET room='{0}' WHERE username='{1}'".format(room, player)) # Set new room.
# Create a new room.
def newroom(DB, name, owner):
roomids = get(DB, "SELECT id FROM rooms") # Find highest room ID.
newid = 0
for rid in roomids:
if rid[0] > newid:
newid = rid[0]
newid += 1
put(DB, """INSERT INTO rooms (name, desc, owner, exits, items, id, locked) VALUES ('{0}', ' ', '{1}', 'gAJ9cQAu', 'gAJdcQAu', '{2}', '1')""".format(name, owner, newid))
return newid
# Send a message to all players.
def announce(S, DB, mesg):
rows = get(DB, "SELECT username FROM players WHERE online='1'")
for player in rows:
send(S, player[0], mesg)
# Send a message to all players in a room.
def announce_room(S, DB, room, mesg):
rows = get(DB, "SELECT username FROM players WHERE online='1' AND room='{0}'".format(room))
for player in rows:
send(S, player[0], mesg)
# Convert object to base64 pickled string.
def obj2str(obj):
return base64.b64encode(pickle.dumps(obj, 2))
# Convert base64 pickled string to object.
def str2obj(strobj):
return pickle.loads(base64.b64decode(strobj))
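# Hedged usage sketch (illustrative, not part of the original module):
# round-trip a small exits mapping through the base64 pickle helpers, the way
# room exits are stored in the rooms table.
def example_obj2str_roundtrip():
	exits = {"north": 2, "south": 5}
	encoded = obj2str(exits)  # base64-encoded pickle, safe to store as text
	decoded = str2obj(encoded)  # back to the original dict
	assert decoded == exits
	return encoded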
| {
"content_hash": "34d5259caa63111d08466a0e5a69a779",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 169,
"avg_line_length": 33.219298245614034,
"alnum_prop": 0.6796936889358332,
"repo_name": "pariahsoft/Dennis",
"id": "5040ef3f716458d5e41eb24de04fc376e311b97c",
"size": "5170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65198"
}
],
"symlink_target": ""
} |
"""Provide a storage client to save results to persistent storage
Class Listings
--------------
StorageClient
Represent a storage client that supports saving results to HDFS
"""
from hdfs import InsecureClient
import numpy, cv2, tempfile, os
class StorageClient(object):
"""Represent a storage client that supports saving results to HDFS
Methods
-------
save(self, file_name, result)
Save results permanently to persistent storage.
"""
def __init__(self, namenode_url, username, submission_id, camera_id):
"""Initialize an internal client
This constructor initializes an HDFS client.
"""
self._internal_client = InsecureClient(namenode_url, user='CAM2', root='/'.join(['/users', username, str(submission_id), str(camera_id)]))
def save(self, file_name, result):
"""Save results permanently to persistent storage.
This method saves results permanently to persistent storage so that they
can be retrieved by the user later. This method currently accepts results as
numpy.ndarray. If an instance with any other type is passed, the method
will save the string representation of the instance. This enables the method
to save strings, integers, and other primitive data types.
Parameters
----------
file_name : str
The file name to be used to save the results.
result : object
The results to be saved. The `result` can be numpy.ndarray.
If an instance with any other type is passed, the method will
save the string representation of the instance. This enables the
method to save strings, integers, and other primitive data types.
"""
# Make sure the file name is legit
file_name = file_name.replace('/', '.')
# If the result is an OpenCV image, save it as an image.
if (isinstance(result, numpy.ndarray)):
# Create temp files
temp_directory = tempfile.mkdtemp()
temp_image_path = os.path.join(temp_directory, file_name)
cv2.imwrite(temp_image_path, result)
self._internal_client.upload(file_name, temp_image_path, overwrite=True)
# Remove temp files
os.remove(temp_image_path)
os.rmdir(temp_directory)
# Else, save the string representation of the object in a text file.
else:
self._internal_client.write(file_name, str(result), overwrite=True)
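# Hedged usage sketch (illustrative, not part of the original module); the
# namenode URL, username and ids below are placeholders.
if __name__ == '__main__':
    client = StorageClient('http://namenode.example.org:50070', 'alice',
                           submission_id=7, camera_id=12)
    frame = numpy.zeros((480, 640, 3), dtype=numpy.uint8)
    client.save('frame_0001.png', frame)   # ndarray results are written as images
    client.save('count.txt', 42)           # anything else is saved as str(result)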
| {
"content_hash": "cad35baf832bad4850c7cbb1682787da",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 146,
"avg_line_length": 36.869565217391305,
"alnum_prop": 0.6419025157232704,
"repo_name": "muhammad-alaref/CAM2DistributedBackend",
"id": "f67f153318b1bb7bbb1e197677b9ee4d2a4d48a5",
"size": "2544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CAM2DistributedBackend/util/storage_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "48028"
},
{
"name": "Shell",
"bytes": "2648"
}
],
"symlink_target": ""
} |
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with integer argument
try:
sys.exit(42)
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
        # call with tuple argument with one entry
        # entry will be unpacked
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
        # test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
if os.name == "nt":
raise unittest.SkipTest(
"under Windows, test would generate a spurious crash dialog")
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertTrue(
b"Fatal Python error: Cannot recover from stack overflow" in err,
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 5)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'os2', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to an non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('3P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('inP') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
check(get_cell().__code__, size('5i9Pi3P'))
check(get_cell.__code__, size('5i9Pi3P'))
def get_cell2(x):
def inner():
return x
return inner
check(get_cell2.__code__, size('5i9Pi3P') + 1)
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('n2P' + '2nPn' + 8*'n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('n2P' + '2nPn') + 16*struct.calcsize('n2P'))
# dictionary-keyiterator
check({}.keys(), size('P'))
# dictionary-valueiterator
check({}.values(), size('P'))
# dictionary-itemiterator
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pi'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pi 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pi 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pi 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# long
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# memoryview
check(memoryview(b''), size('Pnin 2P2n2i5P 3cPn'))
# module
check(unittest, size('PnP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3n2P' + PySet_MINSIZE*'nP' + 'nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('nP'))
check(frozenset(sample), s + newsize*struct.calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
s = vsize('P2n15Pl4Pn9Pn11PI')
check(int, s)
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs + 4P)
s = vsize('P2n15Pl4Pn9Pn11PI') + struct.calcsize('34P 3P 10P 2P 4P')
# Separate block for PyDictKeysObject with 4 entries
s += struct.calcsize("2nPn") + 4*struct.calcsize("n2P")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size('n2P' + '2nPn'))
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nniP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
| {
"content_hash": "99f41ffac4b27d4eff2c1d55cf7748c6",
"timestamp": "",
"source": "github",
"line_count": 912,
"max_line_length": 100,
"avg_line_length": 37.32565789473684,
"alnum_prop": 0.5802414735172292,
"repo_name": "maxdeliso/elevatorSim",
"id": "6e0c6eef31967347477e436526f814f89d36ab89",
"size": "34041",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Lib/test/test_sys.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "513967"
},
{
"name": "C++",
"bytes": "86522751"
},
{
"name": "Objective-C",
"bytes": "2174"
},
{
"name": "Perl",
"bytes": "6080"
},
{
"name": "PowerShell",
"bytes": "1487"
},
{
"name": "Python",
"bytes": "19028751"
},
{
"name": "Shell",
"bytes": "728"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
import six
from mongoengine.queryset import Q
from st2common import log as logging
from st2api.controllers.resource import BaseResourceIsolationControllerMixin
from st2api.controllers.resource import ContentPackResourceController
from st2common.models.api.rule import RuleViewAPI
from st2common.models.system.common import ResourceReference
from st2common.persistence.action import Action
from st2common.persistence.rule import Rule
from st2common.persistence.trigger import TriggerType, Trigger
from st2common.rbac.types import PermissionType
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
__all__ = ["RuleViewController"]
class RuleViewController(
BaseResourceIsolationControllerMixin, ContentPackResourceController
):
"""
Add some extras to a Rule object to make it easier for UI to render a rule. The additions
do not necessarily belong in the Rule itself but are still valuable augmentations.
:Example:
{
"action": {
"description": "Action that executes an arbitrary Linux command on the localhost.",
"parameters": {
"cmd": "echo \"{{trigger.executed_at}}\""
},
"ref": "core.local"
},
"criteria": {},
"description": "Sample rule using an Interval Timer.",
"enabled": false,
"id": "55ea221832ed35759cf3b312",
"name": "sample.with_timer",
"pack": "examples",
"ref": "examples.sample.with_timer",
"tags": [],
"trigger": {
"description": "Triggers on specified intervals. e.g. every 30s, 1week etc.",
"parameters": {
"delta": 5,
"unit": "seconds"
},
"ref": "core.4ad65602-6fb4-4c89-b0f2-b990d7b68bad",
"type": "core.st2.IntervalTimer"
},
"uid": "rule:examples:sample.with_timer"
}
The `description` fields in action and trigger are augmented properties.
"""
model = RuleViewAPI
access = Rule
supported_filters = {"name": "name", "pack": "pack", "user": "context.user"}
query_options = {"sort": ["pack", "name"]}
mandatory_include_fields_retrieve = ["pack", "name", "trigger"]
def get_all(
self,
exclude_attributes=None,
include_attributes=None,
sort=None,
offset=0,
limit=None,
requester_user=None,
**raw_filters,
):
rules = super(RuleViewController, self)._get_all(
exclude_fields=exclude_attributes,
include_fields=include_attributes,
sort=sort,
offset=offset,
limit=limit,
raw_filters=raw_filters,
requester_user=requester_user,
)
result = self._append_view_properties(rules.json)
rules.json = result
return rules
def get_one(self, ref_or_id, requester_user):
from_model_kwargs = {"mask_secrets": True}
rule = self._get_one(
ref_or_id,
permission_type=PermissionType.RULE_VIEW,
requester_user=requester_user,
from_model_kwargs=from_model_kwargs,
)
result = self._append_view_properties([rule.json])[0]
rule.json = result
return rule
def _append_view_properties(self, rules):
(
action_by_refs,
trigger_by_refs,
trigger_type_by_refs,
) = self._get_referenced_models(rules)
for rule in rules:
action_ref = rule.get("action", {}).get("ref", None)
trigger_ref = rule.get("trigger", {}).get("ref", None)
trigger_type_ref = rule.get("trigger", {}).get("type", None)
action_db = action_by_refs.get(action_ref, None)
if "action" in rule:
rule["action"]["description"] = (
action_db.description if action_db else ""
)
if "trigger" in rule:
rule["trigger"]["description"] = ""
trigger_db = trigger_by_refs.get(trigger_ref, None)
if trigger_db:
rule["trigger"]["description"] = trigger_db.description
# If description is not found in trigger get description from TriggerType
if "trigger" in rule and not rule["trigger"]["description"]:
trigger_type_db = trigger_type_by_refs.get(trigger_type_ref, None)
if trigger_type_db:
rule["trigger"]["description"] = trigger_type_db.description
return rules
def _get_referenced_models(self, rules):
"""
Reduces the number of queries to be made to the DB by creating sets of Actions, Triggers
and TriggerTypes.
"""
action_refs = set()
trigger_refs = set()
trigger_type_refs = set()
for rule in rules:
action_ref = rule.get("action", {}).get("ref", None)
trigger_ref = rule.get("trigger", {}).get("ref", None)
trigger_type_ref = rule.get("trigger", {}).get("type", None)
if action_ref:
action_refs.add(action_ref)
if trigger_ref:
trigger_refs.add(trigger_ref)
if trigger_type_ref:
trigger_type_refs.add(trigger_type_ref)
action_by_refs = {}
trigger_by_refs = {}
trigger_type_by_refs = {}
# The functions that will return args that can used to query.
def ref_query_args(ref):
return {"ref": ref}
def name_pack_query_args(ref):
resource_ref = ResourceReference.from_string_reference(ref=ref)
return {"name": resource_ref.name, "pack": resource_ref.pack}
action_dbs = self._get_entities(
model_persistence=Action, refs=action_refs, query_args=ref_query_args
)
for action_db in action_dbs:
action_by_refs[action_db.ref] = action_db
trigger_dbs = self._get_entities(
model_persistence=Trigger,
refs=trigger_refs,
query_args=name_pack_query_args,
)
for trigger_db in trigger_dbs:
trigger_by_refs[trigger_db.get_reference().ref] = trigger_db
trigger_type_dbs = self._get_entities(
model_persistence=TriggerType,
refs=trigger_type_refs,
query_args=name_pack_query_args,
)
for trigger_type_db in trigger_type_dbs:
trigger_type_by_refs[trigger_type_db.get_reference().ref] = trigger_type_db
return (action_by_refs, trigger_by_refs, trigger_type_by_refs)
def _get_entities(self, model_persistence, refs, query_args):
"""
Returns all the entities for the supplied refs. model_persistence is the persistence
object that will be used to get to the correct query method and the query_args function
to return the ref specific query argument.
        This method is specific enough that it likely only makes sense in this context.
"""
q = None
for ref in refs:
if not q:
q = Q(**query_args(ref))
else:
q |= Q(**query_args(ref))
if q:
return model_persistence._get_impl().model.objects(q)
return []
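# Hedged illustration (not part of the original module): for refs such as
# {"core.local", "core.remote"} and the ref_query_args builder, _get_entities
# composes a single OR-ed mongoengine query, roughly:
#
#     Action._get_impl().model.objects(Q(ref="core.local") | Q(ref="core.remote"))
#
# so all referenced actions are fetched in one round-trip instead of one query per ref.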
rule_view_controller = RuleViewController()
| {
"content_hash": "9a6e7d7c601c649d1554d9fcf09bab5e",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 99,
"avg_line_length": 34.49769585253456,
"alnum_prop": 0.5753406358535934,
"repo_name": "nzlosh/st2",
"id": "39b4682c526709330aa791c0d4eb5cfdfb74095d",
"size": "8114",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "st2api/st2api/controllers/v1/rule_views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jinja",
"bytes": "174532"
},
{
"name": "Makefile",
"bytes": "75242"
},
{
"name": "PowerShell",
"bytes": "856"
},
{
"name": "Python",
"bytes": "6453910"
},
{
"name": "Shell",
"bytes": "93607"
},
{
"name": "Starlark",
"bytes": "7236"
}
],
"symlink_target": ""
} |
from inspect import signature
from functools import lru_cache
from typing import Iterable
from numpy import mean, nan
from statistics import stdev
# for population/biased standard deviation use:
# from numpy import std as stdev
from utils import jit
RANKING_METRICS = {}
def differential_expression_metric(case: Iterable[float], control: Iterable[float]):
"""Template for differential-expression metrics.
Args:
case: expression profile of the first class of samples
control: expression profile of the second class of samples
"""
pass
def metric(name):
"""Decorates differential-expression metric.
Args:
name: user-visible name of the metric
"""
def decorator(func):
func.name = name
func_signature = signature(func)
typed_signature = signature(differential_expression_metric)
# check only names and count of parameters
if func_signature.parameters.keys() != typed_signature.parameters.keys():
raise NameError(
f'Signature of "{name}" metric does not match '
f'the template: {typed_signature}'
)
# replace signature to have type annotation
func.__signature__ = typed_signature
# save the metric
RANKING_METRICS[name] = func
return func
return decorator
@metric('difference')
def difference_of_classes(case, control):
return mean(case) - mean(control)
@metric('ratio')
def ratio_of_classes(case, control):
return mean(case) / mean(control) if mean(control) != 0 else nan
@metric('signal_to_noise')
@lru_cache(maxsize=None)
@jit
def signal_to_noise(case, control):
"""Calculates SNR as ratio of means difference and deviation sum.
Case and control has to be tuples or other hashable iterable.
Assumes that there are:
- at least two samples in both case and control
- the samples have non-zero variation
"""
return (
(mean(case) - mean(control))
/
(stdev(case) + stdev(control))
)
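# Hedged usage sketch (illustrative, not part of the original module).
# signal_to_noise is wrapped in lru_cache, so the profiles must be hashable
# (tuples rather than lists); the numbers below are made up.
def _example_metric_usage():
    case = (2.0, 2.5, 3.1, 2.8)
    control = (1.0, 1.2, 0.9, 1.1)
    snr = RANKING_METRICS['signal_to_noise'](case, control)
    diff = RANKING_METRICS['difference'](case, control)
    return snr, diff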
| {
"content_hash": "684d45d15f67da06f7ec23204c824027",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 84,
"avg_line_length": 25.691358024691358,
"alnum_prop": 0.6573762614127823,
"repo_name": "sienkie/pathways-analysis",
"id": "277f4e0f2f9482f61681b6ad58fe5db6b7779adf",
"size": "2122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metrics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "137693"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
} |
import unittest
from matrix import Matrix
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
class MatrixTest(unittest.TestCase):
def test_extract_row_from_one_number_matrix(self):
matrix = Matrix("1")
self.assertEqual(matrix.row(1), [1])
def test_can_extract_row(self):
matrix = Matrix("1 2\n3 4")
self.assertEqual(matrix.row(2), [3, 4])
def test_extract_row_where_numbers_have_different_widths(self):
matrix = Matrix("1 2\n10 20")
self.assertEqual(matrix.row(2), [10, 20])
def test_can_extract_row_from_non_square_matrix(self):
matrix = Matrix("1 2 3\n4 5 6\n7 8 9\n8 7 6")
self.assertEqual(matrix.row(3), [7, 8, 9])
def test_extract_column_from_one_number_matrix(self):
matrix = Matrix("1")
self.assertEqual(matrix.column(1), [1])
def test_can_extract_column(self):
matrix = Matrix("1 2 3\n4 5 6\n7 8 9")
self.assertEqual(matrix.column(3), [3, 6, 9])
def test_can_extract_column_from_non_square_matrix(self):
matrix = Matrix("1 2 3\n4 5 6\n7 8 9\n8 7 6")
self.assertEqual(matrix.column(3), [3, 6, 9, 6])
def test_extract_column_where_numbers_have_different_widths(self):
matrix = Matrix("89 1903 3\n18 3 1\n9 4 800")
self.assertEqual(matrix.column(2), [1903, 3, 4])
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "cf935782693e9cee021affac0dd7f18f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 32.883720930232556,
"alnum_prop": 0.6237623762376238,
"repo_name": "N-Parsons/exercism-python",
"id": "3d0be594541caf31f97c770d2ffa31b4169de121",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/matrix/matrix_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555991"
},
{
"name": "Shell",
"bytes": "1199"
}
],
"symlink_target": ""
} |
import BaseHTTPServer
import os
import re
class GitIssues(BaseHTTPServer.BaseHTTPRequestHandler):
repos_dir_in_home = os.path.expanduser("~/.git-issues/")
def html_200(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def html_404(self):
self.send_response(404)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write("""<!DOCTYPE html>
<h3>Error loading the page</h3>
""")
def json_200(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_GET(self):
f = None
if self.path == "/":
try:
f = open("site/index.html", 'r')
self.html_200()
self.wfile.write(f.read())
except Exception:
self.html_404()
finally:
if f is not None:
f.close()
elif '.js' in self.path or '.css' in self.path \
or '.woff' in self.path or '.ttf' in self.path:
try:
path = self.path
try:
path = self.path[:self.path.index('?')]
                except ValueError:
                    # No query string in the path; keep the full path as-is.
                    pass
f = open("site/{0}".format(path))
self.send_response(200)
if '.js' in self.path:
self.send_header('Content-type', 'text/js')
elif '.woff' in self.path:
self.send_header('Content-type', 'text/woff')
elif '.ttf' in self.path:
self.send_header('Content-type', 'text/ttf')
elif '.css' in self.path:
self.send_header('Content-type', 'text/css')
else:
self.send_header('Content-type', 'text')
self.end_headers()
self.wfile.write(f.read())
except Exception as e:
self.html_404()
finally:
if f is not None:
f.close()
elif re.search("(repo=(.*))", self.path):
repo_matches = re.search("(repo=(.*))", self.path)
try:
f = open("{0}/{1}.json".format(self.repos_dir_in_home, repo_matches.group(2)), 'r')
self.json_200()
self.wfile.write(f.read())
except Exception:
self.json_200()
self.wfile.write('{"error":"Repository file not found."}')
finally:
if f is not None:
f.close()
def main():
try:
repos_dir_in_home = os.path.expanduser("~/.git-issues/")
server = BaseHTTPServer.HTTPServer(('', 5678), GitIssues)
print("Starting Server on http://localhost:5678/")
repo_files = [f for f in os.listdir(repos_dir_in_home) \
if os.path.isfile(os.path.join(repos_dir_in_home, f))]
for repos in repo_files:
print("http://localhost:5678/?repo={0}".format(repos.replace(".json", "")))
server.serve_forever()
except KeyboardInterrupt:
print("^C received, shutting down the server")
server.socket.close()
if __name__ == '__main__':
main()
| {
"content_hash": "6f38ce8fe7c1c22ae2c0d42be8d84347",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 99,
"avg_line_length": 35.15,
"alnum_prop": 0.4833570412517781,
"repo_name": "AutomatedTester/git-issues",
"id": "8b0f9626b5b7177611b56361415d6f9bc2f44f4a",
"size": "3537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "git-issues/httpserve.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "114007"
},
{
"name": "HTML",
"bytes": "10303"
},
{
"name": "JavaScript",
"bytes": "241003"
},
{
"name": "Python",
"bytes": "5642"
}
],
"symlink_target": ""
} |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from _restobject import RestObject
def ex48_set_bios_password(restobj, new_password, bios_password=None):
sys.stdout.write("\nEXAMPLE 48: Set Bios Password\n")
instances = restobj.search_for_type("Bios.")
for instance in instances:
body = {"AdminPassword": new_password, \
"OldAdminPassword": bios_password}
response = restobj.rest_patch(instance["href"], body, \
bios_password)
restobj.error_handler(response)
if __name__ == "__main__":
# When running on the server locally use the following commented values
# iLO_https_url = "blobstore://."
# iLO_account = "None"
# iLO_password = "None"
# When running remotely connect using the iLO secured (https://) address,
# iLO account name, and password to send https requests
# iLO_https_url acceptable examples:
# "https://10.0.0.100"
# "https://f250asha.americas.hpqcorp.net"
iLO_https_url = "https://10.0.0.100"
iLO_account = "admin"
iLO_password = "password"
#Create a REST object
REST_OBJ = RestObject(iLO_https_url, iLO_account, iLO_password)
ex48_set_bios_password(REST_OBJ, "newpassword", "biospassword")
| {
"content_hash": "3007d298c2ac2413b07d0a3309430114",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 39.93478260869565,
"alnum_prop": 0.6548720740337507,
"repo_name": "HewlettPackard/python-proliant-sdk",
"id": "37e8378af030385041cc563e902e7e6f78192235",
"size": "1899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/Rest/ex48_set_bios_password.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('account', '0007_subscription_active'),
]
operations = [
migrations.RemoveField(
model_name='subscription',
name='active',
),
migrations.AddField(
model_name='subscription',
name='expires',
field=models.DateTimeField(default=datetime.datetime(2015, 1, 3, 4, 10, 23, 394829, tzinfo=utc)),
preserve_default=False,
),
]
| {
"content_hash": "3b0dd4bed9c837b9706f7082d8222b9a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 109,
"avg_line_length": 25.68,
"alnum_prop": 0.6074766355140186,
"repo_name": "pizzapanther/Super-Neutron-Drive",
"id": "ab5c585be90ab77af032c3cdd5d8799fbb8a8230",
"size": "666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/account/migrations/0008_auto_20150103_0410.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "139458"
},
{
"name": "HTML",
"bytes": "101330"
},
{
"name": "JavaScript",
"bytes": "851908"
},
{
"name": "Python",
"bytes": "79810"
},
{
"name": "Shell",
"bytes": "4847"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import unittest
import requests
import unittest.mock as mock
import dockercloud
from dockercloud.api.base import send_request
from .fake_api import fake_resp
class SendRequestTestCase(unittest.TestCase):
@mock.patch('dockercloud.api.http.Request', return_value=requests.Request('GET', 'http://fake.com'))
@mock.patch.object(dockercloud.api.http.Session, 'send')
def test_http_send_request(self, mock_send, mock_Request):
json_obj = {'key': 'value'}
mock_send.return_value = fake_resp(lambda: (None, json_obj))
self.assertRaises(dockercloud.ApiError, send_request, 'METHOD', 'path', data='data')
headers = {'Content-Type': 'application/json', 'User-Agent': 'python-dockercloud/%s' % dockercloud.__version__}
headers.update(dockercloud.auth.get_auth_header())
mock_send.return_value = fake_resp(lambda: (200, json_obj))
self.assertEqual(json_obj, send_request('METHOD', 'path'))
mock_send.return_value = fake_resp(lambda: (204, json_obj))
self.assertIsNone(send_request('METHOD', 'path'))
mock_send.return_value = fake_resp(lambda: (401, json_obj))
self.assertRaises(dockercloud.AuthError, send_request, 'METHOD', 'path')
mock_send.return_value = fake_resp(lambda: (500, json_obj))
self.assertRaises(dockercloud.ApiError, send_request, 'METHOD', 'path')
| {
"content_hash": "5d431fd0cb783b6d76067ef782b976f9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 119,
"avg_line_length": 42.90909090909091,
"alnum_prop": 0.6871468926553672,
"repo_name": "penkin/python-dockercloud",
"id": "88802a406e8fbba2af1af8882438dfe3e5cf3025",
"size": "1416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "210419"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pip2'
copyright = '2012, Pip2 Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1.dev1' # If changing, update setup.py and pip2/__init__.py
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pip2doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pip2.tex', 'pip2 Documentation',
'Pip2 Authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pip2', 'pip2 Documentation',
['Pip2 Authors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pip2', 'pip2 Documentation',
'Pip2 Authors', 'pip2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "928d5456c64fe7ead49e66321301ec86",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 32.10480349344978,
"alnum_prop": 0.6999455930359086,
"repo_name": "osupython/pip2",
"id": "b113c00d4fd919f1956740d90b1c1b9c7648d46b",
"size": "7790",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42559"
}
],
"symlink_target": ""
} |
import login
import logout
import home
import error
| {
"content_hash": "34863c35857e962024a889d1a768c075",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 13,
"avg_line_length": 13,
"alnum_prop": 0.8461538461538461,
"repo_name": "VMatrixTeam/open-matrix",
"id": "e59452661fddc40a50cffa5c3050e86ff8f08446",
"size": "52",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webservice/handlers/index/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "91"
},
{
"name": "CSS",
"bytes": "224841"
},
{
"name": "HTML",
"bytes": "68428"
},
{
"name": "JavaScript",
"bytes": "733814"
},
{
"name": "Python",
"bytes": "106477"
},
{
"name": "Shell",
"bytes": "48"
}
],
"symlink_target": ""
} |
import socket
import os.path
from thread import start_new_thread
import httplib
import mimetypes
mimetypes.init()
host = ''
port = 8080
backlog = 5
size = 1024
newline = "\r\n"
class Request:
def __init__(self, client, data):
self.client = client
self.data = data
self.lines = self.data.strip("\r").split("\n")
self.req_line = self.lines[0]
self.method = self.req_line.split(" ")[0]
self.path = self.req_line.split(" ")[1]
self.http_version = self.req_line.split(" ")[2]
self.headers = {}
for i in range(1, len(self.lines)):
line = self.lines[i]
if not ":" in line:
#end of headers
break
header = line.split(":")[0].strip()
value = line.split(":")[1].strip()
self.headers[header] = value
self.content = "\r\n".join(self.lines[len(self.headers)+1:])
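# Illustrative sketch (not part of the original module): how the Request class
# above parses a minimal request. The header values shown are hypothetical;
# splitting on the first ":" only keeps ports in header values intact.
#
#   raw = ("GET /index.html HTTP/1.1\r\n"
#          "Host: localhost:8080\r\n"
#          "Accept: text/html\r\n"
#          "\r\n")
#   req = Request(None, raw)
#   req.method   -> "GET"
#   req.path     -> "/index.html"
#   req.headers  -> {"Host": "localhost:8080", "Accept": "text/html"}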
class Response:
def __init__(self, server, client=None, data=None):
self.server = server
self.headers = {}
self.statusCode = 200
self.status = "OK"
self.path = "./index.html"
self.client = client
self.data = data
def header(self, name, value):
if not (name == "Content-Length" or name == "Content-Type"):
self.headers[name] = value
return 1
else:
return 0
def setCode(self, statusCode):
self.statusCode = statusCode
        # use .get() so an unrecognised status code returns 0 instead of
        # raising KeyError
        codeMeaning = httplib.responses.get(int(self.statusCode))
        if codeMeaning:
            self.status = codeMeaning
        else:
            return 0
return 1
def form_headers(self, content):
self.headers["Content-Type"] = mimetypes.types_map["." + self.path.split("/")[-1].split(".")[-1]]
self.headers["Content-Length"] = len(content)
return "".join([str(i)+": "+str(j)+"\r\n" for i,j in self.headers.iteritems()])
def getContent(self):
        if not os.path.isfile(self.path):
            if "./404" not in self.server.events:
                self.path = ".html" # do this so mimetypes interprets data as .html file
                return "<h1>404 File Not Found</h1>"
            # a registered 404 handler responds to the client itself, so do
            # not fall through and try to open the missing file
            self.server.events["./404"](self.client, self.data)
            return ""
f = open(self.path, "rb")
f_contents = f.read()
f.close()
return f_contents
def form(self):
global newline
content = self.getContent()
response = "HTTP/1.1 " + str(self.statusCode) + " " + self.status + newline +\
self.form_headers(content) + newline +\
content
return response
def handle(server, client, data):
req = Request(client, data)
if req.path.startswith("/"):
req.path = "." + req.path
else:
req.path = "./" + req.path
if req.path in server.events:
server.events[req.path](client, req)
else:
# no handler
r = Response(server, client, data)
r.path = req.path
client.send(r.form())
try:
client.close()
except:
pass
class Server:
def __init__(self):
self.events = {}
def on(self, event, function):
if event.startswith("/"):
event = "." + event
elif not event.startswith("./"):
event = "./" + event
self.events[str(event)] = function
def start(self):
global host, port, backlog, size
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host,port))
s.listen(backlog)
while 1:
client, address = s.accept()
data = client.recv(size)
start_new_thread(handle, (self, client, data))
| {
"content_hash": "20b02532aedde799a6e8e5a6951eb6f0",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 105,
"avg_line_length": 27.36764705882353,
"alnum_prop": 0.5370768404083825,
"repo_name": "MattCorrigan/Python-REST-API",
"id": "0027a8db12e2f7b8c2b286cb4ac2dd4ff47a0fe8",
"size": "3745",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "8605"
}
],
"symlink_target": ""
} |
"""Bio.SeqIO support for the binary Standard Flowgram Format (SFF) file format.
SFF was designed by 454 Life Sciences (Roche), the Whitehead Institute for
Biomedical Research and the Wellcome Trust Sanger Institute. SFF was also used
as the native output format from early versions of Ion Torrent's PGM platform.
You are expected to use this module via the Bio.SeqIO functions under
the format name "sff" (or "sff-trim" as described below).
For example, to iterate over the records in an SFF file,
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff"):
... print("%s %i %s..." % (record.id, len(record), record.seq[:20]))
...
E3MFGYR02JWQ7T 265 tcagGGTCTACATGTTGGTT...
E3MFGYR02JA6IL 271 tcagTTTTTTTTGGAAAGGA...
E3MFGYR02JHD4H 310 tcagAAAGACAAGTGGTATC...
E3MFGYR02GFKUC 299 tcagCGGCCGGGCCTCTCAT...
E3MFGYR02FTGED 281 tcagTGGTAATGGGGGGAAA...
E3MFGYR02FR9G7 261 tcagCTCCGTAAGAAGGTGC...
E3MFGYR02GAZMS 278 tcagAAAGAAGTAAGGTAAA...
E3MFGYR02HHZ8O 221 tcagACTTTCTTCTTTACCG...
E3MFGYR02GPGB1 269 tcagAAGCAGTGGTATCAAC...
E3MFGYR02F7Z7G 219 tcagAATCATCCACTTTTTA...
Each SeqRecord object will contain all the annotation from the SFF file,
including the PHRED quality scores.
>>> print("%s %i" % (record.id, len(record)))
E3MFGYR02F7Z7G 219
>>> print("%s..." % record.seq[:10])
tcagAATCAT...
>>> print("%r..." % (record.letter_annotations["phred_quality"][:10]))
[22, 21, 23, 28, 26, 15, 12, 21, 28, 21]...
Notice that the sequence is given in mixed case; the central upper case region
corresponds to the trimmed sequence. This matches the output of the Roche
tools (and the 3rd party tool sff_extract) for SFF to FASTA.
>>> print(record.annotations["clip_qual_left"])
4
>>> print(record.annotations["clip_qual_right"])
134
>>> print(record.seq[:4])
tcag
>>> print("%s...%s" % (record.seq[4:20], record.seq[120:134]))
AATCATCCACTTTTTA...CAAAACACAAACAG
>>> print(record.seq[134:])
atcttatcaacaaaactcaaagttcctaactgagacacgcaacaggggataagacaaggcacacaggggataggnnnnnnnnnnn
The annotations dictionary also contains any adapter clip positions
(usually zero), and information about the flows. e.g.
>>> len(record.annotations)
11
>>> print(record.annotations["flow_key"])
TCAG
>>> print(record.annotations["flow_values"][:10])
(83, 1, 128, 7, 4, 84, 6, 106, 3, 172)
>>> print(len(record.annotations["flow_values"]))
400
>>> print(record.annotations["flow_index"][:10])
(1, 2, 3, 2, 2, 0, 3, 2, 3, 3)
>>> print(len(record.annotations["flow_index"]))
219
Note that to convert from a raw reading in flow_values to the corresponding
homopolymer stretch estimate, the value should be rounded to the nearest 100:
>>> print("%r..." % [int(round(value, -2)) // 100
... for value in record.annotations["flow_values"][:10]])
...
[1, 0, 1, 0, 0, 1, 0, 1, 0, 2]...
If a read name is exactly 14 alphanumeric characters, the annotations
dictionary will also contain meta-data about the read extracted by
interpreting the name as a 454 Sequencing System "Universal" Accession
Number. Note that if a read name happens to be exactly 14 alphanumeric
characters but was not generated automatically, these annotation records
will contain nonsense information.
>>> print(record.annotations["region"])
2
>>> print(record.annotations["time"])
[2008, 1, 9, 16, 16, 0]
>>> print(record.annotations["coords"])
(2434, 1658)
As a convenience method, you can read the file with SeqIO format name "sff-trim"
instead of "sff" to get just the trimmed sequences (without any annotation
except for the PHRED quality scores and anything encoded in the read names):
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim"):
... print("%s %i %s..." % (record.id, len(record), record.seq[:20]))
...
E3MFGYR02JWQ7T 260 GGTCTACATGTTGGTTAACC...
E3MFGYR02JA6IL 265 TTTTTTTTGGAAAGGAAAAC...
E3MFGYR02JHD4H 292 AAAGACAAGTGGTATCAACG...
E3MFGYR02GFKUC 295 CGGCCGGGCCTCTCATCGGT...
E3MFGYR02FTGED 277 TGGTAATGGGGGGAAATTTA...
E3MFGYR02FR9G7 256 CTCCGTAAGAAGGTGCTGCC...
E3MFGYR02GAZMS 271 AAAGAAGTAAGGTAAATAAC...
E3MFGYR02HHZ8O 150 ACTTTCTTCTTTACCGTAAC...
E3MFGYR02GPGB1 221 AAGCAGTGGTATCAACGCAG...
E3MFGYR02F7Z7G 130 AATCATCCACTTTTTAACGT...
Looking at the final record in more detail, note how this differs to the
example above:
>>> print("%s %i" % (record.id, len(record)))
E3MFGYR02F7Z7G 130
>>> print("%s..." % record.seq[:10])
AATCATCCAC...
>>> print("%r..." % record.letter_annotations["phred_quality"][:10])
[26, 15, 12, 21, 28, 21, 36, 28, 27, 27]...
>>> len(record.annotations)
3
>>> print(record.annotations["region"])
2
>>> print(record.annotations["coords"])
(2434, 1658)
>>> print(record.annotations["time"])
[2008, 1, 9, 16, 16, 0]
You might use the Bio.SeqIO.convert() function to convert the (trimmed) SFF
reads into a FASTQ file (or a FASTA file and a QUAL file), e.g.
>>> from Bio import SeqIO
>>> try:
... from StringIO import StringIO # Python 2
... except ImportError:
... from io import StringIO # Python 3
...
>>> out_handle = StringIO()
>>> count = SeqIO.convert("Roche/E3MFGYR02_random_10_reads.sff", "sff",
... out_handle, "fastq")
...
>>> print("Converted %i records" % count)
Converted 10 records
The output FASTQ file would start like this:
>>> print("%s..." % out_handle.getvalue()[:50])
@E3MFGYR02JWQ7T
tcagGGTCTACATGTTGGTTAACCCGTACTGATT...
Bio.SeqIO.index() provides memory efficient random access to the reads in an
SFF file by name. SFF files can include an index within the file, which can
be read in making this very fast. If the index is missing (or in a format not
yet supported in Biopython) the file is indexed by scanning all the reads -
which is a little slower. For example,
>>> from Bio import SeqIO
>>> reads = SeqIO.index("Roche/E3MFGYR02_random_10_reads.sff", "sff")
>>> record = reads["E3MFGYR02JHD4H"]
>>> print("%s %i %s..." % (record.id, len(record), record.seq[:20]))
E3MFGYR02JHD4H 310 tcagAAAGACAAGTGGTATC...
>>> reads.close()
Or, using the trimmed reads:
>>> from Bio import SeqIO
>>> reads = SeqIO.index("Roche/E3MFGYR02_random_10_reads.sff", "sff-trim")
>>> record = reads["E3MFGYR02JHD4H"]
>>> print("%s %i %s..." % (record.id, len(record), record.seq[:20]))
E3MFGYR02JHD4H 292 AAAGACAAGTGGTATCAACG...
>>> reads.close()
You can also use the Bio.SeqIO.write() function with the "sff" format. Note
that this requires all the flow information etc, and thus is probably only
useful for SeqRecord objects originally from reading another SFF file (and
not the trimmed SeqRecord objects from parsing an SFF file as "sff-trim").
As an example, let's pretend this example SFF file represents some DNA which
was pre-amplified with a PCR primer AAAGANNNNN. The following script would
produce a sub-file containing all those reads whose post-quality clipping
region (i.e. the sequence after trimming) starts with AAAGA exactly (the non-
degenerate bit of this pretend primer):
>>> from Bio import SeqIO
>>> records = (record for record in
... SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff")
... if record.seq[record.annotations["clip_qual_left"]:].startswith("AAAGA"))
...
>>> count = SeqIO.write(records, "temp_filtered.sff", "sff")
>>> print("Selected %i records" % count)
Selected 2 records
Of course, for an assembly you would probably want to remove these primers.
If you want FASTA or FASTQ output, you could just slice the SeqRecord. However,
if you want SFF output we have to preserve all the flow information - the trick
is just to adjust the left clip position!
>>> from Bio import SeqIO
>>> def filter_and_trim(records, primer):
... for record in records:
... if record.seq[record.annotations["clip_qual_left"]:].startswith(primer):
... record.annotations["clip_qual_left"] += len(primer)
... yield record
...
>>> records = SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff")
>>> count = SeqIO.write(filter_and_trim(records, "AAAGA"),
... "temp_filtered.sff", "sff")
...
>>> print("Selected %i records" % count)
Selected 2 records
We can check the results, note the lower case clipped region now includes the "AAAGA"
sequence:
>>> for record in SeqIO.parse("temp_filtered.sff", "sff"):
... print("%s %i %s..." % (record.id, len(record), record.seq[:20]))
...
E3MFGYR02JHD4H 310 tcagaaagaCAAGTGGTATC...
E3MFGYR02GAZMS 278 tcagaaagaAGTAAGGTAAA...
>>> for record in SeqIO.parse("temp_filtered.sff", "sff-trim"):
... print("%s %i %s..." % (record.id, len(record), record.seq[:20]))
...
E3MFGYR02JHD4H 287 CAAGTGGTATCAACGCAGAG...
E3MFGYR02GAZMS 266 AGTAAGGTAAATAACAAACG...
>>> import os
>>> os.remove("temp_filtered.sff")
For a description of the file format, please see the Roche manuals and:
http://www.ncbi.nlm.nih.gov/Traces/trace.cgi?cmd=show&f=formats&m=doc&s=formats
"""
from __future__ import print_function
from Bio.SeqIO.Interfaces import SequenceWriter
from Bio import Alphabet
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
import struct
import sys
import re
from Bio._py3k import _bytes_to_string, _as_bytes
_null = b"\0"
_sff = b".sff"
_hsh = b".hsh"
_srt = b".srt"
_mft = b".mft"
_flag = b"\xff"
__docformat__ = "restructuredtext en"
def _check_mode(handle):
"""Ensure handle not opened in text mode.
Ensures mode is not set for Universal new line
and ensures mode is binary for Windows
"""
# TODO - Does this need to be stricter under Python 3?
mode = ""
if hasattr(handle, "mode"):
mode = handle.mode
if mode == 1:
# gzip.open(...) does this, fine
return
mode = str(mode)
if mode and "U" in mode.upper():
raise ValueError("SFF files must NOT be opened in universal new "
"lines mode. Binary mode is recommended (although "
"on Unix the default mode is also fine).")
elif mode and "B" not in mode.upper() \
and sys.platform == "win32":
raise ValueError("SFF files must be opened in binary mode on Windows")
def _sff_file_header(handle):
"""Read in an SFF file header (PRIVATE).
Assumes the handle is at the start of the file, will read forwards
    through the header and leave the handle pointing at the first record.
Returns a tuple of values from the header (header_length, index_offset,
index_length, number_of_reads, flows_per_read, flow_chars, key_sequence)
>>> with open("Roche/greek.sff", "rb") as handle:
... values = _sff_file_header(handle)
...
>>> print(values[0])
840
>>> print(values[1])
65040
>>> print(values[2])
256
>>> print(values[3])
24
>>> print(values[4])
800
>>> values[-1]
'TCAG'
"""
_check_mode(handle)
# file header (part one)
    # use big endian encoding >
# magic_number I
# version 4B
# index_offset Q
# index_length I
# number_of_reads I
# header_length H
# key_length H
# number_of_flows_per_read H
# flowgram_format_code B
# [rest of file header depends on the number of flows and how many keys]
fmt = '>4s4BQIIHHHB'
assert 31 == struct.calcsize(fmt)
data = handle.read(31)
if not data:
raise ValueError("Empty file.")
elif len(data) < 13:
raise ValueError("File too small to hold a valid SFF header.")
magic_number, ver0, ver1, ver2, ver3, index_offset, index_length, \
number_of_reads, header_length, key_length, number_of_flows_per_read, \
flowgram_format = struct.unpack(fmt, data)
if magic_number in [_hsh, _srt, _mft]:
# Probably user error, calling Bio.SeqIO.parse() twice!
raise ValueError("Handle seems to be at SFF index block, not start")
if magic_number != _sff: # 779314790
raise ValueError("SFF file did not start '.sff', but %s"
% repr(magic_number))
if (ver0, ver1, ver2, ver3) != (0, 0, 0, 1):
raise ValueError("Unsupported SFF version in header, %i.%i.%i.%i"
% (ver0, ver1, ver2, ver3))
if flowgram_format != 1:
raise ValueError("Flowgram format code %i not supported"
% flowgram_format)
if (index_offset != 0) ^ (index_length != 0):
raise ValueError("Index offset %i but index length %i"
% (index_offset, index_length))
flow_chars = _bytes_to_string(handle.read(number_of_flows_per_read))
key_sequence = _bytes_to_string(handle.read(key_length))
# According to the spec, the header_length field should be the total number
# of bytes required by this set of header fields, and should be equal to
# "31 + number_of_flows_per_read + key_length" rounded up to the next value
# divisible by 8.
assert header_length % 8 == 0
padding = header_length - number_of_flows_per_read - key_length - 31
assert 0 <= padding < 8, padding
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post header %i byte "
"null padding region contained data." % padding,
BiopythonParserWarning)
return header_length, index_offset, index_length, \
number_of_reads, number_of_flows_per_read, \
flow_chars, key_sequence
def _sff_do_slow_index(handle):
"""Generates an index by scanning though all the reads in an SFF file (PRIVATE).
This is a slow but generic approach if we can't parse the provided index
(if present).
Will use the handle seek/tell functions.
"""
handle.seek(0)
header_length, index_offset, index_length, number_of_reads, \
number_of_flows_per_read, flow_chars, key_sequence \
= _sff_file_header(handle)
# Now on to the reads...
read_header_fmt = '>2HI4H'
read_header_size = struct.calcsize(read_header_fmt)
# NOTE - assuming flowgram_format==1, which means struct type H
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
assert 1 == struct.calcsize(">B")
assert 1 == struct.calcsize(">s")
assert 1 == struct.calcsize(">c")
assert read_header_size % 8 == 0 # Important for padding calc later!
for read in range(number_of_reads):
record_offset = handle.tell()
if record_offset == index_offset:
# Found index block within reads, ignore it:
offset = index_offset + index_length
if offset % 8:
offset += 8 - (offset % 8)
assert offset % 8 == 0
handle.seek(offset)
record_offset = offset
# assert record_offset%8 == 0 # Worth checking, but slow
# First the fixed header
data = handle.read(read_header_size)
read_header_length, name_length, seq_len, clip_qual_left, \
clip_qual_right, clip_adapter_left, clip_adapter_right \
= struct.unpack(read_header_fmt, data)
if read_header_length < 10 or read_header_length % 8 != 0:
raise ValueError("Malformed read header, says length is %i:\n%s"
% (read_header_length, repr(data)))
# now the name and any padding (remainder of header)
name = _bytes_to_string(handle.read(name_length))
padding = read_header_length - read_header_size - name_length
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post name %i byte "
"padding region contained data" % padding,
BiopythonParserWarning)
assert record_offset + read_header_length == handle.tell()
# now the flowgram values, flowgram index, bases and qualities
size = read_flow_size + 3 * seq_len
handle.seek(size, 1)
# now any padding...
padding = size % 8
if padding:
padding = 8 - padding
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post quality %i "
"byte padding region contained data" % padding,
BiopythonParserWarning)
# print("%s %s %i" % (read, name, record_offset))
yield name, record_offset
if handle.tell() % 8 != 0:
raise ValueError(
"After scanning reads, did not end on a multiple of 8")
def _sff_find_roche_index(handle):
"""Locate any existing Roche style XML meta data and read index (PRIVATE).
Makes a number of hard coded assumptions based on reverse engineered SFF
files from Roche 454 machines.
Returns a tuple of read count, SFF "index" offset and size, XML offset
and size, and the actual read index offset and size.
Raises a ValueError for unsupported or non-Roche index blocks.
"""
handle.seek(0)
header_length, index_offset, index_length, number_of_reads, \
number_of_flows_per_read, flow_chars, key_sequence \
= _sff_file_header(handle)
assert handle.tell() == header_length
    if not index_offset or not index_length:
raise ValueError("No index present in this SFF file")
# Now jump to the header...
handle.seek(index_offset)
fmt = ">4s4B"
fmt_size = struct.calcsize(fmt)
data = handle.read(fmt_size)
if not data:
raise ValueError("Premature end of file? Expected index of size %i at offest %i, found nothing"
% (index_length, index_offset))
if len(data) < fmt_size:
raise ValueError("Premature end of file? Expected index of size %i at offest %i, found %s"
% (index_length, index_offset, repr(data)))
magic_number, ver0, ver1, ver2, ver3 = struct.unpack(fmt, data)
if magic_number == _mft: # 778921588
# Roche 454 manifest index
# This is typical from raw Roche 454 SFF files (2009), and includes
# both an XML manifest and the sorted index.
if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
# This is "1.00" as a string
raise ValueError("Unsupported version in .mft index header, %i.%i.%i.%i"
% (ver0, ver1, ver2, ver3))
fmt2 = ">LL"
fmt2_size = struct.calcsize(fmt2)
xml_size, data_size = struct.unpack(fmt2, handle.read(fmt2_size))
if index_length != fmt_size + fmt2_size + xml_size + data_size:
raise ValueError("Problem understanding .mft index header, %i != %i + %i + %i + %i"
% (index_length, fmt_size, fmt2_size, xml_size, data_size))
return number_of_reads, header_length, \
index_offset, index_length, \
index_offset + fmt_size + fmt2_size, xml_size, \
index_offset + fmt_size + fmt2_size + xml_size, data_size
elif magic_number == _srt: # 779317876
# Roche 454 sorted index
# I've had this from Roche tool sfffile when the read identifiers
# had nonstandard lengths and there was no XML manifest.
if (ver0, ver1, ver2, ver3) != (49, 46, 48, 48):
# This is "1.00" as a string
raise ValueError("Unsupported version in .srt index header, %i.%i.%i.%i"
% (ver0, ver1, ver2, ver3))
data = handle.read(4)
if data != _null * 4:
raise ValueError(
"Did not find expected null four bytes in .srt index")
return number_of_reads, header_length, \
index_offset, index_length, \
0, 0, \
index_offset + fmt_size + 4, index_length - fmt_size - 4
elif magic_number == _hsh:
raise ValueError("Hash table style indexes (.hsh) in SFF files are "
"not (yet) supported")
else:
raise ValueError("Unknown magic number %s in SFF index header:\n%s"
% (repr(magic_number), repr(data)))
def ReadRocheXmlManifest(handle):
"""Reads any Roche style XML manifest data in the SFF "index".
The SFF file format allows for multiple different index blocks, and Roche
took advantage of this to define their own index block which also embeds
    an XML manifest string. This is not a publicly documented extension to
the SFF file format, this was reverse engineered.
The handle should be to an SFF file opened in binary mode. This function
will use the handle seek/tell functions and leave the handle in an
arbitrary location.
Any XML manifest found is returned as a Python string, which you can then
parse as appropriate, or reuse when writing out SFF files with the
SffWriter class.
    Returns a string, or raises a ValueError if a Roche manifest could not be
found.
"""
number_of_reads, header_length, index_offset, index_length, xml_offset, \
xml_size, read_index_offset, read_index_size = _sff_find_roche_index(
handle)
if not xml_offset or not xml_size:
raise ValueError("No XML manifest found")
handle.seek(xml_offset)
return _bytes_to_string(handle.read(xml_size))
# This is a generator function!
def _sff_read_roche_index(handle):
"""Reads any existing Roche style read index provided in the SFF file (PRIVATE).
Will use the handle seek/tell functions.
This works on ".srt1.00" and ".mft1.00" style Roche SFF index blocks.
    Roche SFF indices use base 255 not 256, meaning we see bytes in the
    range 0 to 254 only. This appears to be so that byte 0xFF (character 255)
can be used as a marker character to separate entries (required if the
read name lengths vary).
Note that since only four bytes are used for the read offset, this is
limited to 255^4 bytes (nearly 4GB). If you try to use the Roche sfffile
    tool to combine SFF files beyond this limit, they issue a warning and
omit the index (and manifest).
"""
number_of_reads, header_length, index_offset, index_length, xml_offset, \
xml_size, read_index_offset, read_index_size = _sff_find_roche_index(
handle)
# Now parse the read index...
handle.seek(read_index_offset)
fmt = ">5B"
for read in range(number_of_reads):
# TODO - Be more aware of when the index should end?
data = handle.read(6)
while True:
more = handle.read(1)
if not more:
raise ValueError("Premature end of file!")
data += more
if more == _flag:
break
assert data[-1:] == _flag, data[-1:]
name = _bytes_to_string(data[:-6])
off4, off3, off2, off1, off0 = struct.unpack(fmt, data[-6:-1])
offset = off0 + 255 * off1 + 65025 * off2 + 16581375 * off3
if off4:
# Could in theory be used as a fifth piece of offset information,
            # i.e. offset += 4228250625L*off4, but testing with the Roche tools
            # this is not the case. They simply don't support such large indexes.
raise ValueError("Expected a null terminator to the read name.")
yield name, offset
if handle.tell() != read_index_offset + read_index_size:
raise ValueError("Problem with index length? %i vs %i"
% (handle.tell(), read_index_offset + read_index_size))
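# Worked example (comment only, not part of the original module): under the
# base-255 scheme above, a hypothetical offset field of
# (off3, off2, off1, off0) = (0, 1, 2, 3) decodes to
#     3 + 255 * 2 + 65025 * 1 + 16581375 * 0 == 65538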
_valid_UAN_read_name = re.compile(r'^[a-zA-Z0-9]{14}$')
def _sff_read_seq_record(handle, number_of_flows_per_read, flow_chars,
key_sequence, alphabet, trim=False):
"""Parse the next read in the file, return data as a SeqRecord (PRIVATE)."""
# Now on to the reads...
# the read header format (fixed part):
# read_header_length H
# name_length H
# seq_len I
# clip_qual_left H
# clip_qual_right H
# clip_adapter_left H
# clip_adapter_right H
# [rest of read header depends on the name length etc]
read_header_fmt = '>2HI4H'
read_header_size = struct.calcsize(read_header_fmt)
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
read_header_length, name_length, seq_len, clip_qual_left, \
clip_qual_right, clip_adapter_left, clip_adapter_right \
= struct.unpack(read_header_fmt, handle.read(read_header_size))
if clip_qual_left:
clip_qual_left -= 1 # python counting
if clip_adapter_left:
clip_adapter_left -= 1 # python counting
if read_header_length < 10 or read_header_length % 8 != 0:
raise ValueError("Malformed read header, says length is %i"
% read_header_length)
# now the name and any padding (remainder of header)
name = _bytes_to_string(handle.read(name_length))
padding = read_header_length - read_header_size - name_length
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post name %i "
"byte padding region contained data" % padding,
BiopythonParserWarning)
# now the flowgram values, flowgram index, bases and qualities
# NOTE - assuming flowgram_format==1, which means struct type H
flow_values = handle.read(read_flow_size) # unpack later if needed
temp_fmt = ">%iB" % seq_len # used for flow index and quals
flow_index = handle.read(seq_len) # unpack later if needed
seq = _bytes_to_string(handle.read(seq_len)) # TODO - Use bytes in Seq?
quals = list(struct.unpack(temp_fmt, handle.read(seq_len)))
# now any padding...
padding = (read_flow_size + seq_len * 3) % 8
if padding:
padding = 8 - padding
if handle.read(padding).count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post quality %i "
"byte padding region contained data" % padding,
BiopythonParserWarning)
# Follow Roche and apply most aggressive of qual and adapter clipping.
# Note Roche seems to ignore adapter clip fields when writing SFF,
# and uses just the quality clipping values for any clipping.
clip_left = max(clip_qual_left, clip_adapter_left)
# Right clipping of zero means no clipping
if clip_qual_right:
if clip_adapter_right:
clip_right = min(clip_qual_right, clip_adapter_right)
else:
# Typical case with Roche SFF files
clip_right = clip_qual_right
elif clip_adapter_right:
clip_right = clip_adapter_right
else:
clip_right = seq_len
# Now build a SeqRecord
if trim:
if clip_left >= clip_right:
# Raise an error?
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Overlapping clip values in SFF record, trimmed to nothing",
BiopythonParserWarning)
seq = ""
quals = []
else:
seq = seq[clip_left:clip_right].upper()
quals = quals[clip_left:clip_right]
# Don't record the clipping values, flow etc, they make no sense now:
annotations = {}
else:
if clip_left >= clip_right:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Overlapping clip values in SFF record", BiopythonParserWarning)
seq = seq.lower()
else:
# This use of mixed case mimics the Roche SFF tool's FASTA output
seq = seq[:clip_left].lower() + \
seq[clip_left:clip_right].upper() + \
seq[clip_right:].lower()
annotations = {"flow_values": struct.unpack(read_flow_fmt, flow_values),
"flow_index": struct.unpack(temp_fmt, flow_index),
"flow_chars": flow_chars,
"flow_key": key_sequence,
"clip_qual_left": clip_qual_left,
"clip_qual_right": clip_qual_right,
"clip_adapter_left": clip_adapter_left,
"clip_adapter_right": clip_adapter_right}
if re.match(_valid_UAN_read_name, name):
annotations["time"] = _get_read_time(name)
annotations["region"] = _get_read_region(name)
annotations["coords"] = _get_read_xy(name)
record = SeqRecord(Seq(seq, alphabet),
id=name,
name=name,
description="",
annotations=annotations)
# Dirty trick to speed up this line:
# record.letter_annotations["phred_quality"] = quals
dict.__setitem__(record._per_letter_annotations,
"phred_quality", quals)
# Return the record and then continue...
return record
_powers_of_36 = [36 ** i for i in range(6)]
def _string_as_base_36(string):
"""Interpret a string as a base-36 number as per 454 manual."""
total = 0
for c, power in zip(string[::-1], _powers_of_36):
# For reference: ord('0') = 48, ord('9') = 57
# For reference: ord('A') = 65, ord('Z') = 90
# For reference: ord('a') = 97, ord('z') = 122
if 48 <= ord(c) <= 57:
val = ord(c) - 22 # equivalent to: - ord('0') + 26
elif 65 <= ord(c) <= 90:
val = ord(c) - 65
elif 97 <= ord(c) <= 122:
val = ord(c) - 97
else:
# Invalid character
val = 0
total += val * power
return total
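# Worked example (comment only, not part of the original module): with the
# digit mapping above ('A'-'Z' and 'a'-'z' map to 0-25, '0'-'9' to 26-35),
# a hypothetical two character fragment "B3" decodes as
#     _string_as_base_36("B3") == 1 * 36 + 29 == 65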
def _get_read_xy(read_name):
"""Extract coordinates from last 5 characters of read name."""
number = _string_as_base_36(read_name[9:])
return divmod(number, 4096)
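# For example, the doctest read E3MFGYR02F7Z7G near the top of this module has
# the 5 character suffix "F7Z7G", which decodes to the coordinates (2434, 1658)
# shown in its annotations["coords"].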
_time_denominators = [13 * 32 * 24 * 60 * 60,
32 * 24 * 60 * 60,
24 * 60 * 60,
60 * 60,
60]
def _get_read_time(read_name):
"""Extract time from first 6 characters of read name."""
time_list = []
remainder = _string_as_base_36(read_name[:6])
for denominator in _time_denominators:
this_term, remainder = divmod(remainder, denominator)
time_list.append(this_term)
time_list.append(remainder)
time_list[0] += 2000
return time_list
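# For example, the doctest reads near the top of this module share the name
# prefix "E3MFGY", which decodes to [2008, 1, 9, 16, 16, 0] as shown in the
# annotations["time"] example above.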
def _get_read_region(read_name):
"""Extract region from read name."""
return int(read_name[8])
def _sff_read_raw_record(handle, number_of_flows_per_read):
"""Extract the next read in the file as a raw (bytes) string (PRIVATE)."""
read_header_fmt = '>2HI'
read_header_size = struct.calcsize(read_header_fmt)
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
raw = handle.read(read_header_size)
read_header_length, name_length, seq_len \
= struct.unpack(read_header_fmt, raw)
if read_header_length < 10 or read_header_length % 8 != 0:
raise ValueError("Malformed read header, says length is %i"
% read_header_length)
# now the four clip values (4H = 8 bytes), and read name
raw += handle.read(8 + name_length)
# and any padding (remainder of header)
padding = read_header_length - read_header_size - 8 - name_length
pad = handle.read(padding)
if pad.count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post name %i "
"byte padding region contained data" % padding,
BiopythonParserWarning)
raw += pad
# now the flowgram values, flowgram index, bases and qualities
raw += handle.read(read_flow_size + seq_len * 3)
padding = (read_flow_size + seq_len * 3) % 8
# now any padding...
if padding:
padding = 8 - padding
pad = handle.read(padding)
if pad.count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post quality %i "
"byte padding region contained data" % padding,
BiopythonParserWarning)
raw += pad
# Return the raw bytes
return raw
class _AddTellHandle(object):
"""Wrapper for handles which do not support the tell method (PRIVATE).
Intended for use with things like network handles where tell (and reverse
seek) are not supported. The SFF file needs to track the current offset in
order to deal with the index block.
"""
def __init__(self, handle):
self._handle = handle
self._offset = 0
def read(self, length):
data = self._handle.read(length)
self._offset += len(data)
return data
def tell(self):
return self._offset
def seek(self, offset):
if offset < self._offset:
raise RuntimeError("Can't seek backwards")
self._handle.read(offset - self._offset)
def close(self):
return self._handle.close()
# This is a generator function!
def SffIterator(handle, alphabet=Alphabet.generic_dna, trim=False):
"""Iterate over Standard Flowgram Format (SFF) reads (as SeqRecord objects).
- handle - input file, an SFF file, e.g. from Roche 454 sequencing.
This must NOT be opened in universal read lines mode!
- alphabet - optional alphabet, defaults to generic DNA.
- trim - should the sequences be trimmed?
The resulting SeqRecord objects should match those from a paired FASTA
and QUAL file converted from the SFF file using the Roche 454 tool
    sffinfo, i.e. the sequence will be mixed case, with the trim regions
shown in lower case.
This function is used internally via the Bio.SeqIO functions:
>>> from Bio import SeqIO
>>> for record in SeqIO.parse("Roche/E3MFGYR02_random_10_reads.sff", "sff"):
... print("%s %i" % (record.id, len(record)))
...
E3MFGYR02JWQ7T 265
E3MFGYR02JA6IL 271
E3MFGYR02JHD4H 310
E3MFGYR02GFKUC 299
E3MFGYR02FTGED 281
E3MFGYR02FR9G7 261
E3MFGYR02GAZMS 278
E3MFGYR02HHZ8O 221
E3MFGYR02GPGB1 269
E3MFGYR02F7Z7G 219
You can also call it directly:
>>> with open("Roche/E3MFGYR02_random_10_reads.sff", "rb") as handle:
... for record in SffIterator(handle):
... print("%s %i" % (record.id, len(record)))
...
E3MFGYR02JWQ7T 265
E3MFGYR02JA6IL 271
E3MFGYR02JHD4H 310
E3MFGYR02GFKUC 299
E3MFGYR02FTGED 281
E3MFGYR02FR9G7 261
E3MFGYR02GAZMS 278
E3MFGYR02HHZ8O 221
E3MFGYR02GPGB1 269
E3MFGYR02F7Z7G 219
Or, with the trim option:
>>> with open("Roche/E3MFGYR02_random_10_reads.sff", "rb") as handle:
... for record in SffIterator(handle, trim=True):
... print("%s %i" % (record.id, len(record)))
...
E3MFGYR02JWQ7T 260
E3MFGYR02JA6IL 265
E3MFGYR02JHD4H 292
E3MFGYR02GFKUC 295
E3MFGYR02FTGED 277
E3MFGYR02FR9G7 256
E3MFGYR02GAZMS 271
E3MFGYR02HHZ8O 150
E3MFGYR02GPGB1 221
E3MFGYR02F7Z7G 130
"""
if isinstance(Alphabet._get_base_alphabet(alphabet),
Alphabet.ProteinAlphabet):
raise ValueError("Invalid alphabet, SFF files do not hold proteins.")
if isinstance(Alphabet._get_base_alphabet(alphabet),
Alphabet.RNAAlphabet):
raise ValueError("Invalid alphabet, SFF files do not hold RNA.")
try:
assert 0 == handle.tell(), "Not at start of file, offset %i" % handle.tell()
except AttributeError:
# Probably a network handle or something like that
handle = _AddTellHandle(handle)
header_length, index_offset, index_length, number_of_reads, \
number_of_flows_per_read, flow_chars, key_sequence \
= _sff_file_header(handle)
# Now on to the reads...
# the read header format (fixed part):
# read_header_length H
# name_length H
# seq_len I
# clip_qual_left H
# clip_qual_right H
# clip_adapter_left H
# clip_adapter_right H
# [rest of read header depends on the name length etc]
read_header_fmt = '>2HI4H'
read_header_size = struct.calcsize(read_header_fmt)
read_flow_fmt = ">%iH" % number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
assert 1 == struct.calcsize(">B")
assert 1 == struct.calcsize(">s")
assert 1 == struct.calcsize(">c")
assert read_header_size % 8 == 0 # Important for padding calc later!
# The spec allows for the index block to be before or even in the middle
# of the reads. We can check that if we keep track of our position
# in the file...
for read in range(number_of_reads):
if index_offset and handle.tell() == index_offset:
offset = index_offset + index_length
if offset % 8:
offset += 8 - (offset % 8)
assert offset % 8 == 0
handle.seek(offset)
# Now that we've done this, we don't need to do it again. Clear
# the index_offset so we can skip extra handle.tell() calls:
index_offset = 0
yield _sff_read_seq_record(handle,
number_of_flows_per_read,
flow_chars,
key_sequence,
alphabet,
trim)
_check_eof(handle, index_offset, index_length)
def _check_eof(handle, index_offset, index_length):
"""Check final padding is OK (8 byte alignment) and file ends (PRIVATE).
Will attempt to spot apparent SFF file concatenation and give an error.
Will not attempt to seek, only moves the handle forward.
"""
offset = handle.tell()
extra = b""
padding = 0
if index_offset and offset <= index_offset:
# Index block then end of file...
if offset < index_offset:
raise ValueError("Gap of %i bytes after final record end %i, "
"before %i where index starts?"
% (index_offset - offset, offset, index_offset))
# Doing read to jump the index rather than a seek
# in case this is a network handle or similar
handle.read(index_offset + index_length - offset)
offset = index_offset + index_length
assert offset == handle.tell(), \
"Wanted %i, got %i, index is %i to %i" \
% (offset, handle.tell(), index_offset, index_offset + index_length)
if offset % 8:
padding = 8 - (offset % 8)
extra = handle.read(padding)
if padding >= 4 and extra[-4:] == _sff:
# Seen this in one user supplied file, should have been
# four bytes of null padding but was actually .sff and
# the start of a new concatenated SFF file!
raise ValueError("Your SFF file is invalid, post index %i byte "
"null padding region ended '.sff' which could "
"be the start of a concatenated SFF file? "
"See offset %i" % (padding, offset))
if padding and not extra:
# TODO - Is this error harmless enough to just ignore?
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is technically invalid as it is missing "
"a terminal %i byte null padding region." % padding,
BiopythonParserWarning)
return
if extra.count(_null) != padding:
import warnings
from Bio import BiopythonParserWarning
warnings.warn("Your SFF file is invalid, post index %i byte "
"null padding region contained data: %r"
% (padding, extra), BiopythonParserWarning)
offset = handle.tell()
assert offset % 8 == 0, \
"Wanted offset %i %% 8 = %i to be zero" % (offset, offset % 8)
# Should now be at the end of the file...
extra = handle.read(4)
if extra == _sff:
raise ValueError("Additional data at end of SFF file, "
"perhaps multiple SFF files concatenated? "
"See offset %i" % offset)
elif extra:
raise ValueError("Additional data at end of SFF file, "
"see offset %i" % offset)
# This is a generator function!
def _SffTrimIterator(handle, alphabet=Alphabet.generic_dna):
"""Iterate over SFF reads (as SeqRecord objects) with trimming (PRIVATE)."""
return SffIterator(handle, alphabet, trim=True)
class SffWriter(SequenceWriter):
"""SFF file writer."""
def __init__(self, handle, index=True, xml=None):
"""Creates the writer object.
- handle - Output handle, ideally in binary write mode.
- index - Boolean argument, should we try and write an index?
- xml - Optional string argument, xml manifest to be recorded in the index
block (see function ReadRocheXmlManifest for reading this data).
"""
_check_mode(handle)
self.handle = handle
self._xml = xml
if index:
self._index = []
else:
self._index = None
def write_file(self, records):
"""Use this to write an entire file containing the given records."""
try:
self._number_of_reads = len(records)
        except TypeError:
            self._number_of_reads = 0  # dummy value
            # Only when the record count is not known up front do we need to
            # be able to seek back and update the header at the end:
            if not hasattr(self.handle, "seek") \
                    or not hasattr(self.handle, "tell"):
                raise ValueError("A handle with a seek/tell methods is "
                                 "required in order to record the total "
                                 "record count in the file header (once it "
                                 "is known at the end).")
if self._index is not None and \
not (hasattr(self.handle, "seek") and hasattr(self.handle, "tell")):
import warnings
warnings.warn("A handle with a seek/tell methods is required in "
"order to record an SFF index.")
self._index = None
self._index_start = 0
self._index_length = 0
if not hasattr(records, "next"):
records = iter(records)
# Get the first record in order to find the flow information
# we will need for the header.
try:
record = next(records)
except StopIteration:
record = None
if record is None:
# No records -> empty SFF file (or an error)?
# We can't write a header without the flow information.
# return 0
raise ValueError("Must have at least one sequence")
try:
self._key_sequence = _as_bytes(record.annotations["flow_key"])
self._flow_chars = _as_bytes(record.annotations["flow_chars"])
self._number_of_flows_per_read = len(self._flow_chars)
except KeyError:
raise ValueError("Missing SFF flow information")
self.write_header()
self.write_record(record)
count = 1
for record in records:
self.write_record(record)
count += 1
if self._number_of_reads == 0:
# Must go back and record the record count...
offset = self.handle.tell()
self.handle.seek(0)
self._number_of_reads = count
self.write_header()
self.handle.seek(offset) # not essential?
else:
assert count == self._number_of_reads
if self._index is not None:
self._write_index()
return count
def _write_index(self):
assert len(self._index) == self._number_of_reads
handle = self.handle
self._index.sort()
self._index_start = handle.tell() # need for header
# XML...
if self._xml is not None:
xml = _as_bytes(self._xml)
else:
from Bio import __version__
xml = "<!-- This file was output with Biopython %s -->\n" % __version__
xml += "<!-- This XML and index block attempts to mimic Roche SFF files -->\n"
xml += "<!-- This file may be a combination of multiple SFF files etc -->\n"
xml = _as_bytes(xml)
xml_len = len(xml)
# Write to the file...
fmt = ">I4BLL"
fmt_size = struct.calcsize(fmt)
handle.write(_null * fmt_size + xml) # fill this later
fmt2 = ">6B"
assert 6 == struct.calcsize(fmt2)
self._index.sort()
index_len = 0 # don't know yet!
for name, offset in self._index:
# Roche files record the offsets using base 255 not 256.
# See comments for parsing the index block. There may be a faster
# way to code this, but we can't easily use shifts due to odd base
off3 = offset
off0 = off3 % 255
off3 -= off0
off1 = off3 % 65025
off3 -= off1
off2 = off3 % 16581375
off3 -= off2
assert offset == off0 + off1 + off2 + off3, \
"%i -> %i %i %i %i" % (offset, off0, off1, off2, off3)
off3, off2, off1, off0 = off3 // 16581375, off2 // 65025, \
off1 // 255, off0
assert off0 < 255 and off1 < 255 and off2 < 255 and off3 < 255, \
"%i -> %i %i %i %i" % (offset, off0, off1, off2, off3)
handle.write(name + struct.pack(fmt2, 0,
off3, off2, off1, off0, 255))
index_len += len(name) + 6
        # Note any padding is not included:
self._index_length = fmt_size + xml_len + index_len # need for header
# Pad out to an 8 byte boundary (although I have noticed some
        # real Roche SFF files neglect to do this despite their manual
# suggesting this padding should be there):
if self._index_length % 8:
padding = 8 - (self._index_length % 8)
handle.write(_null * padding)
else:
padding = 0
offset = handle.tell()
assert offset == self._index_start + self._index_length + padding, \
"%i vs %i + %i + %i" % (offset, self._index_start,
self._index_length, padding)
# Must now go back and update the index header with index size...
handle.seek(self._index_start)
handle.write(struct.pack(fmt, 778921588, # magic number
49, 46, 48, 48, # Roche index version, "1.00"
xml_len, index_len) + xml)
# Must now go back and update the header...
handle.seek(0)
self.write_header()
handle.seek(offset) # not essential?
def write_header(self):
# Do header...
key_length = len(self._key_sequence)
# file header (part one)
        # use big endian encoding >
# magic_number I
# version 4B
# index_offset Q
# index_length I
# number_of_reads I
# header_length H
# key_length H
# number_of_flows_per_read H
# flowgram_format_code B
# [rest of file header depends on the number of flows and how many keys]
fmt = '>I4BQIIHHHB%is%is' % (
self._number_of_flows_per_read, key_length)
# According to the spec, the header_length field should be the total
# number of bytes required by this set of header fields, and should be
# equal to "31 + number_of_flows_per_read + key_length" rounded up to
# the next value divisible by 8.
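        # For example, with 400 flows per read and a 4 byte key sequence the
        # fixed fields take 31 bytes, so 31 + 400 + 4 = 435, which is padded
        # up to a header_length of 440.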
if struct.calcsize(fmt) % 8 == 0:
padding = 0
else:
padding = 8 - (struct.calcsize(fmt) % 8)
header_length = struct.calcsize(fmt) + padding
assert header_length % 8 == 0
header = struct.pack(fmt, 779314790, # magic number 0x2E736666
0, 0, 0, 1, # version
self._index_start, self._index_length,
self._number_of_reads,
header_length, key_length,
self._number_of_flows_per_read,
1, # the only flowgram format code we support
self._flow_chars, self._key_sequence)
self.handle.write(header + _null * padding)
def write_record(self, record):
"""Write a single additional record to the output file.
        This assumes the header has already been written.
"""
# Basics
name = _as_bytes(record.id)
name_len = len(name)
seq = _as_bytes(str(record.seq).upper())
seq_len = len(seq)
# Qualities
try:
quals = record.letter_annotations["phred_quality"]
except KeyError:
raise ValueError("Missing PHRED qualities information for %s" % record.id)
# Flow
try:
flow_values = record.annotations["flow_values"]
flow_index = record.annotations["flow_index"]
if self._key_sequence != _as_bytes(record.annotations["flow_key"]) \
or self._flow_chars != _as_bytes(record.annotations["flow_chars"]):
raise ValueError("Records have inconsistent SFF flow data")
except KeyError:
raise ValueError("Missing SFF flow information for %s" % record.id)
except AttributeError:
raise ValueError("Header not written yet?")
# Clipping
try:
clip_qual_left = record.annotations["clip_qual_left"]
if clip_qual_left < 0:
raise ValueError("Negative SFF clip_qual_left value for %s" % record.id)
if clip_qual_left:
clip_qual_left += 1
clip_qual_right = record.annotations["clip_qual_right"]
if clip_qual_right < 0:
raise ValueError("Negative SFF clip_qual_right value for %s" % record.id)
clip_adapter_left = record.annotations["clip_adapter_left"]
if clip_adapter_left < 0:
raise ValueError("Negative SFF clip_adapter_left value for %s" % record.id)
if clip_adapter_left:
clip_adapter_left += 1
clip_adapter_right = record.annotations["clip_adapter_right"]
if clip_adapter_right < 0:
raise ValueError("Negative SFF clip_adapter_right value for %s" % record.id)
except KeyError:
raise ValueError("Missing SFF clipping information for %s" % record.id)
# Capture information for index
if self._index is not None:
offset = self.handle.tell()
# Check the position of the final record (before sort by name)
# Using a four-digit base 255 number, so the upper bound is
# 254*(1)+254*(255)+254*(255**2)+254*(255**3) = 4228250624
# or equivalently it overflows at 255**4 = 4228250625
if offset > 4228250624:
import warnings
warnings.warn("Read %s has file offset %i, which is too large "
"to store in the Roche SFF index structure. No "
"index block will be recorded." % (name, offset))
                # No point recording the offsets now
self._index = None
else:
self._index.append((name, self.handle.tell()))
# the read header format (fixed part):
# read_header_length H
# name_length H
# seq_len I
# clip_qual_left H
# clip_qual_right H
# clip_adapter_left H
# clip_adapter_right H
# [rest of read header depends on the name length etc]
# name
# flow values
# flow index
# sequence
# padding
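        # The fixed part of the format below is 16 bytes, so e.g. a 14
        # character read name gives 16 + 14 = 30 bytes, padded up to a
        # read_header_length of 32.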
read_header_fmt = '>2HI4H%is' % name_len
if struct.calcsize(read_header_fmt) % 8 == 0:
padding = 0
else:
padding = 8 - (struct.calcsize(read_header_fmt) % 8)
read_header_length = struct.calcsize(read_header_fmt) + padding
assert read_header_length % 8 == 0
data = struct.pack(read_header_fmt,
read_header_length,
name_len, seq_len,
clip_qual_left, clip_qual_right,
clip_adapter_left, clip_adapter_right,
name) + _null * padding
assert len(data) == read_header_length
# now the flowgram values, flowgram index, bases and qualities
# NOTE - assuming flowgram_format==1, which means struct type H
read_flow_fmt = ">%iH" % self._number_of_flows_per_read
read_flow_size = struct.calcsize(read_flow_fmt)
temp_fmt = ">%iB" % seq_len # used for flow index and quals
data += struct.pack(read_flow_fmt, *flow_values) \
+ struct.pack(temp_fmt, *flow_index) \
+ seq \
+ struct.pack(temp_fmt, *quals)
# now any final padding...
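        # (each flow value is 2 bytes, while flow index, bases and qualities
        # are 1 byte per base, hence read_flow_size + seq_len * 3; e.g. 400
        # flows and a 250 base read give 800 + 750 = 1550 bytes, needing 2
        # bytes of padding to reach a multiple of 8)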
padding = (read_flow_size + seq_len * 3) % 8
if padding:
padding = 8 - padding
self.handle.write(data + _null * padding)
if __name__ == "__main__":
print("Running quick self test")
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.sff"
with open(filename, "rb") as handle:
metadata = ReadRocheXmlManifest(handle)
with open(filename, "rb") as handle:
index1 = sorted(_sff_read_roche_index(handle))
with open(filename, "rb") as handle:
index2 = sorted(_sff_do_slow_index(handle))
assert index1 == index2
with open(filename, "rb") as handle:
assert len(index1) == len(list(SffIterator(handle)))
from Bio._py3k import StringIO
from io import BytesIO
with open(filename, "rb") as handle:
assert len(index1) == len(list(SffIterator(BytesIO(handle.read()))))
if sys.platform != "win32" and sys.version_info[0] < 3:
# Can be lazy and treat as binary...
with open(filename, "r") as handle:
assert len(index1) == len(list(SffIterator(handle)))
with open(filename) as handle:
index2 = sorted(_sff_read_roche_index(handle))
assert index1 == index2
with open(filename, "r") as handle:
index2 = sorted(_sff_do_slow_index(handle))
assert index1 == index2
with open(filename, "r") as handle:
assert len(index1) == len(list(SffIterator(handle)))
with open(filename, "r") as handle:
assert len(index1) == len(list(SffIterator(BytesIO(handle.read()))))
with open(filename, "rb") as handle:
sff = list(SffIterator(handle))
with open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb") as handle:
sff2 = list(SffIterator(handle))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
with open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb") as handle:
sff2 = list(SffIterator(handle))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
with open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb") as handle:
sff2 = list(SffIterator(handle))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
with open("../../Tests/Roche/E3MFGYR02_index_at_start.sff", "rb") as handle:
sff2 = list(SffIterator(handle))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
with open("../../Tests/Roche/E3MFGYR02_index_in_middle.sff", "rb") as handle:
sff2 = list(SffIterator(handle))
assert len(sff) == len(sff2)
for old, new in zip(sff, sff2):
assert old.id == new.id
assert str(old.seq) == str(new.seq)
with open(filename, "rb") as handle:
sff_trim = list(SffIterator(handle, trim=True))
with open(filename, "rb") as handle:
print(ReadRocheXmlManifest(handle))
from Bio import SeqIO
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads_no_trim.fasta"
fasta_no_trim = list(SeqIO.parse(filename, "fasta"))
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads_no_trim.qual"
qual_no_trim = list(SeqIO.parse(filename, "qual"))
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.fasta"
fasta_trim = list(SeqIO.parse(filename, "fasta"))
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.qual"
qual_trim = list(SeqIO.parse(filename, "qual"))
for s, sT, f, q, fT, qT in zip(sff, sff_trim, fasta_no_trim,
qual_no_trim, fasta_trim, qual_trim):
# print("")
print(s.id)
# print(s.seq)
# print(s.letter_annotations["phred_quality"])
assert s.id == f.id == q.id
assert str(s.seq) == str(f.seq)
assert s.letter_annotations[
"phred_quality"] == q.letter_annotations["phred_quality"]
assert s.id == sT.id == fT.id == qT.id
assert str(sT.seq) == str(fT.seq)
assert sT.letter_annotations[
"phred_quality"] == qT.letter_annotations["phred_quality"]
print("Writing with a list of SeqRecords...")
handle = BytesIO()
w = SffWriter(handle, xml=metadata)
w.write_file(sff) # list
data = handle.getvalue()
print("And again with an iterator...")
handle = BytesIO()
w = SffWriter(handle, xml=metadata)
w.write_file(iter(sff))
assert data == handle.getvalue()
# Check 100% identical to the original:
filename = "../../Tests/Roche/E3MFGYR02_random_10_reads.sff"
with open(filename, "rb") as handle:
original = handle.read()
assert len(data) == len(original)
assert data == original
del data
print("-" * 50)
filename = "../../Tests/Roche/greek.sff"
with open(filename, "rb") as handle:
for record in SffIterator(handle):
print(record.id)
with open(filename, "rb") as handle:
index1 = sorted(_sff_read_roche_index(handle))
with open(filename, "rb") as handle:
index2 = sorted(_sff_do_slow_index(handle))
assert index1 == index2
try:
with open(filename, "rb") as handle:
print(ReadRocheXmlManifest(handle))
assert False, "Should fail!"
except ValueError:
pass
with open(filename, "rb") as handle:
for record in SffIterator(handle):
pass
try:
for record in SffIterator(handle):
print(record.id)
assert False, "Should have failed"
except ValueError as err:
print("Checking what happens on re-reading a handle:")
print(err)
"""
# Ugly code to make test files...
index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
padding = len(index)%8
if padding:
padding = 8 - padding
index += chr(0)*padding
assert len(index)%8 == 0
# Ugly bit of code to make a fake index at start
records = list(SffIterator(
open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
out_handle = open(
"../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "w")
index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
padding = len(index)%8
if padding:
padding = 8 - padding
index += chr(0)*padding
w = SffWriter(out_handle, index=False, xml=None)
# Fake the header...
w._number_of_reads = len(records)
w._index_start = 0
w._index_length = 0
w._key_sequence = records[0].annotations["flow_key"]
w._flow_chars = records[0].annotations["flow_chars"]
w._number_of_flows_per_read = len(w._flow_chars)
w.write_header()
w._index_start = out_handle.tell()
w._index_length = len(index)
out_handle.seek(0)
w.write_header() # this time with index info
w.handle.write(index)
for record in records:
w.write_record(record)
out_handle.close()
records2 = list(SffIterator(
open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
for old, new in zip(records, records2):
assert str(old.seq)==str(new.seq)
i = list(_sff_do_slow_index(
open("../../Tests/Roche/E3MFGYR02_alt_index_at_start.sff", "rb")))
# Ugly bit of code to make a fake index in middle
records = list(SffIterator(
open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
out_handle = open(
"../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "w")
index = ".diy1.00This is a fake index block (DIY = Do It Yourself), which is allowed under the SFF standard.\0"
padding = len(index)%8
if padding:
padding = 8 - padding
index += chr(0)*padding
w = SffWriter(out_handle, index=False, xml=None)
# Fake the header...
w._number_of_reads = len(records)
w._index_start = 0
w._index_length = 0
w._key_sequence = records[0].annotations["flow_key"]
w._flow_chars = records[0].annotations["flow_chars"]
w._number_of_flows_per_read = len(w._flow_chars)
w.write_header()
for record in records[:5]:
w.write_record(record)
w._index_start = out_handle.tell()
w._index_length = len(index)
w.handle.write(index)
for record in records[5:]:
w.write_record(record)
out_handle.seek(0)
w.write_header() # this time with index info
out_handle.close()
records2 = list(SffIterator(
open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
for old, new in zip(records, records2):
assert str(old.seq)==str(new.seq)
j = list(_sff_do_slow_index(
open("../../Tests/Roche/E3MFGYR02_alt_index_in_middle.sff", "rb")))
# Ugly bit of code to make a fake index at end
records = list(SffIterator(
open("../../Tests/Roche/E3MFGYR02_random_10_reads.sff", "rb")))
with open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "w") as out_handle:
w = SffWriter(out_handle, index=False, xml=None)
# Fake the header...
w._number_of_reads = len(records)
w._index_start = 0
w._index_length = 0
w._key_sequence = records[0].annotations["flow_key"]
w._flow_chars = records[0].annotations["flow_chars"]
w._number_of_flows_per_read = len(w._flow_chars)
w.write_header()
for record in records:
w.write_record(record)
w._index_start = out_handle.tell()
w._index_length = len(index)
out_handle.write(index)
out_handle.seek(0)
w.write_header() # this time with index info
records2 = list(SffIterator(
open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
for old, new in zip(records, records2):
assert str(old.seq)==str(new.seq)
try:
print(ReadRocheXmlManifest(
open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
assert False, "Should fail!"
except ValueError:
pass
k = list(_sff_do_slow_index(
open("../../Tests/Roche/E3MFGYR02_alt_index_at_end.sff", "rb")))
"""
print("Done")
| {
"content_hash": "69d9df673872bc9825ae4c70d6cb30f0",
"timestamp": "",
"source": "github",
"line_count": 1566,
"max_line_length": 115,
"avg_line_length": 41.44699872286079,
"alnum_prop": 0.5962468801035343,
"repo_name": "poojavade/Genomics_Docker",
"id": "353c6ba9e7cc0a25c02b0bf43b750ff589278827",
"size": "65210",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/SeqIO/SffIO.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
} |
NELVES = 3012210
class LLNode(object):
def __init__(self, v, p, n):
self.value = v
self.prevn = p
self.nextn = n
def insert(self, v):
new = LLNode(v, self, self.nextn)
self.nextn = new
return new
def remove(self):
n = self.nextn
self.prevn.nextn = n
n.prevn = self.prevn
return self
def __str__(self):
return "v: %s, p: %r, n: %r" % (self.value, self.prevn, self.nextn)
def print_list(head, l):
node = head
count = 0
while node is not None and count < l:
print node
node = node.nextn
count += 1
elves_with_presents = set([i for i in range(NELVES)])
presents = [1 for _ in range(NELVES)]
stealing = False
elf_stealing = None
while len(elves_with_presents) > 1:
print len(elves_with_presents)
for index in range(len(presents)):
if not stealing:
if presents[index] > 0:
stealing = True
elf_stealing = index
elif stealing:
if presents[index] > 0:
# if index == elf_stealing:
# break
presents[elf_stealing] += presents[index]
presents[index] = 0
stealing = False
elf_stealing = None
elves_with_presents.remove(index)
for elf in elves_with_presents:
print elf + 1
print
head = LLNode(1, None, None)
node = head
val_to_node = {}
for i in range(1, NELVES):
node = node.insert(i + 1)
val_to_node[i + 1] = node
node.nextn = head
head.prevn = val_to_node[NELVES]
node_count = NELVES
val_across = None
if NELVES % 2 == 0:
val_across = (1 + NELVES / 2) % NELVES
else:
val_across = (1 + (NELVES - 1) / 2) % NELVES
node = head
across = val_to_node[val_across]
while node_count > 1:
if node_count % 100000 == 0:
print node_count
removed = across.remove()
node = node.nextn
across = removed.nextn
if node_count % 2 == 1:
across = across.nextn
node_count -= 1
print node
| {
"content_hash": "f5dac256dbeee5da61202ad0b6da1ba0",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 22.02127659574468,
"alnum_prop": 0.548792270531401,
"repo_name": "jlucangelio/adventofcode",
"id": "0df3aa877cd0b950ba1c6a90c24a80da0aa51359",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day19.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56609"
}
],
"symlink_target": ""
} |
import json
import yaml
from heat_integrationtests.common import test
class TemplateResourceTest(test.HeatIntegrationTest):
"""Prove that we can use the registry in a nested provider."""
template = '''
heat_template_version: 2013-05-23
resources:
secret1:
type: OS::Heat::RandomString
outputs:
secret-out:
value: { get_attr: [secret1, value] }
'''
nested_templ = '''
heat_template_version: 2013-05-23
resources:
secret2:
type: OS::Heat::RandomString
outputs:
value:
value: { get_attr: [secret2, value] }
'''
env_templ = '''
resource_registry:
"OS::Heat::RandomString": nested.yaml
'''
def setUp(self):
super(TemplateResourceTest, self).setUp()
self.client = self.orchestration_client
def test_nested_env(self):
main_templ = '''
heat_template_version: 2013-05-23
resources:
secret1:
type: My::NestedSecret
outputs:
secret-out:
value: { get_attr: [secret1, value] }
'''
nested_templ = '''
heat_template_version: 2013-05-23
resources:
secret2:
type: My::Secret
outputs:
value:
value: { get_attr: [secret2, value] }
'''
env_templ = '''
resource_registry:
"My::Secret": "OS::Heat::RandomString"
"My::NestedSecret": nested.yaml
'''
stack_identifier = self.stack_create(
template=main_templ,
files={'nested.yaml': nested_templ},
environment=env_templ)
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'secret1')
# prove that resource.parent_resource is populated.
sec2 = self.client.resources.get(nested_ident, 'secret2')
self.assertEqual('secret1', sec2.parent_resource)
def test_no_infinite_recursion(self):
"""Prove that we can override a python resource.
And use that resource within the template resource.
"""
stack_identifier = self.stack_create(
template=self.template,
files={'nested.yaml': self.nested_templ},
environment=self.env_templ)
self.assert_resource_is_a_stack(stack_identifier, 'secret1')
def test_nested_stack_delete_then_delete_parent_stack(self):
"""Check the robustness of stack deletion.
This tests that if you manually delete a nested
stack, the parent stack is still deletable.
"""
# disable cleanup so we can call _stack_delete() directly.
stack_identifier = self.stack_create(
template=self.template,
files={'nested.yaml': self.nested_templ},
environment=self.env_templ,
enable_cleanup=False)
nested_ident = self.assert_resource_is_a_stack(stack_identifier,
'secret1')
self._stack_delete(nested_ident)
self._stack_delete(stack_identifier)
def test_change_in_file_path(self):
stack_identifier = self.stack_create(
template=self.template,
files={'nested.yaml': self.nested_templ},
environment=self.env_templ)
stack = self.client.stacks.get(stack_identifier)
secret_out1 = self._stack_output(stack, 'secret-out')
nested_templ_2 = '''
heat_template_version: 2013-05-23
resources:
secret2:
type: OS::Heat::RandomString
outputs:
value:
value: freddy
'''
env_templ_2 = '''
resource_registry:
"OS::Heat::RandomString": new/nested.yaml
'''
self.update_stack(stack_identifier,
template=self.template,
files={'new/nested.yaml': nested_templ_2},
environment=env_templ_2)
stack = self.client.stacks.get(stack_identifier)
secret_out2 = self._stack_output(stack, 'secret-out')
self.assertNotEqual(secret_out1, secret_out2)
self.assertEqual('freddy', secret_out2)
class NestedAttributesTest(test.HeatIntegrationTest):
"""Prove that we can use the template resource references."""
main_templ = '''
heat_template_version: 2014-10-16
resources:
secret2:
type: My::NestedSecret
outputs:
old_way:
value: { get_attr: [secret2, nested_str]}
test_attr1:
value: { get_attr: [secret2, resource.secret1, value]}
test_attr2:
value: { get_attr: [secret2, resource.secret1.value]}
test_ref:
value: { get_resource: secret2 }
'''
env_templ = '''
resource_registry:
"My::NestedSecret": nested.yaml
'''
def setUp(self):
super(NestedAttributesTest, self).setUp()
self.client = self.orchestration_client
def test_stack_ref(self):
nested_templ = '''
heat_template_version: 2014-10-16
resources:
secret1:
type: OS::Heat::RandomString
'''
stack_identifier = self.stack_create(
template=self.main_templ,
files={'nested.yaml': nested_templ},
environment=self.env_templ)
self.assert_resource_is_a_stack(stack_identifier, 'secret2')
stack = self.client.stacks.get(stack_identifier)
test_ref = self._stack_output(stack, 'test_ref')
self.assertIn('arn:openstack:heat:', test_ref)
def test_transparent_ref(self):
"""With the addition of OS::stack_id we can now use the nested resource
more transparently.
"""
nested_templ = '''
heat_template_version: 2014-10-16
resources:
secret1:
type: OS::Heat::RandomString
outputs:
OS::stack_id:
value: {get_resource: secret1}
nested_str:
value: {get_attr: [secret1, value]}
'''
stack_identifier = self.stack_create(
template=self.main_templ,
files={'nested.yaml': nested_templ},
environment=self.env_templ)
self.assert_resource_is_a_stack(stack_identifier, 'secret2')
stack = self.client.stacks.get(stack_identifier)
test_ref = self._stack_output(stack, 'test_ref')
test_attr = self._stack_output(stack, 'old_way')
self.assertNotIn('arn:openstack:heat', test_ref)
self.assertEqual(test_attr, test_ref)
def test_nested_attributes(self):
nested_templ = '''
heat_template_version: 2014-10-16
resources:
secret1:
type: OS::Heat::RandomString
outputs:
nested_str:
value: {get_attr: [secret1, value]}
'''
stack_identifier = self.stack_create(
template=self.main_templ,
files={'nested.yaml': nested_templ},
environment=self.env_templ)
self.assert_resource_is_a_stack(stack_identifier, 'secret2')
stack = self.client.stacks.get(stack_identifier)
old_way = self._stack_output(stack, 'old_way')
test_attr1 = self._stack_output(stack, 'test_attr1')
test_attr2 = self._stack_output(stack, 'test_attr2')
self.assertEqual(old_way, test_attr1)
self.assertEqual(old_way, test_attr2)
class TemplateResourceUpdateTest(test.HeatIntegrationTest):
"""Prove that we can do template resource updates."""
main_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: the.yaml
Properties:
one: my_name
two: your_name
Outputs:
identifier:
Value: {Ref: the_nested}
value:
Value: {'Fn::GetAtt': [the_nested, the_str]}
'''
main_template_change_prop = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: the.yaml
Properties:
one: updated_name
two: your_name
Outputs:
identifier:
Value: {Ref: the_nested}
value:
Value: {'Fn::GetAtt': [the_nested, the_str]}
'''
main_template_add_prop = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: the.yaml
Properties:
one: my_name
two: your_name
three: third_name
Outputs:
identifier:
Value: {Ref: the_nested}
value:
Value: {'Fn::GetAtt': [the_nested, the_str]}
'''
main_template_remove_prop = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: the.yaml
Properties:
one: my_name
Outputs:
identifier:
Value: {Ref: the_nested}
value:
Value: {'Fn::GetAtt': [the_nested, the_str]}
'''
initial_tmpl = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: foo
Type: String
two:
Default: bar
Type: String
Resources:
NestedResource:
Type: OS::Heat::RandomString
Properties:
salt: {Ref: one}
Outputs:
the_str:
Value: {'Fn::GetAtt': [NestedResource, value]}
'''
prop_change_tmpl = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: yikes
Type: String
two:
Default: foo
Type: String
Resources:
NestedResource:
Type: OS::Heat::RandomString
Properties:
salt: {Ref: two}
Outputs:
the_str:
Value: {'Fn::GetAtt': [NestedResource, value]}
'''
prop_add_tmpl = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: yikes
Type: String
two:
Default: foo
Type: String
three:
Default: bar
Type: String
Resources:
NestedResource:
Type: OS::Heat::RandomString
Properties:
salt: {Ref: three}
Outputs:
the_str:
Value: {'Fn::GetAtt': [NestedResource, value]}
'''
prop_remove_tmpl = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: yikes
Type: String
Resources:
NestedResource:
Type: OS::Heat::RandomString
Properties:
salt: {Ref: one}
Outputs:
the_str:
Value: {'Fn::GetAtt': [NestedResource, value]}
'''
attr_change_tmpl = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: foo
Type: String
two:
Default: bar
Type: String
Resources:
NestedResource:
Type: OS::Heat::RandomString
Properties:
salt: {Ref: one}
Outputs:
the_str:
Value: {'Fn::GetAtt': [NestedResource, value]}
something_else:
Value: just_a_string
'''
content_change_tmpl = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: foo
Type: String
two:
Default: bar
Type: String
Resources:
NestedResource:
Type: OS::Heat::RandomString
Properties:
salt: yum
Outputs:
the_str:
Value: {'Fn::GetAtt': [NestedResource, value]}
'''
EXPECTED = (UPDATE, NOCHANGE) = ('update', 'nochange')
scenarios = [
('no_changes', dict(template=main_template,
provider=initial_tmpl,
expect=NOCHANGE)),
('main_tmpl_change', dict(template=main_template_change_prop,
provider=initial_tmpl,
expect=UPDATE)),
('provider_change', dict(template=main_template,
provider=content_change_tmpl,
expect=UPDATE)),
('provider_props_change', dict(template=main_template,
provider=prop_change_tmpl,
expect=UPDATE)),
('provider_props_add', dict(template=main_template_add_prop,
provider=prop_add_tmpl,
expect=UPDATE)),
('provider_props_remove', dict(template=main_template_remove_prop,
provider=prop_remove_tmpl,
expect=NOCHANGE)),
('provider_attr_change', dict(template=main_template,
provider=attr_change_tmpl,
expect=NOCHANGE)),
]
def setUp(self):
super(TemplateResourceUpdateTest, self).setUp()
self.client = self.orchestration_client
def test_template_resource_update_template_schema(self):
stack_identifier = self.stack_create(
template=self.main_template,
files={'the.yaml': self.initial_tmpl})
stack = self.client.stacks.get(stack_identifier)
initial_id = self._stack_output(stack, 'identifier')
initial_val = self._stack_output(stack, 'value')
self.update_stack(stack_identifier,
self.template,
files={'the.yaml': self.provider})
stack = self.client.stacks.get(stack_identifier)
self.assertEqual(initial_id,
self._stack_output(stack, 'identifier'))
if self.expect == self.NOCHANGE:
self.assertEqual(initial_val,
self._stack_output(stack, 'value'))
else:
self.assertNotEqual(initial_val,
self._stack_output(stack, 'value'))
class TemplateResourceUpdateFailedTest(test.HeatIntegrationTest):
"""Prove that we can do updates on a nested stack to fix a stack."""
main_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
keypair:
Type: OS::Nova::KeyPair
Properties:
name: replace-this
save_private_key: false
server:
Type: server_fail.yaml
DependsOn: keypair
'''
nested_templ = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
RealRandom:
Type: OS::Heat::RandomString
'''
def setUp(self):
super(TemplateResourceUpdateFailedTest, self).setUp()
self.client = self.orchestration_client
self.assign_keypair()
def test_update_on_failed_create(self):
# create a stack with "server" dependent on "keypair", but
# keypair fails, so "server" is not created properly.
# We then fix the template and it should succeed.
broken_templ = self.main_template.replace('replace-this',
self.keypair_name)
stack_identifier = self.stack_create(
template=broken_templ,
files={'server_fail.yaml': self.nested_templ},
expected_status='CREATE_FAILED')
fixed_templ = self.main_template.replace('replace-this',
test.rand_name())
self.update_stack(stack_identifier,
fixed_templ,
files={'server_fail.yaml': self.nested_templ})
class TemplateResourceAdoptTest(test.HeatIntegrationTest):
"""Prove that we can do template resource adopt/abandon."""
main_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: the.yaml
Properties:
one: my_name
Outputs:
identifier:
Value: {Ref: the_nested}
value:
Value: {'Fn::GetAtt': [the_nested, the_str]}
'''
nested_templ = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: foo
Type: String
Resources:
RealRandom:
Type: OS::Heat::RandomString
Properties:
salt: {Ref: one}
Outputs:
the_str:
Value: {'Fn::GetAtt': [RealRandom, value]}
'''
def setUp(self):
super(TemplateResourceAdoptTest, self).setUp()
self.client = self.orchestration_client
def _yaml_to_json(self, yaml_templ):
return yaml.load(yaml_templ)
def test_abandon(self):
stack_identifier = self.stack_create(
template=self.main_template,
files={'the.yaml': self.nested_templ},
enable_cleanup=False
)
info = self.stack_abandon(stack_id=stack_identifier)
self.assertEqual(self._yaml_to_json(self.main_template),
info['template'])
self.assertEqual(self._yaml_to_json(self.nested_templ),
info['resources']['the_nested']['template'])
def test_adopt(self):
data = {
'resources': {
'the_nested': {
"type": "the.yaml",
"resources": {
"RealRandom": {
"type": "OS::Heat::RandomString",
'resource_data': {'value': 'goopie'},
'resource_id': 'froggy'
}
}
}
},
"environment": {"parameters": {}},
"template": yaml.load(self.main_template)
}
stack_identifier = self.stack_adopt(
adopt_data=json.dumps(data),
files={'the.yaml': self.nested_templ})
self.assert_resource_is_a_stack(stack_identifier, 'the_nested')
stack = self.client.stacks.get(stack_identifier)
self.assertEqual('goopie', self._stack_output(stack, 'value'))
class TemplateResourceCheckTest(test.HeatIntegrationTest):
"""Prove that we can do template resource check."""
main_template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
the_nested:
Type: the.yaml
Properties:
one: my_name
Outputs:
identifier:
Value: {Ref: the_nested}
value:
Value: {'Fn::GetAtt': [the_nested, the_str]}
'''
nested_templ = '''
HeatTemplateFormatVersion: '2012-12-12'
Parameters:
one:
Default: foo
Type: String
Resources:
RealRandom:
Type: OS::Heat::RandomString
Properties:
salt: {Ref: one}
Outputs:
the_str:
Value: {'Fn::GetAtt': [RealRandom, value]}
'''
def setUp(self):
super(TemplateResourceCheckTest, self).setUp()
self.client = self.orchestration_client
def test_check(self):
stack_identifier = self.stack_create(
template=self.main_template,
files={'the.yaml': self.nested_templ}
)
self.client.actions.check(stack_id=stack_identifier)
self._wait_for_stack_status(stack_identifier, 'CHECK_COMPLETE')
class TemplateResourceErrorMessageTest(test.HeatIntegrationTest):
"""Prove that nested stack errors don't suck."""
template = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
victim:
Type: fail.yaml
'''
nested_templ = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
oops:
Type: OS::Heat::TestResource
Properties:
fail: true
wait_secs: 2
'''
def setUp(self):
super(TemplateResourceErrorMessageTest, self).setUp()
self.client = self.orchestration_client
def test_fail(self):
stack_identifier = self.stack_create(
template=self.template,
files={'fail.yaml': self.nested_templ},
expected_status='CREATE_FAILED')
stack = self.client.stacks.get(stack_identifier)
exp_path = 'resources.victim.resources.oops'
exp_msg = 'Test Resource failed oops'
exp = 'Resource CREATE failed: ValueError: %s: %s' % (exp_path,
exp_msg)
self.assertEqual(exp, stack.stack_status_reason)
| {
"content_hash": "c35c43ac46eafc68f6328fdba503fed4",
"timestamp": "",
"source": "github",
"line_count": 674,
"max_line_length": 79,
"avg_line_length": 27.68545994065282,
"alnum_prop": 0.5969989281886388,
"repo_name": "rh-s/heat",
"id": "0d5734b3ee8bcb092ff095f1f5ff3ff46a97f2dd",
"size": "19233",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat_integrationtests/functional/test_template_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6288599"
},
{
"name": "Shell",
"bytes": "32845"
}
],
"symlink_target": ""
} |
"""This code is a part of Hydra Toolkit
.. module:: hydratk.translation.lib.network.dbi.client.cs.messages
:platform: Unix
:synopsis: Czech language translation for DB client messages
.. moduleauthor:: Petr Rašek <bowman@hydratk.org>
"""
language = {
'name': 'Čeština',
'ISO-639-1': 'cs'
}
from hydratk.core import const
HIGHLIGHT_START = chr(27) + chr(91) + "1m"
HIGHLIGHT_US = chr(27) + chr(91) + "4m"
HIGHLIGHT_END = chr(27) + chr(91) + "0m"
msg = {
'htk_dbi_unknown_type': ["Neznámý typ procedury: '{0}'"],
'htk_dbi_connecting': ["Připojuji se na server: '{0}'"],
'htk_dbi_connected': ["Spojení se serverem bylo úspěšné"],
'htk_dbi_connecting_error': ["Nastala chyba při spojení se serverem"],
'htk_dbi_disconnecting': ["Ukončuji spojení se serverem"],
'htk_dbi_disconnected': ["Spojení se serverem bylo ukončeno"],
'htk_dbi_disconnecting_error': ["Nastala chyba při ukončování spojení se serverem"],
'htk_dbi_not_connected': ["Není navázáno spojení se serverem"],
'htk_dbi_executing_query': ["Vykonávám dotaz: '{0}'"],
'htk_dbi_query_executed': ["Vykonávání dotazu ukončeno"],
'htk_dbi_query_error': ["Nastala chyba při vykonávání dotazu"],
'htk_dbi_calling_proc': ["Volám proceduru: '{0}'"],
'htk_dbi_proc_called': ["Volání procedury ukončeno: '{0}'"],
'htk_dbi_executing_command': ["Vykonávám příkaz: '{0}'"],
'htk_dbi_command_executed': ["Vykonávání příkazu ukončeno"]
}
| {
"content_hash": "8deaa862d18b666be4e08ab34b56c692",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 88,
"avg_line_length": 39.432432432432435,
"alnum_prop": 0.6620973269362577,
"repo_name": "hydratk/hydratk-lib-network",
"id": "0584acf2c4b4f31390fbedbd2c02d43f436bc953",
"size": "1534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hydratk/translation/lib/network/dbi/client/cs/messages.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "28565"
},
{
"name": "Python",
"bytes": "451841"
}
],
"symlink_target": ""
} |
import json
import logging
import pickle
from typing import Any, Iterable, Optional, Union
import pendulum
from sqlalchemy import Column, LargeBinary, String, and_
from sqlalchemy.orm import Query, Session, reconstructor
from airflow.configuration import conf
from airflow.models.base import COLLATION_ARGS, ID_LEN, Base
from airflow.utils import timezone
from airflow.utils.helpers import is_container
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.sqlalchemy import UtcDateTime
log = logging.getLogger(__name__)
# MAX XCOM Size is 48KB
# https://github.com/apache/airflow/pull/1618#discussion_r68249677
MAX_XCOM_SIZE = 49344
XCOM_RETURN_KEY = 'return_value'
class BaseXCom(Base, LoggingMixin):
"""Base class for XCom objects."""
__tablename__ = "xcom"
key = Column(String(512, **COLLATION_ARGS), primary_key=True)
value = Column(LargeBinary)
timestamp = Column(UtcDateTime, default=timezone.utcnow, nullable=False)
execution_date = Column(UtcDateTime, primary_key=True)
# source information
task_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
dag_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
@reconstructor
def init_on_load(self):
"""
Called by the ORM after the instance has been loaded from the DB or otherwise reconstituted
        i.e. automatically deserialize the XCom value when loading from the DB.
"""
self.value = self.orm_deserialize_value()
def __repr__(self):
return f'<XCom "{self.key}" ({self.task_id} @ {self.execution_date})>'
@classmethod
@provide_session
def set(cls, key, value, execution_date, task_id, dag_id, session=None):
"""
Store an XCom value.
:return: None
"""
session.expunge_all()
value = XCom.serialize_value(value)
# remove any duplicate XComs
session.query(cls).filter(
cls.key == key, cls.execution_date == execution_date, cls.task_id == task_id, cls.dag_id == dag_id
).delete()
session.commit()
# insert new XCom
session.add(XCom(key=key, value=value, execution_date=execution_date, task_id=task_id, dag_id=dag_id))
session.commit()
@classmethod
@provide_session
def get_one(
cls,
execution_date: pendulum.DateTime,
key: Optional[str] = None,
task_id: Optional[Union[str, Iterable[str]]] = None,
dag_id: Optional[Union[str, Iterable[str]]] = None,
include_prior_dates: bool = False,
session: Session = None,
) -> Optional[Any]:
"""
Retrieve an XCom value, optionally meeting certain criteria. Returns None
        if there are no results.
:param execution_date: Execution date for the task
:type execution_date: pendulum.datetime
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. To remove the filter, pass key=None.
:type key: str
:param task_id: Only XComs from task with matching id will be
pulled. Can pass None to remove the filter.
:type task_id: str
:param dag_id: If provided, only pulls XCom from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_id: str
:param include_prior_dates: If False, only XCom from the current
execution_date are returned. If True, XCom from previous dates
are returned as well.
:type include_prior_dates: bool
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
result = cls.get_many(
execution_date=execution_date,
key=key,
task_ids=task_id,
dag_ids=dag_id,
include_prior_dates=include_prior_dates,
session=session,
).first()
if result:
return result.value
return None
@classmethod
@provide_session
def get_many(
cls,
execution_date: pendulum.DateTime,
key: Optional[str] = None,
task_ids: Optional[Union[str, Iterable[str]]] = None,
dag_ids: Optional[Union[str, Iterable[str]]] = None,
include_prior_dates: bool = False,
limit: Optional[int] = None,
session: Session = None,
) -> Query:
"""
Composes a query to get one or more values from the xcom table.
:param execution_date: Execution date for the task
:type execution_date: pendulum.datetime
:param key: A key for the XCom. If provided, only XComs with matching
keys will be returned. To remove the filter, pass key=None.
:type key: str
:param task_ids: Only XComs from tasks with matching ids will be
pulled. Can pass None to remove the filter.
:type task_ids: str or iterable of strings (representing task_ids)
:param dag_ids: If provided, only pulls XComs from this DAG.
If None (default), the DAG of the calling task is used.
:type dag_ids: str
:param include_prior_dates: If False, only XComs from the current
execution_date are returned. If True, XComs from previous dates
are returned as well.
:type include_prior_dates: bool
:param limit: If required, limit the number of returned objects.
XCom objects can be quite big and you might want to limit the
number of rows.
:type limit: int
:param session: database session
:type session: sqlalchemy.orm.session.Session
"""
filters = []
if key:
filters.append(cls.key == key)
if task_ids:
if is_container(task_ids):
filters.append(cls.task_id.in_(task_ids))
else:
filters.append(cls.task_id == task_ids)
if dag_ids:
if is_container(dag_ids):
filters.append(cls.dag_id.in_(dag_ids))
else:
filters.append(cls.dag_id == dag_ids)
if include_prior_dates:
filters.append(cls.execution_date <= execution_date)
else:
filters.append(cls.execution_date == execution_date)
query = (
session.query(cls)
.filter(and_(*filters))
.order_by(cls.execution_date.desc(), cls.timestamp.desc())
)
if limit:
return query.limit(limit)
else:
return query
@classmethod
@provide_session
def delete(cls, xcoms, session=None):
"""Delete Xcom"""
if isinstance(xcoms, XCom):
xcoms = [xcoms]
for xcom in xcoms:
if not isinstance(xcom, XCom):
raise TypeError(f'Expected XCom; received {xcom.__class__.__name__}')
session.delete(xcom)
session.commit()
@staticmethod
def serialize_value(value: Any):
"""Serialize Xcom value to str or pickled object"""
if conf.getboolean('core', 'enable_xcom_pickling'):
return pickle.dumps(value)
try:
return json.dumps(value).encode('UTF-8')
except (ValueError, TypeError):
log.error(
"Could not serialize the XCom value into JSON."
" If you are using pickle instead of JSON for XCom,"
" then you need to enable pickle support for XCom"
" in your airflow config."
)
raise
@staticmethod
def deserialize_value(result: "XCom") -> Any:
"""Deserialize XCom value from str or pickle object"""
if conf.getboolean('core', 'enable_xcom_pickling'):
try:
return pickle.loads(result.value)
except pickle.UnpicklingError:
return json.loads(result.value.decode('UTF-8'))
else:
try:
return json.loads(result.value.decode('UTF-8'))
except (json.JSONDecodeError, UnicodeDecodeError):
return pickle.loads(result.value)
def orm_deserialize_value(self) -> Any:
"""
Deserialize method which is used to reconstruct ORM XCom object.
This method should be overridden in custom XCom backends to avoid
unnecessary request or other resource consuming operations when
creating XCom orm model. This is used when viewing XCom listing
in the webserver, for example.
"""
return BaseXCom.deserialize_value(self)
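# A custom backend is enabled through the ``[core] xcom_backend`` config
# option resolved by resolve_xcom_backend() below. As a rough illustration
# (hypothetical class, not part of Airflow), such a backend might override
# orm_deserialize_value() to return a cheap reference instead of the full
# payload when XComs are listed in the webserver:
#
#   class ReferenceOnlyXCom(BaseXCom):
#       def orm_deserialize_value(self):
#           return f"xcom://{self.dag_id}/{self.task_id}/{self.key}"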
def resolve_xcom_backend():
"""Resolves custom XCom class"""
clazz = conf.getimport("core", "xcom_backend", fallback=f"airflow.models.xcom.{BaseXCom.__name__}")
if clazz:
if not issubclass(clazz, BaseXCom):
raise TypeError(
f"Your custom XCom class `{clazz.__name__}` is not a subclass of `{BaseXCom.__name__}`."
)
return clazz
return BaseXCom
XCom = resolve_xcom_backend()
| {
"content_hash": "e0661cf1428a76f52a0f3b98099105e0",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 110,
"avg_line_length": 35.52734375,
"alnum_prop": 0.6102253985706432,
"repo_name": "sekikn/incubator-airflow",
"id": "d9cdacfa065323ec3336bf206d261ffbab4bb303",
"size": "9883",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/models/xcom.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 50,
'gradient_steps': GRADIENT_STEPS
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_b(name):
# Same as A but with Uniform(5)
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_c(name):
# Same as A but with Uniform(10)
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(10),
'gradient_steps': GRADIENT_STEPS
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def exp_d(name):
# Same as A but with Uniform(25)
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television'
# 'dish washer',
# ['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200], #, 2500, 2400],
on_power_thresholds=[20, 20, 20], #, 20, 20],
max_input_power=1000,
min_on_durations=[60, 60, 60], #, 1800, 1800],
window=("2013-06-01", "2014-07-01"),
seq_length=1000,
output_one_appliance=False,
boolean_targets=False,
min_off_duration=60,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=0.01),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('abcd'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| {
"content_hash": "1d5a867f63711b2fd6542693c047847c",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 92,
"avg_line_length": 29.171875,
"alnum_prop": 0.5431173004820568,
"repo_name": "JackKelly/neuralnilm_prototype",
"id": "010c9c4900250e34a28477cd117700dee41fc91c",
"size": "7468",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/e104.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4536723"
}
],
"symlink_target": ""
} |
from crosspm.helpers.locker import Locker
class Usedby(Locker):
def __init__(self, config, do_load, recursive):
# Ignore do_load flag
super(Usedby, self).__init__(config, False, recursive)
def usedby_packages(self, deps_file_path=None, depslock_file_path=None, packages=None):
"""
        Lock packages. The downloader searches for packages.
"""
if deps_file_path is None:
deps_file_path = self._deps_path
if depslock_file_path is None:
depslock_file_path = self._depslock_path
if deps_file_path == depslock_file_path:
depslock_file_path += '.lock'
if packages is None:
self.search_dependencies(deps_file_path)
else:
self._root_package.packages = packages
self._log.info('Done!')
def search_dependencies(self, depslock_file_path):
self._log.info('Check dependencies ...')
self._root_package.find_usedby(depslock_file_path, property_validate=True)
self._log.info('')
self._log.info('Dependency tree:')
self._root_package.print(0, self._config.output('tree', [{self._config.name_column: 0}]))
def entrypoint(self, *args, **kwargs):
self.usedby_packages(*args, **kwargs)
| {
"content_hash": "e28580e4ccb1551a27fb8d519113994f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 97,
"avg_line_length": 37.26470588235294,
"alnum_prop": 0.6140489344909235,
"repo_name": "devopshq/crosspm",
"id": "3a26e78b8ebbea82969d1b07095941d6524f32b3",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crosspm/helpers/usedby.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "8406"
},
{
"name": "Jinja",
"bytes": "669"
},
{
"name": "Python",
"bytes": "229070"
},
{
"name": "Shell",
"bytes": "8264"
}
],
"symlink_target": ""
} |
""" Cgroups read engine to read the cgroups data periodically
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import logging
import os
import threading
import time
from treadmill import appenv
from treadmill import exc
from treadmill import fs
from treadmill import metrics
CORE_GROUPS = [
'apps',
'core',
'treadmill'
]
_LOGGER = logging.getLogger(__name__)
def _sys_svcs(root_dir):
"""Contructs list of system services."""
return sorted([
os.path.basename(s)
for s in glob.glob(os.path.join(root_dir, 'init', '*'))
if not (s.endswith('.out') or s.endswith('.err'))])
class CgroupReader(object):
"""Cgroup reader engine to spawn new thread to read cgroup periodically
"""
def __init__(self, approot, interval):
self.cache = {'treadmill': {}, 'core': {}, 'app': {}}
self._interval = interval
self._tm_env = appenv.AppEnvironment(root=approot)
self._sys_svcs = _sys_svcs(approot)
# TODO: sys_maj_min will be used changing treadmill.metrics.app_metrics
self._sys_maj_min = '{}:{}'.format(*fs.path_to_maj_min(approot))
self._sys_block_dev = fs.maj_min_to_blk(*fs.path_to_maj_min(approot))
        # if interval is zero or negative, we just read one time
if interval <= 0:
self._read()
else:
self._loop()
def get(self, cgrp_name):
"""Get a cgroup data"""
(data_type, cgrp) = cgrp_name.split('.', 1)
return self.cache[data_type][cgrp]
def snapshot(self):
"""Get all cgroups data in cache"""
return self.cache
def list(self):
"""Get all cgroups item name in a list"""
return (
['treadmill.{}'.format(cgrp)
for cgrp in self.cache['treadmill'].keys()] +
['core.{}'.format(cgrp) for cgrp in self.cache['core'].keys()] +
['app.{}'.format(cgrp) for cgrp in self.cache['app'].keys()]
)
def _loop(self):
before = time.time()
self._read()
now = time.time()
next_wait = int(self._interval + before - now)
        # should not happen, as it would mean _read() is taking longer than the interval
if next_wait < 0:
next_wait = 0
threading.Timer(next_wait, self._loop).start()
def _get_block_dev_version(self, app_unique_name):
try:
localdisk = self._tm_env.svc_localdisk.get(app_unique_name)
blkio_major_minor = '{major}:{minor}'.format(
major=localdisk['dev_major'],
minor=localdisk['dev_minor'],
)
block_dev = localdisk['block_dev']
except (exc.TreadmillError, IOError, OSError):
blkio_major_minor = None
block_dev = None
return (block_dev, blkio_major_minor)
def _read(self):
_LOGGER.info("start reading cgroups")
sys_block_dev = self._sys_block_dev
for cgrp in CORE_GROUPS:
if cgrp == 'treadmill':
self.cache['treadmill'][cgrp] = metrics.app_metrics(
cgrp, sys_block_dev
)
else:
core_cgrp = os.path.join('treadmill', cgrp)
self.cache['treadmill'][cgrp] = metrics.app_metrics(
core_cgrp, None
)
for svc in self._sys_svcs:
svc_cgrp = os.path.join('treadmill', 'core', svc)
self.cache['core'][svc] = metrics.app_metrics(
svc_cgrp, None
)
seen_apps = set()
for app_dir in glob.glob('%s/*' % self._tm_env.apps_dir):
if not os.path.isdir(app_dir):
continue
app_unique_name = os.path.basename(app_dir)
seen_apps.add(app_unique_name)
(block_dev, _blkio_major_minor) = self._get_block_dev_version(
app_unique_name
)
app_cgrp = os.path.join('treadmill', 'apps', app_unique_name)
self.cache['app'][app_unique_name] = metrics.app_metrics(
app_cgrp, block_dev
)
# Removed metrics for apps that are not present anymore
for cgrp in set(self.cache['app']) - seen_apps:
del self.cache['app'][cgrp]
_LOGGER.info(
"%d core services, %d containers in cache",
len(self.cache['core']), len(self.cache['app'])
)
| {
"content_hash": "617dec343bee27f28e37abedb3706bc9",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 30.97945205479452,
"alnum_prop": 0.5529515808091975,
"repo_name": "captiosus/treadmill",
"id": "bd134b6aba1fa0a239621342374f4cb6cf63d89d",
"size": "4523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treadmill/metrics/engine.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "570"
},
{
"name": "Python",
"bytes": "2598791"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "58099"
}
],
"symlink_target": ""
} |
import threading
import time
import voodoo.log as log
from weblab.data.experiments import CommandSent, ExperimentUsage, FileSent
import weblab.core.file_storer as file_storer
import weblab.data.command as Command
class TemporalInformationRetriever(threading.Thread):
"""
    This class continuously retrieves the information of initial and finished experiments.
"""
PRINT_ERRORS = True
def __init__(self, cfg_manager, initial_store, finished_store, commands_store, completed_store, db_manager):
threading.Thread.__init__(self)
self.cfg_manager = cfg_manager
self.keep_running = True
self.initial_store = initial_store
self.finished_store = finished_store
self.commands_store = commands_store
self.completed_store = completed_store
self.iterations = 0
self.db_manager = db_manager
self.timeout = None # Take the default of TemporalInformationStore
self.entry_id2command_id = {}
self.entry_id2command_id_lock = threading.Lock()
self.setDaemon(True)
def run(self):
while self.keep_running:
try:
self.iterations += 1
self.iterate()
except:
if self.PRINT_ERRORS:
import traceback
traceback.print_exc()
log.log( TemporalInformationRetriever, log.level.Critical, "Exception iterating in TemporalInformationRetriever!!!")
log.log_exc( TemporalInformationRetriever, log.level.Critical )
def stop(self):
self.keep_running = False
def iterate(self):
self.iterate_initial()
if self.keep_running and self.commands_store.empty() and self.completed_store.empty():
self.iterate_finish()
if self.keep_running:
self.iterate_command()
if self.keep_running:
self.iterate_completed()
def iterate_initial(self):
initial_information = self.initial_store.get(timeout=self.timeout)
if initial_information is not None:
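# Convert the datetimes to float POSIX timestamps, keeping microsecond precision
# (mktime() only has one-second resolution, so the microseconds are added separately).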
initial_timestamp = time.mktime(initial_information.initial_time.timetuple()) + initial_information.initial_time.microsecond / 1e6
end_timestamp = time.mktime(initial_information.end_time.timetuple()) + initial_information.end_time.microsecond / 1e6
request_info = initial_information.request_info
from_ip = request_info.pop('from_ip','<address not found>')
try:
username = request_info.pop('username')
except:
log.log( TemporalInformationRetriever, log.level.Critical, "Provided information did not contain some required fields (such as username or role). This usually means that the reservation has previously been expired. Provided request_info: %r; provided data: %r" % (request_info, initial_information), max_size = 10000)
log.log_exc( TemporalInformationRetriever, log.level.Critical )
return
usage = ExperimentUsage()
usage.start_date = initial_timestamp
usage.from_ip = from_ip
usage.experiment_id = initial_information.experiment_id
usage.reservation_id = initial_information.reservation_id
usage.coord_address = initial_information.exp_coordaddr
usage.request_info = initial_information.request_info
command_request = CommandSent(
Command.Command("@@@initial::request@@@"), initial_timestamp,
Command.Command(str(initial_information.client_initial_data)), end_timestamp)
command_response = CommandSent(
Command.Command("@@@initial::response@@@"), initial_timestamp,
Command.Command(str(initial_information.initial_configuration)), end_timestamp)
usage.append_command(command_request)
usage.append_command(command_response)
self.db_manager.store_experiment_usage(username, usage)
def iterate_completed(self):
completed_information = self.completed_store.get(timeout=self.timeout)
if completed_information is not None:
username, usage, callback = completed_information
self.db_manager.store_experiment_usage(username, usage)
callback()
def iterate_finish(self):
information = self.finished_store.get(timeout=self.timeout)
if information is not None:
reservation_id, obj, initial_time, end_time = information
if not self.commands_store.empty() or not self.completed_store.empty():
# They have higher priority
self.finished_store.put(reservation_id, obj, initial_time, end_time)
return
initial_timestamp = time.mktime(initial_time.timetuple()) + initial_time.microsecond / 1e6
end_timestamp = time.mktime(end_time.timetuple()) + end_time.microsecond / 1e6
command = CommandSent(
Command.Command("@@@finish@@@"), initial_timestamp,
Command.Command(str(obj)), end_timestamp)
if not self.db_manager.finish_experiment_usage(reservation_id, initial_timestamp, command):
# If it could not be added because the experiment id
# did not exist, put it again in the queue
self.finished_store.put(reservation_id, obj, initial_time, end_time)
time.sleep(0.01)
def iterate_command(self):
information = self.commands_store.get(timeout=self.timeout)
if information is not None:
all_information = [ information ]
# Retrieve all the remaining information to ensure that it is finally empty,
# with a maximum of 1000 registries per request
max_registries = 1000
counter = 0
while not self.commands_store.empty() and counter < max_registries:
counter += 1
information = self.commands_store.get(timeout=0)
if information is not None:
all_information.append(information)
command_pairs = []
command_responses = []
command_requests = {}
file_pairs = []
file_responses = []
file_requests = {}
backup_information = {}
backup_information_responses = {}
# Process
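# Each command/file produces two entries sharing the same entry_id: a 'before' entry
# carrying the request and an 'after' entry carrying the response. Requests are kept in
# command_requests/file_requests until their response arrives; responses whose request was
# already committed in a previous iteration are resolved through entry_id2command_id, and
# are put back into the store if no mapping exists yet.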
for information in all_information:
if information.is_command:
if information.is_before:
backup_information[information.entry_id] = information
command_requests[information.entry_id] = (information.reservation_id, CommandSent( information.payload, information.timestamp))
else:
backup_information_responses[information.entry_id] = information
command_request = command_requests.pop(information.entry_id, None)
if command_request is not None:
reservation_id, command_sent = command_request
complete_command = CommandSent(
command_sent.command, command_sent.timestamp_before,
information.payload, information.timestamp)
command_pairs.append((reservation_id, information.entry_id, complete_command))
else:
with self.entry_id2command_id_lock:
command_id = self.entry_id2command_id.pop(information.entry_id, None)
if command_id is None:
self.commands_store.put(information)
else:
command_responses.append((information.entry_id, command_id, information.payload, information.timestamp))
else:
if information.is_before:
backup_information[information.entry_id] = information
file_requests[information.entry_id] = (information.reservation_id, information.payload)
else:
backup_information_responses[information.entry_id] = information
file_request = file_requests.pop(information.entry_id, None)
if file_request is not None:
reservation_id, file_sent = file_request
if file_sent.is_loaded():
storer = file_storer.FileStorer(self.cfg_manager, reservation_id)
stored = storer.store_file(self, file_sent.file_content, file_sent.file_info)
file_path = stored.file_path
file_hash = stored.file_hash
else:
file_path = file_sent.file_path
file_hash = file_sent.file_hash
complete_file = FileSent(file_path, file_hash, file_sent.timestamp_before,
information.payload, information.timestamp)
file_pairs.append((reservation_id, information.entry_id, complete_file))
else:
with self.entry_id2command_id_lock:
command_id = self.entry_id2command_id.pop(information.entry_id, None)
if command_id is None:
self.commands_store.put(information)
else:
file_responses.append((information.entry_id, command_id, information.payload, information.timestamp))
# At this point, we have all the information processed and
# ready to be passed to the database in a single commit
mappings = self.db_manager.store_commands(command_pairs, command_requests, command_responses, file_pairs, file_requests, file_responses)
elements_to_backup = []
with self.entry_id2command_id_lock:
for entry_id in mappings:
command_id = mappings[entry_id]
if command_id is not None and command_id is not False:
self.entry_id2command_id[entry_id] = mappings[entry_id]
else:
elements_to_backup.append(entry_id)
for entry_id in elements_to_backup:
if entry_id in backup_information:
self.commands_store.put(backup_information[entry_id])
if entry_id in backup_information_responses:
self.commands_store.put(backup_information_responses[entry_id])
| {
"content_hash": "c32ca47c5b21d9f135b9daf5e1f4316e",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 333,
"avg_line_length": 50.40723981900452,
"alnum_prop": 0.5702872531418313,
"repo_name": "zstars/weblabdeusto",
"id": "2060787451ef8b1d20f74a6545b0869091fd2545",
"size": "11519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/src/weblab/core/data_retriever.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ASP",
"bytes": "4785"
},
{
"name": "ActionScript",
"bytes": "8508"
},
{
"name": "ApacheConf",
"bytes": "122186"
},
{
"name": "Batchfile",
"bytes": "7753"
},
{
"name": "C",
"bytes": "19456"
},
{
"name": "C#",
"bytes": "315160"
},
{
"name": "C++",
"bytes": "9547"
},
{
"name": "CSS",
"bytes": "150709"
},
{
"name": "CoffeeScript",
"bytes": "30909"
},
{
"name": "Go",
"bytes": "7076"
},
{
"name": "HTML",
"bytes": "452001"
},
{
"name": "Java",
"bytes": "1234794"
},
{
"name": "JavaScript",
"bytes": "1656027"
},
{
"name": "Makefile",
"bytes": "1571"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "PHP",
"bytes": "155137"
},
{
"name": "Python",
"bytes": "3435335"
},
{
"name": "Shell",
"bytes": "2596"
},
{
"name": "Smarty",
"bytes": "20160"
},
{
"name": "VHDL",
"bytes": "5874"
}
],
"symlink_target": ""
} |
import argparse
import jinja2
import os
import signal
import subprocess
import sys
import traceback
from blessings import Terminal
from contextlib import contextmanager
signals = dict((k, v) for v, k in signal.__dict__.iteritems() if v.startswith('SIG'))
term = Terminal()
TEST_ROOT = os.getcwd()
env = jinja2.Environment(
trim_blocks=True,
lstrip_blocks=True,
loader=jinja2.FileSystemLoader(os.path.join(TEST_ROOT, 'util', 'template')),
)
@contextmanager
def chdir(d):
old = os.getcwd()
os.chdir(d)
yield
os.chdir(old)
def shell(*args, **kwargs):
p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = p.communicate(kwargs.get('input', ''))
out = '\n'.join(((output[0] or '').strip(), (output[1] or '').strip()))
if p.returncode < 0:
sig = signals.get(-p.returncode)
if sig is not None:
out += '\n' + sig
return out.strip(), p.returncode
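# Example usage (illustrative): out, rc = shell('make', '-j2') runs the command, captures
# both stdout and stderr, and appends the signal name if the process was killed by a signal.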
def walk(base):
for root, _, files in os.walk(base):
for name in files:
yield os.path.join(root, name)
class Test:
cmake_template = 'CMakeLists.j2'
build_dir = 'build/mock'
def __init__(self, path, tail):
self.path = path
self.name = os.path.splitext(tail)[0].strip('/')
self.exe = os.path.basename(self.name)
self.dir = self.name.rsplit(self.exe, 1)[0].strip('/')
self.ran = False
self.success = None
self.build_failed = False
self.output = ''
@classmethod
def find(cls, base, filt):
tests = []
for path in walk(base):
tail = path.replace(base, '', 1)
test = None
if path.endswith('.c'):
if os.path.basename(path).startswith('_'):
test = PureCTest(path, tail)
else:
test = Test(path, tail)
elif path.endswith('.py'):
test = PythonTest(path, tail)
else:
continue
if test and filt:
for f in filt:
if test.name.startswith(f):
break
else:
continue
tests.append(test)
return tests
@property
def status(self):
if self.build_failed:
return 'build failed'
elif not self.ran:
return 'skipped'
return 'pass' if self.success else 'fail'
@property
def status_color(self):
if self.build_failed:
return term.red
elif not self.ran:
return term.yellow
return term.green if self.success else term.red
def build(self, project):
junk_dir = os.path.join(TEST_ROOT, self.build_dir)
bin_dir = os.path.join(TEST_ROOT, 'bin', self.dir)
if not os.path.exists(junk_dir):
os.makedirs(junk_dir)
cmakelists = os.path.join(junk_dir, 'CMakeLists.txt')
t = env.get_template(self.cmake_template)
txt = t.render(
project=project,
exe=self.exe,
sources=self.path,
bin_dir=bin_dir,
util=os.path.join(TEST_ROOT, 'util'),
)
with open(cmakelists, 'w') as f:
f.write(txt)
out, status = shell('cmake', cmakelists)
if status:
self.output = out
self.build_failed = True
return False
with chdir(junk_dir):
out, status = shell('make', '-j2')
if status:
self.output = out
self.build_failed = True
return False
tmp = os.path.join(bin_dir, 'tmp')
out = os.path.join(bin_dir, self.exe)
if os.path.exists(out):
os.unlink(out)
os.rename(tmp, out)
return True
def run(self):
bin_dir = os.path.join(TEST_ROOT, 'bin', self.dir)
with chdir(bin_dir):
self.output, status = shell('./' + self.exe)
self.ran = True
self.success = (status == 0)
return self.success
def __repr__(self):
if self.ran:
return '<Test: {} ({})>'.format(self.name, self.status)
else:
return '<Test: {}>'.format(self.name)
class PureCTest(Test):
build_dir = 'build/pure'
cmake_template = 'CMakeLists_pure.j2'
class PythonTest(Test):
def build(self, project):
return True
def run(self):
with chdir(TEST_ROOT):
self.output, status = shell('python', self.path)
self.ran = True
self.success = (status == 0)
return self.success
def run(args):
tests = Test.find(args.base, args.tests)
tests.sort(key=lambda t: (t.__class__, t.name))
if not tests:
print 'No tests!'
return
step_fmt = lambda step: term.bold('[' + step + ']')
status_fmt = lambda test: term.bold('[' + test.status_color(test.status) + ']')
back = lambda mult: '\b' * mult
out = lambda *a: (sys.stdout.write(' '.join(str(s) for s in a)), sys.stdout.flush())
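# Progress is drawn in place: back(n) emits n backspaces so the bracketed status token
# ([build] -> [ run ] -> [pass]/[fail]) can be overwritten as each step completes.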
duplicate_errors = set()
for i, test in enumerate(tests):
headline = '[{}/{}] {} ['.format(i + 1, len(tests), test.name)
print term.bold(headline.ljust(79, '-')),
out(back(8) + term.bold('-') + step_fmt('build'))
try:
build = test.build(args.project)
except Exception:
test.build_failed = True
print
traceback.print_exc()
out(back(7) + step_fmt(' run '))
if not test.build_failed:
try:
success = test.run()
except Exception:
test.ran = True
success = test.success = False
print
traceback.print_exc()
out(back(max(7, len(test.status) + 3)) + term.bold('--') + status_fmt(test))
print
if test.output:
if 'ERROR' in test.output or 'Assertion failed' in test.output:
test.success = False
if test.build_failed:
if test.output in duplicate_errors:
continue
else:
duplicate_errors.add(test.output)
for line in test.output.split('\n'):
ASSERT = term.bold(term.red('Assertion failed'))
ERROR = term.bold(term.red('ERROR:'))
WARNING = term.bold(term.yellow('WARNING:'))
line = line.decode('utf8', 'replace')
line = line.replace('Assertion failed', ASSERT)
if line.startswith('ERROR'):
line = line.replace('ERROR:', ERROR, 1)
elif line.startswith('WARNING'):
line = line.replace('WARNING:', WARNING, 1)
if test.build_failed:
line = line.replace('error:', ERROR)
line = line.replace('warning:', WARNING)
print '> {}'.format(line.encode('utf8', 'replace'))
passed = sum(t.success for t in tests if t.ran)
total = sum(t.ran for t in tests)
results = '{} / {} passed, {} skipped '.format(passed, total, len(tests) - total)
if total > 0:
pc = passed / float(total) * 100
percent = '{:.2f}%'.format(pc)
if passed == total:
percent = term.green('100%')
elif pc < 75:
percent = term.red(percent)
else:
percent = term.yellow(percent)
print term.bold((results + '[{}]').format(percent).rjust(80 + len(term.green(''))))
print
if passed < len(tests):
return sys.exit(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Build and run tests.')
parser.add_argument('--project', help='project directory', default='.')
parser.add_argument('--base', help='test directories to search', required=True)
parser.add_argument('tests', help='test names to run (all by default)', nargs='*')
args = parser.parse_args()
run(args)
| {
"content_hash": "80d9e2076967b8c3667610278db0024a",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 101,
"avg_line_length": 30.84732824427481,
"alnum_prop": 0.5308092056421678,
"repo_name": "RobertoMalatesta/glshim",
"id": "7af7110ecfe03f5106f5022b9f6adcc20da7aa54",
"size": "8082",
"binary": false,
"copies": "2",
"ref": "refs/heads/unstable",
"path": "test/util/run.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13771684"
},
{
"name": "C++",
"bytes": "63232"
},
{
"name": "CMake",
"bytes": "2282"
},
{
"name": "Python",
"bytes": "16557"
},
{
"name": "Shell",
"bytes": "1415"
}
],
"symlink_target": ""
} |
"""
pghoard - pg_basebackup handler
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
import datetime
import io
import logging
import os
import select
import socket
import stat
import subprocess
import time
from concurrent.futures import ThreadPoolExecutor
from queue import Empty, Queue
from tempfile import NamedTemporaryFile
from threading import Thread
import psycopg2
from pghoard.rohmu import dates, errors, rohmufile
from pghoard.rohmu.compat import suppress
# pylint: disable=superfluous-parens
from . import common, version, wal
from .common import (
connection_string_using_pgpass, replication_connection_string_and_slot_using_pgpass, set_stream_nonblocking,
set_subprocess_stdout_and_stderr_nonblocking, terminate_subprocess
)
from .patchedtarfile import tarfile
BASEBACKUP_NAME = "pghoard_base_backup"
EMPTY_DIRS = [
"pg_dynshmem",
"pg_log",
"pg_replslot",
"pg_snapshot",
"pg_stat_tmp",
"pg_tblspc",
"pg_wal",
"pg_wal/archive_status",
"pg_xlog",
"pg_xlog/archive_status",
]
class BackupFailure(Exception):
"""Backup failed - post a failure to callback_queue and allow the thread to terminate"""
class NoException(BaseException):
"""Exception that's never raised, used in conditional except blocks"""
class PGBaseBackup(Thread):
def __init__(
self,
config,
site,
connection_info,
basebackup_path,
compression_queue,
metrics,
transfer_queue=None,
callback_queue=None,
pg_version_server=None,
metadata=None
):
super().__init__()
self.log = logging.getLogger("PGBaseBackup")
self.config = config
self.site = site
self.connection_info = connection_info
self.basebackup_path = basebackup_path
self.callback_queue = callback_queue
self.chunks_on_disk = 0
self.compression_queue = compression_queue
self.metadata = metadata or {}
self.metrics = metrics
self.transfer_queue = transfer_queue
self.running = True
self.pid = None
self.pg_version_server = pg_version_server
self.latest_activity = datetime.datetime.utcnow()
def run(self):
try:
basebackup_mode = self.config["backup_sites"][self.site]["basebackup_mode"]
if basebackup_mode == "basic":
self.run_basic_basebackup()
elif basebackup_mode == "local-tar":
self.run_local_tar_basebackup()
elif basebackup_mode == "pipe":
self.run_piped_basebackup()
else:
raise errors.InvalidConfigurationError("Unsupported basebackup_mode {!r}".format(basebackup_mode))
except Exception as ex: # pylint: disable=broad-except
if isinstance(ex, (BackupFailure, errors.InvalidConfigurationError)):
self.log.error(str(ex))
else:
self.log.exception("Backup unexpectedly failed")
self.metrics.unexpected_exception(ex, where="PGBaseBackup")
if self.callback_queue:
# post a failure event
self.callback_queue.put({"success": False})
finally:
self.running = False
@staticmethod
def get_paths_for_backup(basebackup_path):
i = 0
while True:
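# Backup directory names are UTC timestamps with an index suffix to avoid collisions,
# e.g. '2016-07-25_10-30_0' (illustrative example).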
tsdir = "{}_{}".format(datetime.datetime.utcnow().strftime("%Y-%m-%d_%H-%M"), i)
raw_basebackup = os.path.join(basebackup_path + "_incoming", tsdir)
compressed_basebackup = os.path.join(basebackup_path, tsdir)
# The backup directory names need not be a sequence, so we lean towards skipping over any
# partial or leftover progress below. Make sure we only return paths if we're able to create the
# raw_basebackup directory.
if not os.path.exists(raw_basebackup) and not os.path.exists(compressed_basebackup):
with suppress(FileExistsError):
os.makedirs(raw_basebackup)
return raw_basebackup, compressed_basebackup
i += 1
def get_command_line(self, output_name):
command = [
self.config["backup_sites"][self.site]["pg_basebackup_path"],
"--format",
"tar",
"--label",
BASEBACKUP_NAME,
"--verbose",
"--pgdata",
output_name,
]
if self.config["backup_sites"][self.site]["active_backup_mode"] == "standalone_hot_backup":
if self.pg_version_server >= 100000:
command.extend(["--wal-method=fetch"])
else:
command.extend(["--xlog-method=fetch"])
elif self.pg_version_server >= 100000:
command.extend(["--wal-method=none"])
connection_string, _ = replication_connection_string_and_slot_using_pgpass(self.connection_info)
command.extend(["--progress", "--dbname", connection_string])
return command
def check_command_success(self, proc, output_file):
rc = terminate_subprocess(proc, log=self.log)
msg = "Ran: {!r}, took: {:.3f}s to run, returncode: {}".format(
proc.args,
time.monotonic() - proc.basebackup_start_time, rc
)
if rc == 0 and os.path.exists(output_file):
self.log.info(msg)
return True
if output_file:
with suppress(FileNotFoundError):
os.unlink(output_file)
raise BackupFailure(msg)
def basebackup_compression_pipe(self, proc, basebackup_path):
rsa_public_key = None
encryption_key_id = self.config["backup_sites"][self.site]["encryption_key_id"]
if encryption_key_id:
rsa_public_key = self.config["backup_sites"][self.site]["encryption_keys"][encryption_key_id]["public"]
compression_algorithm = self.config["compression"]["algorithm"]
compression_level = self.config["compression"]["level"]
self.log.debug("Compressing basebackup directly to file: %r", basebackup_path)
set_stream_nonblocking(proc.stderr)
metadata = {
"compression-algorithm": compression_algorithm,
"encryption-key-id": encryption_key_id,
"host": socket.gethostname(),
}
with NamedTemporaryFile(prefix=basebackup_path, suffix=".tmp-compress") as output_obj:
def extract_header_func(input_data):
# backup_label should always be first in the tar ball
if input_data[0:12].startswith(b"backup_label"):
# skip the 512 byte tar header to get to the actual backup label content
start_wal_segment, start_time = self.parse_backup_label(input_data[512:1024])
metadata.update({"start-wal-segment": start_wal_segment, "start-time": start_time})
def progress_callback():
stderr_data = proc.stderr.read()
if stderr_data:
self.latest_activity = datetime.datetime.utcnow()
self.log.debug("pg_basebackup stderr: %r", stderr_data)
original_input_size, compressed_file_size = rohmufile.write_file(
input_obj=proc.stdout,
output_obj=output_obj,
compression_algorithm=compression_algorithm,
compression_level=compression_level,
rsa_public_key=rsa_public_key,
progress_callback=progress_callback,
log_func=self.log.info,
header_func=extract_header_func
)
os.link(output_obj.name, basebackup_path)
if original_input_size:
size_ratio = compressed_file_size / original_input_size
self.metrics.gauge(
"pghoard.compressed_size_ratio",
size_ratio,
tags={
"algorithm": compression_algorithm,
"site": self.site,
"type": "basebackup",
}
)
return original_input_size, compressed_file_size, metadata
def run_piped_basebackup(self):
# In a piped basebackup we're not able to read backup_label and must figure out the start wal segment
# on our own. Note that this WAL file value will only be correct if no other basebackups are run in
# parallel. PGHoard never does this itself, but if the user starts one on their own we'll get
# an incorrect start-wal-segment since the pg_basebackup from pghoard will not generate a new checkpoint.
# This means that this WAL information would not be the oldest required to restore from this
# basebackup.
connection_string, _ = replication_connection_string_and_slot_using_pgpass(self.connection_info)
start_wal_segment = wal.get_current_wal_from_identify_system(connection_string)
temp_basebackup_dir, compressed_basebackup = self.get_paths_for_backup(self.basebackup_path)
command = self.get_command_line("-")
self.log.debug("Starting to run: %r", command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
setattr(proc, "basebackup_start_time", time.monotonic())
self.pid = proc.pid
self.log.info("Started: %r, running as PID: %r, basebackup_location: %r", command, self.pid, compressed_basebackup)
stream_target = os.path.join(temp_basebackup_dir, "data.tmp")
# catch any os level exceptions such as out of disk space, so that the underlying
# OS process gets properly cleaned up by check_command_success
try:
original_input_size, compressed_file_size, metadata = \
self.basebackup_compression_pipe(proc, stream_target)
except OSError as e:
self.log.error(
"basebackup_compression_pipe(%r, %r) failed with %r. "
"Ignoring; check_command_success will detect this.", proc, stream_target, e
)
self.metrics.unexpected_exception(e, where="PGBaseBackup")
self.check_command_success(proc, stream_target)
os.rename(stream_target, compressed_basebackup)
# Since we might not be able to parse the backup label we cheat with the start-wal-segment and
# start-time a bit. The start-wal-segment is the segment currently being written before
# the backup and the start_time is taken _after_ the backup has completed and so is conservatively
# in the future but not exactly correct. These both are valid only as long as no other
# basebackups than those controlled by pghoard are currently running at the same time.
# If pg_basebackups are taken simultaneously, directly or through other backup managers, the WAL
# file will be incorrect since a new checkpoint will not be issued for a parallel backup
if "start-wal-segment" not in metadata:
metadata.update({"start-wal-segment": start_wal_segment})
if "start-time" not in metadata:
metadata.update({"start-time": datetime.datetime.now(datetime.timezone.utc).isoformat()})
metadata.update({
"original-file-size": original_input_size,
"pg-version": self.pg_version_server,
})
metadata.update(self.metadata)
self.transfer_queue.put({
"callback_queue": self.callback_queue,
"file_size": compressed_file_size,
"filetype": "basebackup",
"local_path": compressed_basebackup,
"metadata": metadata,
"site": self.site,
"type": "UPLOAD",
})
def parse_backup_label(self, backup_label_data):
if isinstance(backup_label_data, str):
backup_label_data = backup_label_data.encode("utf-8")
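# backup_label lines of interest look roughly like (illustrative):
#   START WAL LOCATION: 0/9000028 (file 000000010000000000000009)
#   START TIME: 2016-07-25 10:30:00 EEST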
for line in backup_label_data.split(b"\n"):
if line.startswith(b"START WAL LOCATION"):
start_wal_segment = line.split()[5].strip(b")").decode("utf8")
elif line.startswith(b"START TIME: "):
start_time_text = line[len("START TIME: "):].decode("utf8")
start_time_dt = dates.parse_timestamp(start_time_text, assume_local=True)
start_time = start_time_dt.isoformat()
self.log.debug("Found: %r as starting wal segment, start_time: %r", start_wal_segment, start_time)
return start_wal_segment, start_time
def parse_backup_label_in_tar(self, basebackup_path):
with tarfile.TarFile(name=basebackup_path, mode="r") as tar:
content = tar.extractfile("backup_label").read() # pylint: disable=no-member
return self.parse_backup_label(content)
def run_basic_basebackup(self):
basebackup_directory, _ = self.get_paths_for_backup(self.basebackup_path)
basebackup_tar_file = os.path.join(basebackup_directory, "base.tar")
command = self.get_command_line(basebackup_directory)
self.log.debug("Starting to run: %r", command)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
setattr(proc, "basebackup_start_time", time.monotonic())
self.pid = proc.pid
self.log.info("Started: %r, running as PID: %r, basebackup_location: %r", command, self.pid, basebackup_tar_file)
set_subprocess_stdout_and_stderr_nonblocking(proc)
while self.running:
rlist, _, _ = select.select([proc.stdout, proc.stderr], [], [], 1.0)
for fd in rlist:
content = fd.read()
if content:
self.log.debug(content)
self.latest_activity = datetime.datetime.utcnow()
if proc.poll() is not None:
break
self.check_command_success(proc, basebackup_tar_file)
start_wal_segment, start_time = self.parse_backup_label_in_tar(basebackup_tar_file)
self.compression_queue.put({
"callback_queue": self.callback_queue,
"full_path": basebackup_tar_file,
"metadata": {
**self.metadata,
"start-time": start_time,
"start-wal-segment": start_wal_segment,
},
"type": "CLOSE_WRITE",
})
def get_control_entries_for_tar(self, *, metadata, pg_control, backup_label):
mtime = time.time()
blob = io.BytesIO(common.json_encode(metadata, binary=True))
ti = tarfile.TarInfo(name=".pghoard_tar_metadata.json")
ti.size = len(blob.getbuffer())
ti.mtime = mtime
yield ti, blob, False
# Back up the latest version of pg_control
blob = io.BytesIO(pg_control)
ti = tarfile.TarInfo(name=os.path.join("pgdata", "global", "pg_control"))
ti.size = len(blob.getbuffer())
ti.mtime = mtime
yield ti, blob, False
# Add the given backup_label to the tar after calling pg_stop_backup()
blob = io.BytesIO(backup_label)
ti = tarfile.TarInfo(name=os.path.join("pgdata", "backup_label"))
ti.size = len(blob.getbuffer())
ti.mtime = mtime
yield ti, blob, False
# Create directory entries for empty directories
for dirname in EMPTY_DIRS:
ti = tarfile.TarInfo(name=os.path.join("pgdata", dirname))
ti.type = tarfile.DIRTYPE
ti.mode = 0o700
ti.mtime = mtime
yield ti, None, False
def write_files_to_tar(self, *, files, tar):
for archive_path, local_path, missing_ok in files:
if not self.running:
raise BackupFailure("thread termination requested")
if isinstance(archive_path, tarfile.TarInfo):
tar.addfile(archive_path, local_path)
continue
try:
tar.add(local_path, arcname=archive_path, recursive=False)
except (FileNotFoundError if missing_ok else NoException):
self.log.warning("File %r went away while writing to tar, ignoring", local_path)
def find_files_to_backup(self, *, pgdata, tablespaces):
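# Yields (archive_path, local_path, missing_ok, operation) tuples: operation is 'add' for
# regular files and symlinks, while 'enter'/'leave' mark directory boundaries so the caller
# can split the stream into chunks without losing the enclosing directory entries.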
def add_directory(archive_parent, local_parent, *, missing_ok):
# Scan and add a single directory
try:
contents = os.listdir(local_parent)
except (FileNotFoundError if missing_ok else NoException):
self.log.warning("Directory %r went away while scanning, ignoring", local_parent)
return
for fn in sorted(contents):
# Ignore all temporary files and directories as well
# as pg_control; we'll grab the latest version of pg_control
# after everything else has been copied.
if fn == "pg_control" or fn.startswith("pgsql_tmp"):
continue
local_path = os.path.join(local_parent, fn)
archive_path = os.path.join(archive_parent, fn)
yield from add_entry(archive_path, local_path, missing_ok=missing_ok)
def add_entry(archive_path, local_path, *, missing_ok):
# Recursively add files and directories
try:
st_mode = os.stat(local_path).st_mode
except (FileNotFoundError if missing_ok else NoException):
self.log.warning("File %r went away while scanning, ignoring", local_path)
return
if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode):
yield archive_path, local_path, missing_ok, "add"
elif stat.S_ISDIR(st_mode):
yield archive_path, local_path, missing_ok, "enter"
# Everything but top-level items is allowed to be missing
yield from add_directory(archive_path, local_path, missing_ok=True)
yield archive_path, local_path, missing_ok, "leave"
else:
self.log.error("File %r is not a directory, file or symlink, ignoring", local_path)
# Iterate over top-level $PGDATA
for fn in os.listdir(pgdata):
local_path = os.path.join(pgdata, fn)
archive_path = os.path.join("pgdata", fn)
# Skip temporary / runtime files such as postmaster.pid, postmaster.opts and files ending with ~,
# .tmp or .old or starting with .s. or pgsql_tmp. These are some of the filename matches and patterns
# PostgreSQL's own replication code recognizes.
# NOTE: backup_label and various empty directories are handled by write_init_entries_to_tar
# NOTE: We also ignore tablespace_map because we store tablespace information elsewhere and
# reconstruct tablespace links in restore.py using our custom metadata and/or user supplied
# options.
# TODO: Use a top-level whitelist?
if fn in EMPTY_DIRS or \
fn == "postmaster.opts" or \
fn == "postmaster.pid" or \
fn == "backup_label" or \
fn == "tablespace_map" or \
fn.endswith(".old") or \
fn.endswith(".tmp") or \
fn.endswith("~") or \
fn.startswith(".s.") or \
fn.startswith("pgsql_tmp"):
continue
yield from add_entry(archive_path, local_path, missing_ok=False)
# Add a "tablespaces" directory with same metadata as $PGDATA
for spcname, spcinfo in tablespaces.items():
local_path = spcinfo["path"]
archive_path = os.path.join("tablespaces", spcname)
yield archive_path, local_path, False, "enter"
yield from add_directory(archive_path, local_path, missing_ok=False)
yield archive_path, local_path, False, "leave"
def tar_one_file(
self, *, temp_dir, chunk_path, files_to_backup, callback_queue, filetype="basebackup_chunk", extra_metadata=None
):
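# Writes the given files into one compressed (and optionally encrypted) tar chunk at
# chunk_path, queues it for upload and returns (chunk_name, plain_size, compressed_size).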
start_time = time.monotonic()
site_config = self.config["backup_sites"][self.site]
encryption_key_id = site_config["encryption_key_id"]
if encryption_key_id:
rsa_public_key = site_config["encryption_keys"][encryption_key_id]["public"]
else:
rsa_public_key = None
with NamedTemporaryFile(dir=temp_dir, prefix=os.path.basename(chunk_path), suffix=".tmp") as raw_output_obj:
# pylint: disable=bad-continuation
with rohmufile.file_writer(
compression_algorithm=self.config["compression"]["algorithm"],
compression_level=self.config["compression"]["level"],
compression_threads=site_config["basebackup_compression_threads"],
rsa_public_key=rsa_public_key,
fileobj=raw_output_obj
) as output_obj:
with tarfile.TarFile(fileobj=output_obj, mode="w") as output_tar:
self.write_files_to_tar(files=files_to_backup, tar=output_tar)
input_size = output_obj.tell()
result_size = raw_output_obj.tell()
# Make the file persist over the with-block with this hardlink
os.link(raw_output_obj.name, chunk_path)
rohmufile.log_compression_result(
encrypted=bool(encryption_key_id),
elapsed=time.monotonic() - start_time,
original_size=input_size,
result_size=result_size,
source_name="$PGDATA files ({})".format(len(files_to_backup)),
log_func=self.log.info,
)
size_ratio = result_size / input_size
self.metrics.gauge(
"pghoard.compressed_size_ratio",
size_ratio,
tags={
"algorithm": self.config["compression"]["algorithm"],
"site": self.site,
"type": "basebackup",
}
)
metadata = {
"compression-algorithm": self.config["compression"]["algorithm"],
"encryption-key-id": encryption_key_id,
"format": "pghoard-bb-v2",
"original-file-size": input_size,
"host": socket.gethostname(),
}
if extra_metadata:
metadata.update(extra_metadata)
self.transfer_queue.put({
"callback_queue": callback_queue,
"file_size": result_size,
"filetype": filetype,
"local_path": chunk_path,
"metadata": metadata,
"site": self.site,
"type": "UPLOAD",
})
# Get the name of the chunk and the name of the parent directory (ie backup "name")
chunk_name = "/".join(chunk_path.split("/")[-2:])
return chunk_name, input_size, result_size
def wait_for_chunk_transfer_to_complete(self, chunk_count, upload_results, chunk_callback_queue, start_time):
try:
upload_results.append(chunk_callback_queue.get(timeout=3.0))
self.log.info("Completed a chunk transfer successfully: %r", upload_results[-1])
return True
except Empty:
self.log.warning(
"Upload status: %r/%r handled, time taken: %r", len(upload_results), chunk_count,
time.monotonic() - start_time
)
return False
def handle_single_chunk(self, *, chunk_callback_queue, chunk_path, chunks, index, temp_dir):
one_chunk_files = chunks[index]
chunk_name, input_size, result_size = self.tar_one_file(
callback_queue=chunk_callback_queue,
chunk_path=chunk_path,
temp_dir=temp_dir,
files_to_backup=one_chunk_files,
)
self.log.info(
"Queued backup chunk %r for transfer, chunks on disk (including partial): %r, current: %r, total chunks: %r",
chunk_name, self.chunks_on_disk + 1, index, len(chunks)
)
return {
"chunk_filename": chunk_name,
"input_size": input_size,
"result_size": result_size,
"files": [chunk[0] for chunk in one_chunk_files]
}
def create_and_upload_chunks(self, chunks, data_file_format, temp_base_dir):
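# Compresses chunks concurrently with a thread pool while capping how many finished chunks
# wait on local disk for upload (site setting 'basebackup_chunks_in_progress').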
start_time = time.monotonic()
chunk_files = []
upload_results = []
chunk_callback_queue = Queue()
self.chunks_on_disk = 0
i = 0
site_config = self.config["backup_sites"][self.site]
max_chunks_on_disk = site_config["basebackup_chunks_in_progress"]
threads = site_config["basebackup_threads"]
with ThreadPoolExecutor(max_workers=threads) as tpe:
pending_compress_and_encrypt_tasks = []
while i < len(chunks):
if len(pending_compress_and_encrypt_tasks) >= threads:
# Always expect tasks to complete in order. This can slow down the progress a bit in case
# one chunk is much slower to process than others but typically the chunks don't differ much
# and this assumption greatly simplifies the logic.
task_to_wait = pending_compress_and_encrypt_tasks.pop(0)
chunk_files.append(task_to_wait.result())
if self.chunks_on_disk < max_chunks_on_disk:
chunk_id = i + 1
task = tpe.submit(
self.handle_single_chunk,
chunk_callback_queue=chunk_callback_queue,
chunk_path=data_file_format(chunk_id),
chunks=chunks,
index=i,
temp_dir=temp_base_dir,
)
pending_compress_and_encrypt_tasks.append(task)
self.chunks_on_disk += 1
i += 1
else:
if self.wait_for_chunk_transfer_to_complete(
len(chunks), upload_results, chunk_callback_queue, start_time
):
self.chunks_on_disk -= 1
for task in pending_compress_and_encrypt_tasks:
chunk_files.append(task.result())
while len(upload_results) < len(chunk_files):
self.wait_for_chunk_transfer_to_complete(len(chunks), upload_results, chunk_callback_queue, start_time)
return chunk_files
def run_local_tar_basebackup(self):
pgdata = self.config["backup_sites"][self.site]["pg_data_directory"]
if not os.path.isdir(pgdata):
raise errors.InvalidConfigurationError("pg_data_directory {!r} does not exist".format(pgdata))
temp_base_dir, compressed_base = self.get_paths_for_backup(self.basebackup_path)
os.makedirs(compressed_base)
data_file_format = "{}/{}.{{0:08d}}.pghoard".format(compressed_base, os.path.basename(compressed_base)).format
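# data_file_format(n) produces the chunk file name, e.g. (illustrative)
# '<compressed_base>/<backup_name>.00000002.pghoard' for n == 2.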
# Default to 2GB chunks of uncompressed data
target_chunk_size = self.config["backup_sites"][self.site]["basebackup_chunk_size"]
self.log.debug("Connecting to database to start backup process")
connection_string = connection_string_using_pgpass(self.connection_info)
with psycopg2.connect(connection_string) as db_conn:
cursor = db_conn.cursor()
if self.pg_version_server >= 90600:
# We'll always use the non-exclusive backup mode on 9.6 and newer
cursor.execute("SELECT pg_start_backup(%s, true, false)", [BASEBACKUP_NAME])
backup_label = None
backup_mode = "non-exclusive"
else:
# On older versions, first check if we're in recovery, and find out the version of a possibly
# installed pgespresso extension. We use pgespresso's backup control functions when they're
# available, and require them in case we're running on a replica. We also make sure the
# extension version is 1.2 or newer to prevent crashing when using tablespaces.
cursor.execute(
"SELECT pg_is_in_recovery(), "
" (SELECT extversion FROM pg_extension WHERE extname = 'pgespresso')"
)
in_recovery, pgespresso_version = cursor.fetchone()
if in_recovery and (not pgespresso_version or pgespresso_version < "1.2"):
raise errors.InvalidConfigurationError(
"pgespresso version 1.2 or higher must be installed "
"to take `local-tar` backups from a replica"
)
if pgespresso_version and pgespresso_version >= "1.2":
cursor.execute("SELECT pgespresso_start_backup(%s, true)", [BASEBACKUP_NAME])
backup_label = cursor.fetchone()[0]
backup_mode = "pgespresso"
else:
try:
cursor.execute("SELECT pg_start_backup(%s, true)", [BASEBACKUP_NAME])
except psycopg2.OperationalError as ex:
self.log.warning("Exclusive pg_start_backup() failed: %s: %s", ex.__class__.__name__, ex)
db_conn.rollback()
if "a backup is already in progress" not in str(ex):
raise
self.log.info("Calling pg_stop_backup() and retrying")
cursor.execute("SELECT pg_stop_backup()")
cursor.execute("SELECT pg_start_backup(%s, true)", [BASEBACKUP_NAME])
with open(os.path.join(pgdata, "backup_label"), "r") as fp:
backup_label = fp.read()
backup_mode = "legacy"
backup_stopped = False
try:
# Look up tablespaces and resolve their current filesystem locations
cursor.execute("SELECT oid, spcname FROM pg_tablespace WHERE spcname NOT IN ('pg_default', 'pg_global')")
tablespaces = {
spcname: {
"path": os.readlink(os.path.join(pgdata, "pg_tblspc", str(oid))),
"oid": oid,
}
for oid, spcname in cursor.fetchall()
}
db_conn.commit()
self.log.info("Starting to backup %r and %r tablespaces to %r", pgdata, len(tablespaces), compressed_base)
start_time = time.monotonic()
total_file_count, chunks = self.find_and_split_files_to_backup(
pgdata=pgdata, tablespaces=tablespaces, target_chunk_size=target_chunk_size
)
# Tar up the chunks and submit them for upload; note that we start from chunk 1 here; chunk 0
# is reserved for special files and metadata and will be generated last.
chunk_files = self.create_and_upload_chunks(chunks, data_file_format, temp_base_dir)
# Everything is now tarred up, grab the latest pg_control and stop the backup process
with open(os.path.join(pgdata, "global", "pg_control"), "rb") as fp:
pg_control = fp.read()
# Call the stop backup functions now to get backup label for 9.6+ non-exclusive backups
if backup_mode == "non-exclusive":
cursor.execute("SELECT labelfile FROM pg_stop_backup(false)")
backup_label = cursor.fetchone()[0]
elif backup_mode == "pgespresso":
cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
else:
cursor.execute("SELECT pg_stop_backup()")
db_conn.commit()
backup_stopped = True
total_size_plain = sum(item["input_size"] for item in chunk_files)
total_size_enc = sum(item["result_size"] for item in chunk_files)
self.log.info(
"Basebackup generation finished, %r files, %r chunks, "
"%r byte input, %r byte output, took %r seconds, waiting to upload", total_file_count, len(chunk_files),
total_size_plain, total_size_enc,
time.monotonic() - start_time
)
finally:
db_conn.rollback()
if not backup_stopped:
if backup_mode == "non-exclusive":
cursor.execute("SELECT pg_stop_backup(false)")
elif backup_mode == "pgespresso":
cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
else:
cursor.execute("SELECT pg_stop_backup()")
db_conn.commit()
backup_label_data = backup_label.encode("utf-8")
backup_start_wal_segment, backup_start_time = self.parse_backup_label(backup_label_data)
backup_end_wal_segment, backup_end_time = self.get_backup_end_segment_and_time(db_conn, backup_mode)
# Generate and upload the metadata chunk
metadata = {
"backup_end_time": backup_end_time,
"backup_end_wal_segment": backup_end_wal_segment,
"backup_start_time": backup_start_time,
"backup_start_wal_segment": backup_start_wal_segment,
"chunks": chunk_files,
"pgdata": pgdata,
"pghoard_object": "basebackup",
"pghoard_version": version.__version__,
"tablespaces": tablespaces,
"host": socket.gethostname(),
}
control_files = list(
self.get_control_entries_for_tar(
metadata=metadata,
pg_control=pg_control,
backup_label=backup_label_data,
)
)
self.tar_one_file(
callback_queue=self.callback_queue,
chunk_path=data_file_format(0),
temp_dir=temp_base_dir,
files_to_backup=control_files,
filetype="basebackup",
extra_metadata={
**self.metadata,
"end-time": backup_end_time,
"end-wal-segment": backup_end_wal_segment,
"pg-version": self.pg_version_server,
"start-time": backup_start_time,
"start-wal-segment": backup_start_wal_segment,
"total-size-plain": total_size_plain,
"total-size-enc": total_size_enc,
},
)
def find_and_split_files_to_backup(self, *, pgdata, tablespaces, target_chunk_size):
total_file_count = 0
one_chunk_size = 0
one_chunk_files = []
chunks = []
entered_folders = []
# Generate a list of chunks
for archive_path, local_path, missing_ok, operation in \
self.find_files_to_backup(pgdata=pgdata, tablespaces=tablespaces):
if operation == "leave":
entered_folders.pop()
continue
file_size = os.path.getsize(local_path)
# Switch chunks if the current chunk already holds at least 20% of the target size and adding the new file would tip it over
if one_chunk_size > target_chunk_size / 5 and one_chunk_size + file_size > target_chunk_size:
chunks.append(one_chunk_files)
one_chunk_size = 0
one_chunk_files = entered_folders.copy()
total_file_count += 1
one_chunk_size += file_size
if operation == "enter":
entered_folders.append([archive_path, local_path, missing_ok])
one_chunk_files.append([archive_path, local_path, missing_ok])
chunks.append(one_chunk_files)
return total_file_count, chunks
def get_backup_end_segment_and_time(self, db_conn, backup_mode):
"""Grab a timestamp and WAL segment name after the end of the backup: this is a point in time to which
we must be able to recover, and the last WAL segment that is required for the backup to be
consistent.
Note that pg_switch_xlog()/pg_switch_wal() is a superuser-only function, but since pg_start_backup() and
pg_stop_backup() cause a WAL switch, we'll call them instead. The downside is an unnecessary
checkpoint.
"""
cursor = db_conn.cursor()
# Get backup end time and end segment and forcibly register a transaction in the current segment
# Also check if we're a superuser and can directly call pg_switch_xlog()/pg_switch_wal() later.
# Note that we can't call pg_walfile_name() or pg_current_wal_lsn() in recovery
cursor.execute(
"SELECT now(), pg_is_in_recovery(), "
" (SELECT rolsuper FROM pg_catalog.pg_roles WHERE rolname = current_user)"
)
backup_end_time, in_recovery, is_superuser = cursor.fetchone()
if in_recovery:
db_conn.commit()
return None, backup_end_time
if self.pg_version_server >= 100000:
cursor.execute("SELECT pg_walfile_name(pg_current_wal_lsn()), txid_current()")
else:
cursor.execute("SELECT pg_xlogfile_name(pg_current_xlog_location()), txid_current()")
backup_end_wal_segment, _ = cursor.fetchone()
db_conn.commit()
# Now force switch of the WAL segment to make sure we have archived a segment with a known
# timestamp after pg_stop_backup() was called.
backup_end_name = "pghoard_end_of_backup"
if is_superuser:
if self.pg_version_server >= 100000:
cursor.execute("SELECT pg_switch_wal()")
else:
cursor.execute("SELECT pg_switch_xlog()")
elif backup_mode == "non-exclusive":
cursor.execute("SELECT pg_start_backup(%s, true, false)", [backup_end_name])
cursor.execute("SELECT pg_stop_backup(false)")
elif backup_mode == "pgespresso":
cursor.execute("SELECT pgespresso_start_backup(%s, true)", [backup_end_name])
backup_label = cursor.fetchone()[0]
cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
else:
cursor.execute("SELECT pg_start_backup(%s, true)", [backup_end_name])
cursor.execute("SELECT pg_stop_backup()")
db_conn.commit()
return backup_end_wal_segment, backup_end_time
| {
"content_hash": "b434b83c7428d7010e75c6c0dc02ea60",
"timestamp": "",
"source": "github",
"line_count": 856,
"max_line_length": 124,
"avg_line_length": 45.14485981308411,
"alnum_prop": 0.5822896180519614,
"repo_name": "ohmu/pghoard",
"id": "4efed7c43baef68fe8967a5729dcd555880943b6",
"size": "38644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pghoard/basebackup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "4937"
},
{
"name": "Makefile",
"bytes": "1765"
},
{
"name": "Python",
"bytes": "467043"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lgbtstats.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "55d47a5cc45bf9b08feb54d06d891d43",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.7130434782608696,
"repo_name": "arnoldtan/cep_2015_sem2_assign2",
"id": "e2bd0d8912e478236566456343d312766aef6975",
"size": "252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "287622"
},
{
"name": "HTML",
"bytes": "11472"
},
{
"name": "JavaScript",
"bytes": "114491"
},
{
"name": "Python",
"bytes": "13021"
}
],
"symlink_target": ""
} |
from neutron.api.v2 import attributes as attr
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import portsecurity as psec
from neutron.manager import NeutronManager
from neutron.tests.unit import test_db_plugin
from oslo.config import cfg
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_allowedaddresspairs.'
'AllowedAddressPairTestPlugin')
class AllowedAddressPairTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self, plugin=None, ext_mgr=None):
super(AllowedAddressPairTestCase, self).setUp(plugin)
# Check if the plugin supports the port-security extension
plugin_obj = NeutronManager.get_plugin()
self._skip_port_security = ('port-security' not in
plugin_obj.supported_extension_aliases)
class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
db_base_plugin_v2.NeutronDbPluginV2,
addr_pair_db.AllowedAddressPairsMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with port security and allowed address pairs.
"""
supported_extension_aliases = ["allowed-address-pairs"]
def create_port(self, context, port):
p = port['port']
with context.session.begin(subtransactions=True):
neutron_db = super(AllowedAddressPairTestPlugin, self).create_port(
context, port)
p.update(neutron_db)
if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)):
self._process_create_allowed_address_pairs(
context, p,
p[addr_pair.ADDRESS_PAIRS])
else:
p[addr_pair.ADDRESS_PAIRS] = None
return port['port']
def update_port(self, context, id, port):
changed_fixed_ips = 'fixed_ips' in port['port']
delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
port)
has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
with context.session.begin(subtransactions=True):
ret_port = super(AllowedAddressPairTestPlugin, self).update_port(
context, id, port)
# copy values over - but not fixed_ips
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
if (delete_addr_pairs or has_addr_pairs):
# delete address pairs and re-add them
self._delete_allowed_address_pairs(context, id)
self._process_create_allowed_address_pairs(
context, ret_port,
ret_port[addr_pair.ADDRESS_PAIRS])
elif changed_fixed_ips:
self._check_fixed_ips_and_address_pairs_no_overlap(context,
ret_port)
return ret_port
class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase):
def setUp(self, plugin=None, ext_mgr=None):
plugin = plugin or DB_PLUGIN_KLASS
super(AllowedAddressPairDBTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
def test_create_port_allowed_address_pairs(self):
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_create_port_security_true_allowed_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=True,
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_create_port_security_false_allowed_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=False,
allowed_address_pairs=address_pairs)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_port_bad_mac(self):
address_pairs = [{'mac_address': 'invalid_mac',
'ip_address': '10.0.0.1'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_port_bad_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1222'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_missing_ip_field(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_duplicate_mac_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_more_than_max_allowed_address_pair(self):
cfg.CONF.set_default('max_allowed_address_pair', 3)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:02',
'ip_address': '10.0.0.2'},
{'mac_address': '00:00:00:00:00:03',
'ip_address': '10.0.0.3'},
{'mac_address': '00:00:00:00:00:04',
'ip_address': '10.0.0.4'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_equal_to_max_allowed_address_pair(self):
cfg.CONF.set_default('max_allowed_address_pair', 3)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:02',
'ip_address': '10.0.0.2'},
{'mac_address': '00:00:00:00:00:03',
'ip_address': '10.0.0.3'}]
self._create_port_with_address_pairs(address_pairs, 201)
def test_create_port_extra_args(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1',
'icbb': 'agreed'}]
self._create_port_with_address_pairs(address_pairs, 400)
def _create_port_with_address_pairs(self, address_pairs, ret_code):
with self.network() as net:
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, ret_code)
if ret_code == 201:
self._delete('ports', port['port']['id'])
def test_update_add_address_pairs(self):
with self.network() as net:
res = self._create_port(self.fmt, net['network']['id'])
port = self.deserialize(self.fmt, res)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
update_port = {'port': {addr_pair.ADDRESS_PAIRS:
address_pairs}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_update_fixed_ip_to_address_pair_ip_fail(self):
with self.network() as net:
with self.subnet(network=net):
address_pairs = [{'ip_address': '10.0.0.65'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)['port']
data = {'port': {'fixed_ips': [{'ip_address': '10.0.0.65'}]}}
req = self.new_update_request('ports', data, port['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
self._delete('ports', port['id'])
def test_update_fixed_ip_to_address_pair_with_mac_fail(self):
with self.network() as net:
with self.subnet(network=net):
res = self._create_port(self.fmt, net['network']['id'])
port = self.deserialize(self.fmt, res)['port']
address_pairs = [
{'mac_address': port['mac_address'],
'ip_address': port['fixed_ips'][0]['ip_address']}]
data = {'port': {addr_pair.ADDRESS_PAIRS: address_pairs}}
req = self.new_update_request('ports', data, port['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
self._delete('ports', port['id'])
def test_create_address_gets_port_mac(self):
with self.network() as net:
address_pairs = [{'ip_address': '23.23.23.23'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)['port']
port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address']
self.assertEqual(port_addr_mac,
port['mac_address'])
self._delete('ports', port['id'])
def test_update_address_pair_to_match_fixed_ip_and_mac(self):
with self.network() as net:
with self.subnet(network=net):
res = self._create_port(self.fmt, net['network']['id'])
port = self.deserialize(self.fmt, res)['port']
address_pairs = [{'mac_address': port['mac_address'],
'ip_address':
port['fixed_ips'][0]['ip_address']}]
update_port = {'port': {addr_pair.ADDRESS_PAIRS:
address_pairs}}
req = self.new_update_request('ports', update_port,
port['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 400)
self._delete('ports', port['id'])
def test_update_port_security_off_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
with self.subnet(network=net):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=True,
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
update_port = {'port': {psec.PORTSECURITY: False}}
# If the plugin implements security groups we also need to remove
# the security groups on the port.
plugin_obj = NeutronManager.get_plugin()
if 'security-groups' in plugin_obj.supported_extension_aliases:
update_port['port']['security_groups'] = []
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
self._delete('ports', port['port']['id'])
def test_create_port_remove_allowed_address_pairs(self):
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
self._delete('ports', port['port']['id'])
class TestAllowedAddressPairsXML(TestAllowedAddressPairs):
fmt = 'xml'
| {
"content_hash": "716277e8136620b0f5939afed8cf3db9",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 79,
"avg_line_length": 49.75,
"alnum_prop": 0.5227453054747422,
"repo_name": "sajuptpm/neutron-ipam",
"id": "70eb1e336e8cba57bf981a2b6cfd5bba4d3fa492",
"size": "15716",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/icehouse",
"path": "neutron/tests/unit/test_extension_allowedaddresspairs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Makefile",
"bytes": "3295"
},
{
"name": "Python",
"bytes": "9102565"
},
{
"name": "Shell",
"bytes": "9603"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
import skimage.data
import instance_occlsegm_lib
def test_centerize():
img = skimage.data.astronaut()
dst_shape = (480, 640)
img_c, mask_c = instance_occlsegm_lib.image.centerize(
img, dst_shape, return_mask=True)
assert img_c.shape == (dst_shape[0], dst_shape[1], 3)
assert img_c.dtype == img.dtype
assert mask_c.shape == dst_shape
assert mask_c.dtype == bool
| {
"content_hash": "910a0e62c580d43a2677bb9c07134d43",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 58,
"avg_line_length": 28.571428571428573,
"alnum_prop": 0.6575,
"repo_name": "pazeshun/jsk_apc",
"id": "4976fc458503451e6dc49eb73262b18f8912c9a7",
"size": "400",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demos/instance_occlsegm/tests/image_tests/test_centerize.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "101871"
},
{
"name": "CMake",
"bytes": "42995"
},
{
"name": "Common Lisp",
"bytes": "695864"
},
{
"name": "Dockerfile",
"bytes": "1503"
},
{
"name": "HTML",
"bytes": "6364"
},
{
"name": "Python",
"bytes": "406153"
},
{
"name": "Shell",
"bytes": "4475"
}
],
"symlink_target": ""
} |
from ginga import RGBMap, AutoCuts
from ginga.gw import Widgets
from .base import Stage, StageAction
count = 0
class Preview(Stage):
_stagename = 'preview'
def __init__(self):
super().__init__()
self.fv = None
self.in_image = None
self.image = None
self._chname = ""
def build_gui(self, container):
self.fv = self.pipeline.get("fv")
fr = Widgets.Frame("Preview")
captions = [('Preview Channel:', 'label', 'Channel', 'entryset'),
('Detach Image', 'button'),
]
w, b = Widgets.build_info(captions, orientation='vertical')
self.w.update(b)
b.channel.set_tooltip("Channel for preview image")
b.channel.add_callback('activated', self.set_channel_cb)
b.channel.set_text(self._chname)
b.detach_image.set_tooltip("Detach the current image")
b.detach_image.add_callback('activated', self.insert_image_cb)
fr.set_widget(w)
container.set_widget(fr)
@property
def chname(self):
return self._chname
@chname.setter
def chname(self, val):
self._chname = val
if self.gui_up:
self.w.channel.set_text(val)
def get_channel(self, chname):
channel = self.fv.get_channel_on_demand(chname)
viewer = channel.viewer
# PassThruRGBMapper does not do any RGB mapping
rgbmap = RGBMap.PassThruRGBMapper(self.logger)
viewer.set_rgbmap(rgbmap)
# Clip cuts assumes data does not need to be scaled in cut levels--
# only clipped
viewer.set_autocuts(AutoCuts.Clip(logger=self.logger))
return channel
def set_channel_cb(self, widget):
old = dict(chname=self._chname)
self._chname = widget.get_text().strip()
new = dict(chname=self._chname)
self.pipeline.push(StageAction(self, old, new,
descr="preview / change channel"))
self.get_channel(self._chname)
self.pipeline.run_from(self)
def insert_image_cb(self, widget):
image, self.image = self.image, None
self.in_image = None
self.pipeline.run_from(self)
def run(self, prev_stage):
global count
data = self.pipeline.get_data(prev_stage)
self.verify_2d(data)
if self._bypass or data is None:
self.pipeline.send(res_np=data)
return
if len(self.chname) > 0:
self.pipeline.logger.info('pipeline preview')
channel = self.fv.get_channel_on_demand(self._chname)
in_image = self.pipeline.get('input_image')
if in_image is not self.in_image:
self.in_image = in_image
# <-- new image. Make one of the same type as the input
# TODO: this needs to be user-selectable
# TODO: if this is a revisited image, should look
# up the corresponding previously generated output
# image, if there is one and load it as the output.
self.image = in_image.__class__(logger=self.pipeline.logger)
# copy the header from the input
in_header = in_image.get_header()
# TODO: massage header, maybe add some metadata from
# pipeline?
self.image.update_keywords(in_header)
# assign an output image name
# TODO: name a better output name, that is some kind
# of modified name of the input image name
self.image.set(name='P' + str(count))
count += 1
self.image.set_data(data)
channel.add_image(self.image)
else:
if self.image is not None:
self.image.set_data(data)
# data has changed so redraw image completely
channel.fitsimage.redraw(whence=0)
self.pipeline.send(res_np=data)
def get_image(self):
return self.image
def _get_state(self):
return dict(chname=self._chname)
def export_as_dict(self):
d = super().export_as_dict()
d.update(self._get_state())
return d
def import_from_dict(self, d):
super().import_from_dict(d)
self.chname = d['chname']
| {
"content_hash": "c5810f07b2e3545038d13c997b47e8f5",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 76,
"avg_line_length": 30.36111111111111,
"alnum_prop": 0.5661024702653248,
"repo_name": "naojsoft/ginga",
"id": "369a9fccc684e506aee2c057b38f408c2a993a07",
"size": "4482",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ginga/util/stages/preview.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2781"
},
{
"name": "GLSL",
"bytes": "7344"
},
{
"name": "HTML",
"bytes": "2129"
},
{
"name": "JavaScript",
"bytes": "87198"
},
{
"name": "Jupyter Notebook",
"bytes": "2691970"
},
{
"name": "Makefile",
"bytes": "85"
},
{
"name": "Python",
"bytes": "4426376"
}
],
"symlink_target": ""
} |
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
def __init__(self, root, venv, requirements,
test_requirements, py_version,
project):
self.root = root
self.venv = venv
self.requirements = requirements
self.test_requirements = test_requirements
self.py_version = py_version
self.project = project
def die(self, message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
if sys.version_info < (2, 6):
self.die("Need Python Version >= 2.6")
def run_command_with_code(self, cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
return self.run_command_with_code(cmd, redirect_output,
check_exit_code)[0]
def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
else:
return Distro(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
def check_dependencies(self):
self.get_distro().install_virtualenv()
def create_virtualenv(self, no_site_packages=True):
"""Creates the virtual environment and installs PIP.
Creates the virtual environment and installs PIP only into the
virtual environment.
"""
if not os.path.isdir(self.venv):
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print('done.')
else:
print("venv already exists...")
def pip_install(self, find_links, *args):
cmd_array = ['tools/with_venv.sh',
'python', '.venv/bin/pip', 'install',
'--upgrade']
for link in find_links:
cmd_array.extend(['--find-links', 'file://'+link])
self.run_command(cmd_array + list(args),
redirect_output=False)
def install_dependencies(self, find_links):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
self.pip_install(find_links, 'pip>=6.0')
self.pip_install(find_links, 'setuptools')
self.pip_install(find_links, 'pbr')
self.pip_install(find_links, '-r', self.requirements, '-r', self.test_requirements, '--pre')
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
parser.add_option('-f', '--find-links',
action='append',
help="Build generation directory ")
return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
def check_cmd(self, cmd):
return bool(self.run_command(['which', cmd],
check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded')
return
else:
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'
' favorite package management tool' % self.project)
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux
"""
def check_pkg(self, pkg):
return self.run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.die("Please install 'python-virtualenv'.")
super(Fedora, self).install_virtualenv()
| {
"content_hash": "060c2056ef54cbda5c09619405d03176",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 100,
"avg_line_length": 35.16969696969697,
"alnum_prop": 0.5664311562984663,
"repo_name": "sajuptpm/contrail-controller",
"id": "87c8de2e4ed3d87f543333236e35e2944a9b2bae",
"size": "6488",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/api-lib/tools/install_venv_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "51767"
},
{
"name": "C++",
"bytes": "19050770"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Groff",
"bytes": "36777"
},
{
"name": "HTML",
"bytes": "519766"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "LLVM",
"bytes": "2937"
},
{
"name": "Lua",
"bytes": "5819"
},
{
"name": "Makefile",
"bytes": "12449"
},
{
"name": "Protocol Buffer",
"bytes": "6129"
},
{
"name": "Python",
"bytes": "4813021"
},
{
"name": "Shell",
"bytes": "81402"
},
{
"name": "Thrift",
"bytes": "40763"
},
{
"name": "Yacc",
"bytes": "7737"
}
],
"symlink_target": ""
} |
import logging
import json
from webob import Response
from ryu.app.wsgi import ControllerBase
from ryu.app.wsgi import WSGIApplication
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.exception import OFPUnknownVersion
from ryu.lib import mac
from ryu.lib import dpid as dpid_lib
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.ofproto import ether
from ryu.ofproto import inet
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_2_parser
LOG = logging.getLogger('ryu.app.firewall')
# REST API
#
## about Firewall status
#
# get status of all firewall switches
# GET /firewall/module/status
#
# set enable the firewall switches
# PUT /firewall/module/enable/{switch-id}
# {switch-id} is 'all' or switchID
#
# set disable the firewall switches
# PUT /firewall/module/disable/{switch-id}
# {switch-id} is 'all' or switchID
#
#
## about Firewall rules
#
# get rules of the firewall switches
# GET /firewall/rules/{switch-id}
# {switch-id} is 'all' or switchID
#
# set a rule to the firewall switches
# POST /firewall/rules/{switch-id}
# {switch-id} is 'all' or switchID
#
# delete a rule of the firewall switches from ruleID
# DELETE /firewall/rules/{switch-id}
# {switch-id} is 'all' or switchID
#
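## illustrative usage (the switch id, addresses and the default wsgi
## port 8080 below are only placeholders, not part of this module)
#
# enable the firewall on a switch:
# curl -X PUT http://localhost:8080/firewall/module/enable/0000000000000001
#
# add a rule allowing ICMP from 10.0.0.0/24:
# curl -X POST -d '{"nw_src": "10.0.0.0/24", "nw_proto": "ICMP", "actions": "ALLOW"}' http://localhost:8080/firewall/rules/0000000000000001
#
# list the configured rules:
# curl -X GET http://localhost:8080/firewall/rules/0000000000000001
#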
OK = 0
NG = -1
SWITCHID_PATTERN = dpid_lib.DPID_PATTERN + r'|all'
REST_ALL = 'all'
REST_SWITCHID = 'switch_id'
REST_RULE_ID = 'rule_id'
REST_STATUS = 'status'
REST_STATUS_ENABLE = 'enable'
REST_STATUS_DISABLE = 'disable'
REST_COOKIE = 'cookie'
REST_PRIORITY = 'priority'
REST_MATCH = 'match'
REST_IN_PORT = 'in_port'
REST_SRC_MAC = 'dl_src'
REST_DST_MAC = 'dl_dst'
REST_DL_TYPE = 'dl_type'
REST_DL_TYPE_ARP = 'ARP'
REST_DL_TYPE_IPV4 = 'IPv4'
REST_SRC_IP = 'nw_src'
REST_DST_IP = 'nw_dst'
REST_NW_PROTO = 'nw_proto'
REST_NW_PROTO_TCP = 'TCP'
REST_NW_PROTO_UDP = 'UDP'
REST_NW_PROTO_ICMP = 'ICMP'
REST_TP_SRC = 'tp_src'
REST_TP_DST = 'tp_dst'
REST_ACTION = 'actions'
REST_ACTION_ALLOW = 'ALLOW'
REST_ACTION_DENY = 'DENY'
STATUS_FLOW_PRIORITY = ofproto_v1_2_parser.UINT16_MAX
ARP_FLOW_PRIORITY = ofproto_v1_2_parser.UINT16_MAX - 1
ACL_FLOW_PRIORITY_MAX = ofproto_v1_2_parser.UINT16_MAX - 2
class RestFirewallAPI(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_2.OFP_VERSION]
_CONTEXTS = {'dpset': dpset.DPSet,
'wsgi': WSGIApplication}
def __init__(self, *args, **kwargs):
super(RestFirewallAPI, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
wsgi = kwargs['wsgi']
self.waiters = {}
self.data = {}
self.data['dpset'] = self.dpset
self.data['waiters'] = self.waiters
mapper = wsgi.mapper
wsgi.registory['FirewallController'] = self.data
path = '/firewall'
requirements = {'switchid': SWITCHID_PATTERN}
uri = path + '/module/status'
mapper.connect('firewall', uri,
controller=FirewallController, action='get_status',
conditions=dict(method=['GET']))
uri = path + '/module/enable/{switchid}'
mapper.connect('firewall', uri,
controller=FirewallController, action='set_enable',
conditions=dict(method=['PUT']),
requirements=requirements)
uri = path + '/module/disable/{switchid}'
mapper.connect('firewall', uri,
controller=FirewallController, action='set_disable',
conditions=dict(method=['PUT']),
requirements=requirements)
uri = path + '/rules/{switchid}'
mapper.connect('firewall', uri,
controller=FirewallController, action='get_rules',
conditions=dict(method=['GET']),
requirements=requirements)
mapper.connect('firewall', uri,
controller=FirewallController, action='set_rule',
conditions=dict(method=['POST']),
requirements=requirements)
mapper.connect('firewall', uri,
controller=FirewallController, action='delete_rule',
conditions=dict(method=['DELETE']),
requirements=requirements)
def stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
if msg.flags & dp.ofproto.OFPSF_REPLY_MORE:
return
del self.waiters[dp.id][msg.xid]
lock.set()
@set_ev_cls(dpset.EventDP, dpset.DPSET_EV_DISPATCHER)
def handler_datapath(self, ev):
if ev.enter:
FirewallController.regist_ofs(ev.dp)
else:
FirewallController.unregist_ofs(ev.dp)
# for OpenFlow version1.0
@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_0(self, ev):
self.stats_reply_handler(ev)
# for OpenFlow version1.2
@set_ev_cls(ofp_event.EventOFPStatsReply, MAIN_DISPATCHER)
def stats_reply_handler_v1_2(self, ev):
self.stats_reply_handler(ev)
class FirewallOfs(object):
def __init__(self, dp):
super(FirewallOfs, self).__init__()
self.dp = dp
self.ctl = FirewallOfctl(dp)
self.cookie = 0
def get_cookie(self):
self.cookie += 1
self.cookie &= ofproto_v1_2_parser.UINT64_MAX
return self.cookie
class FirewallOfsList(dict):
def __init__(self):
super(FirewallOfsList, self).__init__()
def get_ofs(self, dp_id):
if len(self) == 0:
raise ValueError('firewall sw is not connected.')
dps = {}
if dp_id == REST_ALL:
dps = self
else:
try:
dpid = dpid_lib.str_to_dpid(dp_id)
except:
raise ValueError('Invalid switchID.')
if dpid in self:
dps = {dpid: self[dpid]}
else:
msg = 'firewall sw is not connected. : switchID=%s' % dp_id
raise ValueError(msg)
return dps
class FirewallController(ControllerBase):
_OFS_LIST = FirewallOfsList()
def __init__(self, req, link, data, **config):
super(FirewallController, self).__init__(req, link, data, **config)
self.dpset = data['dpset']
self.waiters = data['waiters']
@staticmethod
def regist_ofs(dp):
try:
f_ofs = FirewallOfs(dp)
except OFPUnknownVersion, message:
mes = 'dpid=%s : %s' % (dpid_lib.dpid_to_str(dp.id), message)
LOG.info(mes)
return
FirewallController._OFS_LIST.setdefault(dp.id, f_ofs)
f_ofs.ctl.set_disable_flow()
f_ofs.ctl.set_arp_flow()
LOG.info('dpid=%s : Join as firewall switch.' %
dpid_lib.dpid_to_str(dp.id))
@staticmethod
def unregist_ofs(dp):
if dp.id in FirewallController._OFS_LIST:
del FirewallController._OFS_LIST[dp.id]
LOG.info('dpid=%s : Leave firewall switch.' %
dpid_lib.dpid_to_str(dp.id))
# GET /firewall/module/status
def get_status(self, req, **_kwargs):
try:
dps = self._OFS_LIST.get_ofs(REST_ALL)
except ValueError, message:
return Response(status=400, body=str(message))
msgs = {}
for f_ofs in dps.values():
status = f_ofs.ctl.get_status(self.waiters)
msgs.update(status)
body = json.dumps(msgs)
return Response(content_type='application/json', body=body)
# POST /firewall/module/enable/{switchid}
def set_enable(self, req, switchid, **_kwargs):
try:
dps = self._OFS_LIST.get_ofs(switchid)
except ValueError, message:
return Response(status=400, body=str(message))
msgs = {}
for f_ofs in dps.values():
msg = f_ofs.ctl.set_enable_flow()
msgs.update(msg)
body = json.dumps(msgs)
return Response(content_type='application/json', body=body)
# POST /firewall/module/disable/{switchid}
def set_disable(self, req, switchid, **_kwargs):
try:
dps = self._OFS_LIST.get_ofs(switchid)
except ValueError, message:
return Response(status=400, body=str(message))
msgs = {}
for f_ofs in dps.values():
msg = f_ofs.ctl.set_disable_flow()
msgs.update(msg)
body = json.dumps(msgs)
return Response(content_type='application/json', body=body)
# GET /firewall/rules/{switchid}
def get_rules(self, req, switchid, **_kwargs):
try:
dps = self._OFS_LIST.get_ofs(switchid)
except ValueError, message:
return Response(status=400, body=str(message))
msgs = {}
for f_ofs in dps.values():
rules = f_ofs.ctl.get_rules(self.waiters)
msgs.update(rules)
body = json.dumps(msgs)
return Response(content_type='application/json', body=body)
# POST /firewall/rules/{switchid}
def set_rule(self, req, switchid, **_kwargs):
try:
rule = eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
try:
dps = self._OFS_LIST.get_ofs(switchid)
except ValueError, message:
return Response(status=400, body=str(message))
msgs = {}
for f_ofs in dps.values():
try:
msg = f_ofs.ctl.set_rule(f_ofs.get_cookie(), rule)
msgs.update(msg)
except ValueError, message:
return Response(status=400, body=str(message))
body = json.dumps(msgs)
return Response(content_type='application/json', body=body)
# DELETE /firewall/rules/{switchid}
def delete_rule(self, req, switchid, **_kwargs):
try:
ruleid = eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
try:
dps = self._OFS_LIST.get_ofs(switchid)
except ValueError, message:
return Response(status=400, body=str(message))
msgs = {}
for f_ofs in dps.values():
try:
msg = f_ofs.ctl.delete_rule(ruleid, self.waiters)
msgs.update(msg)
except ValueError, message:
return Response(status=400, body=str(message))
body = json.dumps(msgs)
return Response(content_type='application/json', body=body)
class FirewallOfctl(object):
_OFCTL = {ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
ofproto_v1_2.OFP_VERSION: ofctl_v1_2}
def __init__(self, dp):
super(FirewallOfctl, self).__init__()
self.dp = dp
version = dp.ofproto.OFP_VERSION
if version not in self._OFCTL:
raise OFPUnknownVersion(version=version)
self.ofctl = self._OFCTL[version]
def get_status(self, waiters):
msgs = self.ofctl.get_flow_stats(self.dp, waiters)
status = REST_STATUS_ENABLE
if str(self.dp.id) in msgs:
flow_stats = msgs[str(self.dp.id)]
for flow_stat in flow_stats:
if flow_stat['priority'] == STATUS_FLOW_PRIORITY:
status = REST_STATUS_DISABLE
msg = {REST_STATUS: status}
switch_id = '%s: %s' % (REST_SWITCHID,
dpid_lib.dpid_to_str(self.dp.id))
return {switch_id: msg}
def set_disable_flow(self):
cookie = 0
priority = STATUS_FLOW_PRIORITY
match = {}
actions = []
flow = self._to_of_flow(cookie=cookie, priority=priority,
match=match, actions=actions)
cmd = self.dp.ofproto.OFPFC_ADD
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
msg = {'result': 'success',
'details': 'firewall stopped.'}
switch_id = '%s: %s' % (REST_SWITCHID,
dpid_lib.dpid_to_str(self.dp.id))
return {switch_id: msg}
def set_enable_flow(self):
cookie = 0
priority = STATUS_FLOW_PRIORITY
match = {}
actions = []
flow = self._to_of_flow(cookie=cookie, priority=priority,
match=match, actions=actions)
cmd = self.dp.ofproto.OFPFC_DELETE_STRICT
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
msg = {'result': 'success',
'details': 'firewall running.'}
switch_id = '%s: %s' % (REST_SWITCHID,
dpid_lib.dpid_to_str(self.dp.id))
return {switch_id: msg}
def set_arp_flow(self):
cookie = 0
priority = ARP_FLOW_PRIORITY
match = {REST_DL_TYPE: ether.ETH_TYPE_ARP}
action = {REST_ACTION: REST_ACTION_ALLOW}
actions = Action.to_openflow(self.dp, action)
flow = self._to_of_flow(cookie=cookie, priority=priority,
match=match, actions=actions)
cmd = self.dp.ofproto.OFPFC_ADD
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
def set_rule(self, cookie, rest):
priority = int(rest.get(REST_PRIORITY, 0))
if priority < 0 or ACL_FLOW_PRIORITY_MAX < priority:
raise ValueError('Invalid priority value. Set [0-%d]'
% ACL_FLOW_PRIORITY_MAX)
match = Match.to_openflow(rest)
actions = Action.to_openflow(self.dp, rest)
flow = self._to_of_flow(cookie=cookie, priority=priority,
match=match, actions=actions)
cmd = self.dp.ofproto.OFPFC_ADD
try:
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
except:
raise ValueError('Invalid rule parameter.')
msg = {'result': 'success',
'details': 'Rule added. : rule_id=%d' % cookie}
switch_id = '%s: %s' % (REST_SWITCHID,
dpid_lib.dpid_to_str(self.dp.id))
return {switch_id: msg}
def get_rules(self, waiters):
rules = {}
msgs = self.ofctl.get_flow_stats(self.dp, waiters)
if str(self.dp.id) in msgs:
flow_stats = msgs[str(self.dp.id)]
for flow_stat in flow_stats:
if (flow_stat[REST_PRIORITY] != STATUS_FLOW_PRIORITY
and flow_stat[REST_PRIORITY] != ARP_FLOW_PRIORITY):
rule = self._to_rest_rule(flow_stat)
rules.update(rule)
switch_id = '%s: %s' % (REST_SWITCHID,
dpid_lib.dpid_to_str(self.dp.id))
return {switch_id: rules}
def delete_rule(self, rest, waiters):
try:
if rest[REST_RULE_ID] == REST_ALL:
rule_id = REST_ALL
else:
rule_id = int(rest[REST_RULE_ID])
except:
raise ValueError('Invalid ruleID.')
delete_list = []
msgs = self.ofctl.get_flow_stats(self.dp, waiters)
if str(self.dp.id) in msgs:
flow_stats = msgs[str(self.dp.id)]
for flow_stat in flow_stats:
cookie = flow_stat[REST_COOKIE]
priority = flow_stat[REST_PRIORITY]
if (priority != STATUS_FLOW_PRIORITY
and priority != ARP_FLOW_PRIORITY):
if rule_id == REST_ALL or rule_id == cookie:
match = Match.to_del_openflow(flow_stat[REST_MATCH])
delete_list.append([cookie, priority, match])
if rule_id == cookie:
break
if len(delete_list) == 0:
            msg_details = 'Rule does not exist.'
if rule_id != REST_ALL:
msg_details += ' : ruleID=%d' % rule_id
msg = {'result': 'failure',
'details': msg_details}
else:
cmd = self.dp.ofproto.OFPFC_DELETE_STRICT
actions = []
msg_details = 'Rule deleted. : ruleID='
for cookie, priority, match in delete_list:
flow = self._to_of_flow(cookie=cookie, priority=priority,
match=match, actions=actions)
self.ofctl.mod_flow_entry(self.dp, flow, cmd)
msg_details += '%d,' % cookie
msg = {'result': 'success',
'details': msg_details}
switch_id = '%s: %s' % (REST_SWITCHID,
dpid_lib.dpid_to_str(self.dp.id))
return {switch_id: msg}
def _to_of_flow(self, cookie, priority, match, actions):
flow = {'cookie': cookie,
'priority': priority,
'flags': 0,
'idle_timeout': 0,
'hard_timeout': 0,
'match': match,
'actions': actions}
return flow
def _to_rest_rule(self, flow):
rule_id = '%s: %d' % (REST_RULE_ID, flow[REST_COOKIE])
rule = {REST_PRIORITY: flow[REST_PRIORITY]}
rule.update(Match.to_rest(flow))
rule.update(Action.to_rest(flow))
return {rule_id: rule}
class Match(object):
_CONVERT = {REST_DL_TYPE:
{REST_DL_TYPE_ARP: ether.ETH_TYPE_ARP,
REST_DL_TYPE_IPV4: ether.ETH_TYPE_IP},
REST_NW_PROTO:
{REST_NW_PROTO_TCP: inet.IPPROTO_TCP,
REST_NW_PROTO_UDP: inet.IPPROTO_UDP,
REST_NW_PROTO_ICMP: inet.IPPROTO_ICMP}}
@staticmethod
def to_openflow(rest):
match = {}
set_dltype_flg = False
for key, value in rest.items():
if (key == REST_SRC_IP or key == REST_DST_IP
or key == REST_NW_PROTO):
if (REST_DL_TYPE in rest) is False:
set_dltype_flg = True
elif (rest[REST_DL_TYPE] != REST_DL_TYPE_IPV4
and rest[REST_DL_TYPE] != REST_DL_TYPE_ARP):
continue
elif key == REST_TP_SRC or key == REST_TP_DST:
if ((REST_NW_PROTO in rest) is False
or (rest[REST_NW_PROTO] != REST_NW_PROTO_TCP
and rest[REST_NW_PROTO] != REST_NW_PROTO_UDP)):
continue
if key in Match._CONVERT:
if value in Match._CONVERT[key]:
match.setdefault(key, Match._CONVERT[key][value])
else:
raise ValueError('Invalid rule parameter. : key=%s' % key)
else:
match.setdefault(key, value)
if set_dltype_flg:
match.setdefault(REST_DL_TYPE, ether.ETH_TYPE_IP)
return match
@staticmethod
def to_rest(openflow):
of_match = openflow[REST_MATCH]
mac_dontcare = mac.haddr_to_str(mac.DONTCARE)
ip_dontcare = '0.0.0.0'
match = {}
for key, value in of_match.items():
if key == REST_SRC_MAC or key == REST_DST_MAC:
if value == mac_dontcare:
continue
elif key == REST_SRC_IP or key == REST_DST_IP:
if value == ip_dontcare:
continue
elif value == 0:
continue
if key in Match._CONVERT:
conv = Match._CONVERT[key]
conv = dict((value, key) for key, value in conv.items())
match.setdefault(key, conv[value])
else:
match.setdefault(key, value)
return match
@staticmethod
def to_del_openflow(of_match):
mac_dontcare = mac.haddr_to_str(mac.DONTCARE)
ip_dontcare = '0.0.0.0'
match = {}
for key, value in of_match.items():
if key == REST_SRC_MAC or key == REST_DST_MAC:
if value == mac_dontcare:
continue
elif key == REST_SRC_IP or key == REST_DST_IP:
if value == ip_dontcare:
continue
elif value == 0:
continue
match.setdefault(key, value)
return match
class Action(object):
@staticmethod
def to_openflow(dp, rest):
value = rest.get(REST_ACTION, REST_ACTION_ALLOW)
if value == REST_ACTION_ALLOW:
out_port = dp.ofproto.OFPP_NORMAL
action = [{'type': 'OUTPUT',
'port': out_port}]
elif value == REST_ACTION_DENY:
action = []
else:
raise ValueError('Invalid action type.')
return action
@staticmethod
def to_rest(openflow):
if REST_ACTION in openflow:
if len(openflow[REST_ACTION]) > 0:
action = {REST_ACTION: REST_ACTION_ALLOW}
else:
action = {REST_ACTION: REST_ACTION_DENY}
else:
action = {REST_ACTION: 'Unknown action type.'}
return action
| {
"content_hash": "78b719de23168ccf74f0a0fafcc21172",
"timestamp": "",
"source": "github",
"line_count": 668,
"max_line_length": 78,
"avg_line_length": 32.205089820359284,
"alnum_prop": 0.5474829173058151,
"repo_name": "samrussell/ryu",
"id": "e67ccd590c9e997233028a904df1a1cd6f5a1c28",
"size": "22124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ryu/app/rest_firewall.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2112847"
},
{
"name": "Shell",
"bytes": "11953"
}
],
"symlink_target": ""
} |
"""Tracing unittests presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for
details on the presubmit API built into gcl.
"""
PRESUBMIT_VERSION = '2.0.0'
USE_PYTHON3 = True
def RunUnittests(input_api, output_api):
results = []
# Run Pylint over the files in the directory.
pylint_checks = input_api.canned_checks.GetPylint(input_api,
output_api,
version='2.6')
results.extend(input_api.RunTests(pylint_checks))
results.extend(
input_api.canned_checks.RunUnitTestsInDirectory(
input_api,
output_api,
'.',
files_to_check=[r'.+_unittest\.py$'],
run_on_python2=not USE_PYTHON3,
skip_shebang_check=True))
return results
def CheckChangeOnUpload(input_api, output_api):
return RunUnittests(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return RunUnittests(input_api, output_api)
| {
"content_hash": "5c8b09a723f4b0781646eebfddafaa58",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 29.571428571428573,
"alnum_prop": 0.6280193236714976,
"repo_name": "ric2b/Vivaldi-browser",
"id": "395a4faf3848768d23fe9fba7108d3025d29201a",
"size": "1197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/tools/tracing/PRESUBMIT.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from passlib.handlers.pbkdf2 import pbkdf2_sha512
PBKDF2SHA512_HEADER = "$pbkdf2-sha512$12000$"
CUSTOM_HEADER = "biicode$"
def encrypt(password):
'''encrypt a plain password'''
enc = pbkdf2_sha512.encrypt(password, rounds=12000)
enc = enc.replace(PBKDF2SHA512_HEADER, CUSTOM_HEADER, 1)
return enc
def verify(password, encoded):
'''verifies if encoded password correspond to plain password'''
if encoded == "" or password == "" or encoded is None or password is None:
return False
encoded = encoded.replace(CUSTOM_HEADER, PBKDF2SHA512_HEADER, 1)
#print encoded
ver = pbkdf2_sha512.verify(password, encoded)
return ver
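# Illustrative usage (the password and the elided hash are placeholders;
# the 'biicode$' prefix comes from CUSTOM_HEADER above):
#   stored = encrypt('s3cret')      # e.g. 'biicode$<salt>$<checksum>'
#   verify('s3cret', stored)        # True
#   verify('wrong', stored)         # False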
| {
"content_hash": "c88cad9f19a1ceb917bf3c1c56dfca89",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 78,
"avg_line_length": 31.904761904761905,
"alnum_prop": 0.7074626865671642,
"repo_name": "biicode/bii-server",
"id": "799cf441685304ee2d59fe767870b9289ffb03c1",
"size": "1182",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils/passlib_pbkdf2_sha512_wrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "400132"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# Django
from django.views.generic.base import TemplateView
class DummyView(TemplateView):
template_name = "Landing Page"
| {
"content_hash": "e6b6e67c29b50c6186f914a540c57473",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 50,
"avg_line_length": 21.125,
"alnum_prop": 0.7692307692307693,
"repo_name": "craiglabenz/django-grapevine",
"id": "8e208f1c87bbe5d055ab36caa9938a4c8975172e",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/core/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "92191"
},
{
"name": "Python",
"bytes": "167458"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
import sys
import pytsk3
import datetime
import pyewf
import argparse
import hashlib
class ewf_Img_Info(pytsk3.Img_Info):
def __init__(self, ewf_handle):
self._ewf_handle = ewf_handle
super(ewf_Img_Info, self).__init__(
url="", type=pytsk3.TSK_IMG_TYPE_EXTERNAL)
def close(self):
self._ewf_handle.close()
def read(self, offset, size):
self._ewf_handle.seek(offset)
return self._ewf_handle.read(size)
def get_size(self):
return self._ewf_handle.get_media_size()
argparser = argparse.ArgumentParser(description='Extract the $MFT from all of the NTFS partitions of an E01')
argparser.add_argument(
'-i', '--image',
dest='imagefile',
action="store",
type=str,
default=None,
required=True,
help='E01 to extract from'
)
args = argparser.parse_args()
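# Illustrative invocation (the evidence file name is a placeholder):
#   python dfirwizard-v7.py -i evidence.E01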
filenames = pyewf.glob(args.imagefile)
ewf_handle = pyewf.handle()
ewf_handle.open(filenames)
imagehandle = ewf_Img_Info(ewf_handle)
partitionTable = pytsk3.Volume_Info(imagehandle)
for partition in partitionTable:
print partition.addr, partition.desc, "%ss(%s)" % (partition.start, partition.start * 512), partition.len
if 'NTFS' in partition.desc:
filesystemObject = pytsk3.FS_Info(imagehandle, offset=(partition.start*512))
fileobject = filesystemObject.open("/$MFT")
print "File Inode:",fileobject.info.meta.addr
print "File Name:",fileobject.info.name.name
print "File Creation Time:",datetime.datetime.fromtimestamp(fileobject.info.meta.crtime).strftime('%Y-%m-%d %H:%M:%S')
outFileName = str(partition.addr)+fileobject.info.name.name
print outFileName
outfile = open(outFileName, 'w')
filedata = fileobject.read_random(0,fileobject.info.meta.size)
md5hash = hashlib.md5()
md5hash.update(filedata)
print "MD5 Hash",md5hash.hexdigest()
sha1hash = hashlib.sha1()
sha1hash.update(filedata)
print "SHA1 Hash",sha1hash.hexdigest()
outfile.write(filedata)
        outfile.close()
| {
"content_hash": "659d762a91e5bb9743eaee87763bbb9b",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 122,
"avg_line_length": 33.91525423728814,
"alnum_prop": 0.6921539230384808,
"repo_name": "dlcowen/dfirwizard",
"id": "407a15f6c522050b638c9a753ea07f97531d51c2",
"size": "2119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfirwizard-v7.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "181073"
}
],
"symlink_target": ""
} |
import json
from time import gmtime, strftime
from flask import render_template, make_response
from .ipxebase import PXEProvider
class StaticPXEProvider(PXEProvider):
def __init__(self, *args, **kwargs):
pass
def generate_ipxe_script(self, *args, **kwargs):
"""
        Returns an iPXE script rendered from the provided kwargs.
        :param args: unused positional arguments
        :param kwargs: expects a "request" and a "server_data" entry
        :return: a Flask response containing the rendered iPXE script
"""
request = kwargs.get("request")
server_data = kwargs.get("server_data")
timestamp = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime())
server_data_dump = self._indenter(server_data)
data = render_template("pigs.ipxe",
server_data=server_data,
server_data_dump=server_data_dump,
server_hostname=server_data["hostname"],
terraform_ip=request.args.get("terraform_ip"),
request=request,
timestamp=timestamp)
r = make_response(data)
r.mimetype = "text/plain"
return r
def _indenter(self, text_to_indent):
"""
Transforms the indented json.dumps() output into a commented form to go
into the iPXE script. This would have been less hackish if the textwrap
module in Python 2.7 wasn't so awful.
"""
temp = ""
for line in json.dumps(text_to_indent, indent=2).split('\n'):
temp += "# %s\n" % line
return temp.strip()
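# Illustrative behaviour of _indenter above (the server_data dict is a
# placeholder): _indenter({"hostname": "node01"}) returns the
# json.dumps(..., indent=2) text with every line prefixed by "# ", e.g.
#   # {
#   #   "hostname": "node01"
#   # }
# so the dump can be embedded as comments in the rendered iPXE script.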
| {
"content_hash": "f00e713ebdb25a7be27b0b6ece92fb16",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 33.4468085106383,
"alnum_prop": 0.544529262086514,
"repo_name": "virtdevninja/steel_pigs",
"id": "8a05f83d84eeb9133a99addab80ad1988f1af6f8",
"size": "2194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "steel_pigs/plugins/providers/static_pxe_provider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1175"
},
{
"name": "Python",
"bytes": "55641"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_site_anp_epg_staticleaf
short_description: Manage site-local EPG static leafs in schema template
description:
- Manage site-local EPG static leafs in schema template on Cisco ACI Multi-Site.
author:
- Dag Wieers (@dagwieers)
version_added: '2.8'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
site:
description:
- The name of the site.
type: str
required: yes
template:
description:
- The name of the template.
type: str
required: yes
anp:
description:
- The name of the ANP.
type: str
epg:
description:
- The name of the EPG.
type: str
pod:
description:
- The pod of the static leaf.
type: str
leaf:
description:
- The path of the static leaf.
type: str
aliases: [ name ]
vlan:
description:
- The VLAN id of the static leaf.
type: int
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
notes:
- The ACI MultiSite PATCH API has a deficiency requiring some objects to be referenced by index.
  This can cause silent corruption on concurrent access when changing/removing an object as
the wrong object may be referenced. This module is affected by this deficiency.
seealso:
- module: mso_schema_site_anp_epg
- module: mso_schema_template_anp_epg
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a new static leaf to a site EPG
mso_schema_site_anp_epg_staticleaf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
leaf: Leaf1
vlan: 123
state: present
delegate_to: localhost
- name: Remove a static leaf from a site EPG
mso_schema_site_anp_epg_staticleaf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
leaf: Leaf1
state: absent
delegate_to: localhost
- name: Query a specific site EPG static leaf
mso_schema_site_anp_epg_staticleaf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
epg: EPG1
leaf: Leaf1
state: query
delegate_to: localhost
register: query_result
- name: Query all site EPG static leafs
mso_schema_site_anp_epg_staticleaf:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema1
site: Site1
template: Template1
anp: ANP1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
site=dict(type='str', required=True),
template=dict(type='str', required=True),
anp=dict(type='str', required=True),
epg=dict(type='str', required=True),
pod=dict(type='str'), # This parameter is not required for querying all objects
leaf=dict(type='str', aliases=['name']),
vlan=dict(type='int'),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['pod', 'leaf', 'vlan']],
['state', 'present', ['pod', 'leaf', 'vlan']],
],
)
schema = module.params['schema']
site = module.params['site']
template = module.params['template']
anp = module.params['anp']
epg = module.params['epg']
pod = module.params['pod']
leaf = module.params['leaf']
vlan = module.params['vlan']
state = module.params['state']
leafpath = 'topology/{0}/node-{1}'.format(pod, leaf)
mso = MSOModule(module)
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if not schema_obj:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
schema_id = schema_obj['id']
# Get site
site_id = mso.lookup_site(site)
# Get site_idx
sites = [(s['siteId'], s['templateName']) for s in schema_obj['sites']]
if (site_id, template) not in sites:
        mso.fail_json(msg="Provided site/template '{0}-{1}' does not exist. Existing sites/templates: {2}".format(site, template, ', '.join('{0}-{1}'.format(*s) for s in sites)))
# Schema-access uses indexes
site_idx = sites.index((site_id, template))
# Path-based access uses site_id-template
site_template = '{0}-{1}'.format(site_id, template)
# Get ANP
anp_ref = mso.anp_ref(schema_id=schema_id, template=template, anp=anp)
anps = [a['anpRef'] for a in schema_obj['sites'][site_idx]['anps']]
if anp_ref not in anps:
mso.fail_json(msg="Provided anp '{0}' does not exist. Existing anps: {1}".format(anp, ', '.join(anps)))
anp_idx = anps.index(anp_ref)
# Get EPG
epg_ref = mso.epg_ref(schema_id=schema_id, template=template, anp=anp, epg=epg)
epgs = [e['epgRef'] for e in schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs']]
if epg_ref not in epgs:
mso.fail_json(msg="Provided epg '{0}' does not exist. Existing epgs: {1}".format(epg, ', '.join(epgs)))
epg_idx = epgs.index(epg_ref)
# Get Leaf
leafs = [(l['path'], l['portEncapVlan']) for l in schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs'][epg_idx]['staticLeafs']]
if (leafpath, vlan) in leafs:
leaf_idx = leafs.index((leafpath, vlan))
# FIXME: Changes based on index are DANGEROUS
leaf_path = '/sites/{0}/anps/{1}/epgs/{2}/staticLeafs/{3}'.format(site_template, anp, epg, leaf_idx)
mso.existing = schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs'][epg_idx]['staticLeafs'][leaf_idx]
if state == 'query':
if leaf is None or vlan is None:
mso.existing = schema_obj['sites'][site_idx]['anps'][anp_idx]['epgs'][epg_idx]['staticLeafs']
elif not mso.existing:
mso.fail_json(msg="Static leaf '{leaf}/{vlan}' not found".format(leaf=leaf, vlan=vlan))
mso.exit_json()
leafs_path = '/sites/{0}/anps/{1}/epgs/{2}/staticLeafs'.format(site_template, anp, epg)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=leaf_path))
elif state == 'present':
payload = dict(
path=leafpath,
portEncapVlan=vlan,
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=leaf_path, value=mso.sent))
else:
ops.append(dict(op='add', path=leafs_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
| {
"content_hash": "b34daaef0c2f67291502b4bcddee5181",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 148,
"avg_line_length": 29.788235294117648,
"alnum_prop": 0.6270405476566614,
"repo_name": "thaim/ansible",
"id": "022f394a3b4f5c4c22aa13d58c9098edb0ddacb4",
"size": "7797",
"binary": false,
"copies": "7",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/aci/mso_schema_site_anp_epg_staticleaf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
} |
from flask_testing import TestCase
from logging import LogRecord
from connector.app import app
from connector.utils import ConnectorLogFormatter, JsonLogFormatter
from datetime import datetime
class TestUtils(TestCase):
def create_app(self):
app.config.update({'TESTING': True})
return app
def test_json_log_formatter_format(self):
formatter = JsonLogFormatter()
record = LogRecord('fake_name', 'DEBUG', None, None, 'fake_message',
None, None)
record.reseller_name = 'fake_reseller_name'
# as return result of formatter.format is a string we will check the substring
actual_record = formatter.format(record)
assert 'fake_reseller_name' in actual_record
assert datetime.now().isoformat(' ')[:-7] in actual_record
record.msg = {
'text': 'fake_text'
}
actual_record = formatter.format(record)
assert 'fake_text' in actual_record
def test_connector_log_formatter_format(self):
formatter = ConnectorLogFormatter()
record = LogRecord('fake_name', 'DEBUG', None, None, 'fake_message',
None, None)
record.reseller_name = 'fake_reseller_name'
# as return result of formatter.format is a string we will check the substring
actual_record = formatter.format(record)
assert 'fake_reseller_name' in actual_record
assert datetime.now().isoformat(' ')[:-7] in actual_record
record.msg = {
'text': 'fake_text'
}
actual_record = formatter.format(record)
assert 'fake_text' in actual_record
def test_connector_log_formatter_dict_format(self):
formatter = ConnectorLogFormatter()
message = {'message': 'fake_message', 'type': 'message_type'}
record = LogRecord('fake_name', 'DEBUG', None, None, message,
None, None)
record.reseller_name = 'fake_reseller_name'
# as return result of formatter.format is a string we will check the substring
actual_record = formatter.format(record)
assert 'fake_reseller_name' in actual_record
assert 'MESSAGE_TYPE' in actual_record
assert datetime.now().isoformat(' ')[:-7] in actual_record
record.msg = {
'text': 'fake_text'
}
actual_record = formatter.format(record)
assert 'fake_text' in actual_record
| {
"content_hash": "d4ee953b4f4d68ea18db692062d94210",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 86,
"avg_line_length": 41.559322033898304,
"alnum_prop": 0.6272430668841762,
"repo_name": "ingrammicro/fallball-connector",
"id": "fd8e87677ec76281881a438111b1c5cf4ab1e02b",
"size": "2452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "4386"
},
{
"name": "Dockerfile",
"bytes": "382"
},
{
"name": "Python",
"bytes": "106160"
},
{
"name": "Shell",
"bytes": "475"
}
],
"symlink_target": ""
} |
import pytest
@pytest.fixture
def base_reference(testapp, lab, award):
item = {
'award': award['@id'],
'lab': lab['@id']
}
return testapp.post_json('/reference', item).json['@graph'][0]
@pytest.fixture
def upgrade_18_19_reference(testapp, lab, award, gene):
item = {
'award': award['@id'],
'lab': lab['@id'],
'examined_loci': [gene['@id']],
'reference_type': 'functional elements'
}
return testapp.post_json('/reference', item).json['@graph'][0]
@pytest.fixture
def reference_19(base_reference):
item = base_reference.copy()
item.update({
'internal_tags': ['RegulomeDB'],
'schema_version': '19'
})
return item
@pytest.fixture
def reference(lab, award):
return {
'award': award['@id'],
'lab': lab['@id']
}
@pytest.fixture
def upgrade_20_21_reference_a(lab, award, gene):
item = {
'award': award['@id'],
'lab': lab['@id'],
'examined_loci': [gene['@id']],
'reference_type': 'functional elements',
'elements_selection_method': ['point mutations', 'DNase-seq'],
'schema_version': '20'
}
return item
@pytest.fixture
def upgrade_20_21_reference_b(lab, award, gene):
item = {
'award': award['@id'],
'lab': lab['@id'],
'examined_loci': [gene['@id']],
'reference_type': 'functional elements',
'elements_selection_method': ['candidate cis-regulatory elements', 'GRO-cap'],
'schema_version': '20'
}
return item
@pytest.fixture
def upgrade_20_21_reference_c(lab, award, gene):
item = {
'award': award['@id'],
'lab': lab['@id'],
'examined_loci': [gene['@id']],
'reference_type': 'functional elements',
'elements_selection_method': ['point mutations', 'single nucleotide polymorphisms'],
'schema_version': '20'
}
return item
| {
"content_hash": "df88215c9937aa8dd8cd2900de243606",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 92,
"avg_line_length": 25.07792207792208,
"alnum_prop": 0.5613671672708441,
"repo_name": "ENCODE-DCC/encoded",
"id": "d0b83961e52f56f714926973cdb12a91db736346",
"size": "1931",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/encoded/tests/fixtures/schemas/reference.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AngelScript",
"bytes": "741"
},
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "Gherkin",
"bytes": "48806"
},
{
"name": "HTML",
"bytes": "371973"
},
{
"name": "JavaScript",
"bytes": "3493156"
},
{
"name": "Jsonnet",
"bytes": "15159"
},
{
"name": "Makefile",
"bytes": "875"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "2845978"
},
{
"name": "SCSS",
"bytes": "403800"
},
{
"name": "Shell",
"bytes": "30525"
}
],
"symlink_target": ""
} |
import sys
import copy
import numpy
import theano
import theano.tensor as T
from layer import Layer, LinearLayer
import pdb
class NeuralizedPCALayer(Layer):
def __init__(self, n_in, n_out, init_w, init_bvis, varin=None):
"""
        The difference between a neuralized PCA layer and a normal linear
        layer is that the biases are on the visible side, and the weights
        are initialized as the PCA transforming matrix.
        Though it is literally called a PCA layer, it is also used as a
        building block for other transformations, like ZCA.
"""
super(NeuralizedPCALayer, self).__init__(n_in, n_out, varin=varin)
if (not init_w) or (not init_bvis):
raise TypeError("You should specify value for init_w and " + \
"init_bvis while instantiating this object.")
# else: TODO assert that they are of valid type.
self.w = init_w
self.bvis = init_bvis
self.params = [self.w, self.bvis]
def fanin(self):
return T.dot(self.varin - self.bvis, self.w)
def output(self, fanin=None):
if fanin == None: fanin = self.fanin()
return fanin
def activ_prime(self):
return 1.
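# In other words, for an input x this layer computes (x - bvis) . w with an
# identity activation: the bias is subtracted on the visible side before the
# projection, whereas the LinearLayer used for the backward mapping below
# adds its bias on the hidden side (x . w + b).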
class PCA(object):
"""
A theano based PCA capable of using GPU.
"""
"""
considering to make PCA a layer object
def __init__(self, n_in, n_out, varin=None):
pca_forward_w = theano.shared(
value=pca_forward, name='pca_fwd', borrow=True
)
pca_forward_bvis = theano.shared(
value = self.mean, name='pca_fwd_bvis', borrow=True
)
self.forward_layer = NeuralizedPCALayer(
n_in=self.ndim, n_out=self.retain,
init_w=pca_forward_w, init_bvis=pca_forward_bvis
)
pca_backward_w = theano.shared(
value=pca_backward, name='pca_bkwd', borrow=True
)
pca_backward_bvis = theano.shared(
value=self.mean, name='pca_bkwd_bvis', borrow=True
)
self.backward_layer = LinearLayer(
n_in=self.retain, n_out=self.ndim,
init_w=pca_backward_w, init_b=pca_backward_bvis
)
"""
def fit(self, data, retain=None, verbose=True, whiten=False):
"""
Part of the code is adapted from Roland Memisevic's code.
        fit() deals with small datasets, i.e., those datasets that can be
        loaded into memory at once. It establishes two layer objects:
        self.forward_layer (a NeuralizedPCALayer) and self.backward_layer
        (a LinearLayer). They define how the data is mapped after the PCA
        mapping is learned.
"""
self.retain = retain
assert isinstance(data, numpy.ndarray), \
"data has to be a numpy ndarray."
data = data.copy().astype(theano.config.floatX)
ncases, self.ndim = data.shape
# centralizing data
"""
If you don\'t centralize the dataset, then you are still going to get
perfect reconstruction from the forward/backward mapping matrices, but
1. the eigenvalues you get will no longer match the variance of each
        principal component,
        2. the \'principal component\' you get will no longer match the
projection of largest variance, and
        3. the output will not be centered at the initial data center, nor
        at the origin. However, the shape of the data scatter would
still remain intact.
It just rotates the data by an unwanted angle and shifts the data by an
unexpected vector.
"""
if verbose:
print "Centralizing data... ",
data_variable = T.matrix('data_variable')
np_ncases = numpy.array([ncases]).astype(theano.config.floatX)
fun_partmean = theano.function(
inputs=[data_variable],
outputs=T.sum(data_variable / np_ncases, axis=0)
)
self.mean = numpy.zeros(self.ndim, dtype=theano.config.floatX)
self.mean += fun_partmean(data)
data -= self.mean
if verbose: print "Done."
# compute convariance matrix
if verbose:
print "Computing covariance... ",
covmat = theano.shared(
value=numpy.zeros((self.ndim, self.ndim),
dtype=theano.config.floatX),
name='covmat',
borrow=True
)
fun_update_covmat = theano.function(
inputs=[data_variable],
outputs=[],
updates={covmat: covmat + \
T.dot(data_variable.T, data_variable) / np_ncases}
)
fun_update_covmat(data)
self.covmat = covmat.get_value()
if verbose: print "Done."
# compute eigenvalue and eigenvector
if verbose: print "Eigen-decomposition...",; sys.stdout.flush()
        # u should be a real-valued vector, which stands for the variance of
        # data at each PC. v should be a real-valued orthogonal matrix.
u, v_unsorted = numpy.linalg.eigh(self.covmat)
self.v = v_unsorted[:, numpy.argsort(u)[::-1]]
u.sort()
u = u[::-1]
# throw away some eigenvalues for numerical stability
self.stds = numpy.sqrt(u[u > 0.])
self.variance_fracs = (self.stds ** 2).cumsum() / (self.stds ** 2).sum()
self.maxPCs = self.stds.shape[0]
if verbose: print "Done. Maximum stable PCs: %d" % self.maxPCs
        # decide the number of principal components.
error_info = "Wrong \"retain\" value. Should be " + \
"a real number within the interval of (0, 1), " + \
"an integer in (0, maxPCs], None, or \'mle\'."
if self.retain == None:
self.retain = self.maxPCs
elif self.retain == 'mle':
            raise NotImplementedError("Adaptive dimension matching, " + \
                "not implemented yet...")
elif isinstance(self.retain, int):
assert (self.retain > 0 and self.retain <= self.maxPCs), error_info
elif isinstance(self.retain, float):
assert (self.retain > 0 and self.retain < 1), error_info
self.retain = numpy.sum(self.variance_fracs < self.retain) + 1
if verbose:
print "Number of selected PCs: %d, ratio of retained variance: %f"%\
(self.retain, self.variance_fracs[self.retain-1])
self._build_layers(whiten)
def fit_partwise(self, data_genfun, data_resetfun, ncases, ndim,
retain=None, verbose=True, whiten=False):
"""
        fit_partwise() is for computing PCA on large datasets. The data is
        produced part by part by a generator, and at each iteration the
        yielded part should be a single 2-d numpy.ndarray.
        The method establishes two layer objects: self.forward_layer (a
        NeuralizedPCALayer) and self.backward_layer (a LinearLayer). They
        define how the data is mapped after the PCA mapping is learned.
"""
self.retain = retain
self.ndim = ndim
# centralizing data
if verbose:
print "Centralizing data..."
data_variable = T.matrix('data_variable')
np_ncases = numpy.array([ncases]).astype(theano.config.floatX)
fun_partmean = theano.function(
inputs=[data_variable],
outputs=T.sum(data_variable / np_ncases, axis=0)
)
self.mean = numpy.zeros(self.ndim, dtype=theano.config.floatX)
data_resetfun()
data_generator = data_genfun()
for data_part in data_generator:
assert isinstance(data_part, numpy.ndarray), (
"data_genfun has to be a generator function yielding "
"numpy.ndarray.")
data_part = data_part.astype(theano.config.floatX)
_, self.ndim = data_part.shape
self.mean += fun_partmean(data_part)
if verbose:
print ".",
sys.stdout.flush()
if verbose: print "Done."
# compute convariance matrix
if verbose:
print "Computing covariance..."
covmat = theano.shared(
value=numpy.zeros((self.ndim, self.ndim),
dtype=theano.config.floatX),
name='covmat',
borrow=True
)
fun_update_covmat = theano.function(
inputs=[data_variable],
outputs=[],
updates={covmat: covmat + \
T.dot(data_variable.T, data_variable) / np_ncases}
)
data_resetfun()
data_generator = data_genfun()
for data_part in data_generator:
data_part = data_part.astype(theano.config.floatX) - self.mean
fun_update_covmat(data_part)
if verbose:
print ".",
sys.stdout.flush()
self.covmat = covmat.get_value()
if verbose: print "Done."
# compute eigenvalue and eigenvector
if verbose: print "Eigen-decomposition...",; sys.stdout.flush()
        # u should be a real-valued vector, which stands for the variance of
        # data at each PC. v should be a real-valued orthogonal matrix.
u, v_unsorted = numpy.linalg.eigh(self.covmat)
self.v = v_unsorted[:, numpy.argsort(u)[::-1]]
u.sort()
u = u[::-1]
# throw away some eigenvalues for numerical stability
self.stds = numpy.sqrt(u[u > 0.])
self.variance_fracs = (self.stds ** 2).cumsum() / (self.stds ** 2).sum()
self.maxPCs = self.stds.shape[0]
if verbose: print "Done. Maximum stable PCs: %d" % self.maxPCs
        # decide the number of principal components.
error_info = "Wrong \"retain\" value. Should be " + \
"a real number within the interval of (0, 1), " + \
"an integer in (0, maxPCs], None, or \'mle\'."
if self.retain == None:
self.retain = self.maxPCs
elif self.retain == 'mle':
            raise NotImplementedError("Adaptive dimension matching, " + \
                "not implemented yet...")
elif isinstance(self.retain, int):
assert (self.retain > 0 and self.retain <= self.maxPCs), error_info
elif isinstance(self.retain, float):
assert (self.retain > 0 and self.retain < 1), error_info
self.retain = numpy.sum(self.variance_fracs < self.retain) + 1
if verbose:
print "Number of selected PCs: %d, ratio of retained variance: %f"%\
(self.retain, self.variance_fracs[self.retain-1])
self._build_layers(whiten)
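    # Illustrative use of fit_partwise() above (file names, shapes and the
    # case/dimension counts are placeholders; the only requirements are the
    # ones stated in the docstring: the generator yields 2-d numpy arrays
    # and the reset function restarts it):
    #   def reset():
    #       pass
    #   def gen_parts():
    #       for i in range(10):
    #           yield numpy.load('part_%d.npy' % i)   # each part: (n_i, 1000)
    #   pca = PCA()
    #   pca.fit_partwise(gen_parts, reset, ncases=100000, ndim=1000,
    #                    retain=0.99, whiten=True)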
def _build_layers(self, whiten):
# decide if or not to whiten data
if whiten:
pca_forward = self.v[:, :self.retain] / self.stds[:self.retain]
pca_backward = (self.v[:, :self.retain] * self.stds[:self.retain]).T
else:
pca_forward = self.v[:, :self.retain]
pca_backward = pca_forward.T
# build transforming layers
pca_forward_w = theano.shared(
value=pca_forward, name='pca_fwd', borrow=True
)
pca_forward_bvis = theano.shared(
value = self.mean, name='pca_fwd_bvis', borrow=True
)
self.forward_layer = NeuralizedPCALayer(
n_in=self.ndim, n_out=self.retain,
init_w=pca_forward_w, init_bvis=pca_forward_bvis
)
pca_backward_w = theano.shared(
value=pca_backward, name='pca_bkwd', borrow=True
)
pca_backward_bvis = theano.shared(
value=self.mean, name='pca_bkwd_bvis', borrow=True
)
self.backward_layer = LinearLayer(
n_in=self.retain, n_out=self.ndim,
init_w=pca_backward_w, init_b=pca_backward_bvis
)
self.outdim = self.retain
def forward(self, data, batch_size=10000, verbose=True):
"""
Maps the given data to its PCA representation, in a batchwise manner.
Batchwise mapping is not strictly necessary here, but it paves the way
for a future out-of-core version, which will allow PCA mapping on
arbitrarily large datasets.
Parameters
------------
data : numpy.ndarray
Data to be mapped.
Returns
------------
numpy.ndarray object.
"""
assert hasattr(self, 'forward_layer'), 'Please fit the model first.'
data = data.astype(theano.config.floatX)
ncases, ndim = data.shape
assert ndim == self.ndim, \
'Given data dimension doesn\'t match the learned model.'
nbatches = (ncases + batch_size - 1) / batch_size
map_function = theano.function(
[self.forward_layer.varin],
self.forward_layer.output()
)
if verbose:
print "Transforming, %d dots to punch:" % nbatches,
pcaed_data = []
for bidx in range(nbatches):
if verbose:
print ".",
sys.stdout.flush()
start = bidx * batch_size
end = min((bidx + 1) * batch_size, ncases)
pcaed_data.append(map_function(data[start:end, :]))
pcaed_data = numpy.concatenate(pcaed_data, axis=0)
if verbose: print "Done."
return pcaed_data
def backward(self, data, batch_size=10000, verbose=True):
"""
The same as forward(), but in the reverse direction: maps data from the
PCA space back to the original data space.
Parameters
------------
data : numpy.ndarray
Data to be mapped.
Returns
------------
numpy.ndarray object.
"""
assert hasattr(self, 'backward_layer'), 'Please fit the model first.'
data = data.astype(theano.config.floatX)
ncases, ndim = data.shape
assert ndim == self.outdim, \
'Given data dimension doesn\'t match the learned model.'
nbatches = (ncases + batch_size - 1) / batch_size
map_function = theano.function(
[self.backward_layer.varin],
self.backward_layer.output()
)
if verbose:
print "Transforming, %d dots to punch:" % nbatches,
recons_data = []
for bidx in range(nbatches):
if verbose:
print ".",
sys.stdout.flush()
start = bidx * batch_size
end = min((bidx + 1) * batch_size, ncases)
recons_data.append(map_function(data[start:end, :]))
recons_data = numpy.concatenate(recons_data, axis=0)
if verbose: print "Done."
return recons_data
def energy_dist(self):
"""
Return the cumulative fraction of variance retained by the leading PCs.
"""
assert hasattr(self, 'variance_fracs'), \
"The model has not been fitted."
return self.variance_fracs
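# A minimal usage sketch for the two mapping methods above (illustrative
# only: it assumes `pca` is an already-fitted PCA instance, and the array
# shape is made up). forward() projects data onto the retained PCs and
# backward() reconstructs it, so the round trip approximates the input up
# to the variance discarded with the dropped components:
#
#   x = numpy.random.randn(100, pca.ndim).astype(theano.config.floatX)
#   codes = pca.forward(x, verbose=False)        # shape (100, pca.retain)
#   recons = pca.backward(codes, verbose=False)  # shape (100, pca.ndim)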
class ZCA(PCA):
def _build_layers(self, whiten):
# decide if or not to whiten data
if whiten:
zca_forward = numpy.dot(
self.v[:, :self.retain] / self.stds[:self.retain],
self.v[:, :self.retain].T
)
zca_backward = numpy.dot(
self.v[:, :self.retain],
(self.v[:, :self.retain] * self.stds[:self.retain]).T
)
else:
zca_forward = numpy.dot(
self.v[:, :self.retain],
self.v[:, :self.retain].T
)
zca_backward = zca_forward
# build transforming layers
zca_forward_w = theano.shared(
value=zca_forward, name='zca_fwd', borrow=True
)
zca_forward_bvis = theano.shared(
value=self.mean, name='zca_fwd_bvis', borrow=True
)
self.forward_layer = NeuralizedPCALayer(
n_in=self.ndim, n_out=self.ndim,
init_w=zca_forward_w, init_bvis=zca_forward_bvis
)
zca_backward_w = theano.shared(
value=zca_backward, name='zca_bkwd', borrow=True
)
zca_backward_bvis = theano.shared(
value=self.mean, name='zca_bkwd_bvis', borrow=True
)
self.backward_layer = LinearLayer(
n_in=self.ndim, n_out=self.ndim,
init_w=zca_backward_w, init_b=zca_backward_bvis
)
self.outdim = self.ndim
class SubtractMean(Layer):
def __init__(self, n_in, varin=None):
"""
For each sample, subtract its own mean value. The output for a given
sample is therefore fixed and does not depend on the other samples.
"""
super(SubtractMean, self).__init__(n_in, n_in, varin=varin)
def output(self):
return (self.varin.T - self.varin.mean(axis=1)).T
def _print_str(self):
return " (" + self.__class__.__name__ + ")"
class SubtractMeanAndNormalizeH(Layer):
def __init__(self, n_in, varin=None):
"""
This is also a sample-by-sample process. For each sample, subtract its
mean value and then normalize the values *within* the sample. The output
for a given sample is therefore fixed, regardless of the other samples.
"""
super(SubtractMeanAndNormalizeH, self).__init__(n_in, n_in, varin=varin)
def output(self):
mean_zero = (self.varin.T - self.varin.mean(axis=1)).T
return (mean_zero.T / (mean_zero.std(axis=1) + 1e-10)).T
def _print_str(self):
return " (" + self.__class__.__name__ + ")"
class SubtractMeanAndNormalize(SubtractMeanAndNormalizeH):
def __init__(self, n_in, varin=None):
super(SubtractMeanAndNormalize, self).__init__(n_in, varin=varin)
print ("\nWarning: SubtractMeanAndNormalize is depreciated, use "
"SubtractMeanAndNormalizeH instead.\n")
class SubtractMeanAndNormalizeV(Layer):
def __init__(self, n_in, varin=None):
"""
This is a process which normalizes each dimension across the whole
dataset. So before applying it, this layer itself needs to be fitted.
"""
super(SubtractMeanAndNormalizeV, self).__init__(n_in, n_in, varin=varin)
self.bias = theano.shared(
value=numpy.zeros(n_in, dtype=theano.config.floatX),
name='bias_meanormV',
borrow=True)
self.scale = theano.shared(
value=numpy.zeros(n_in, dtype=theano.config.floatX),
name='scale_meanormV',
borrow=True)
def fit(self, data):
self.bias.set_value(-data.mean(axis=0))
self.scale.set_value(data.std(axis=0))
def output(self):
return (self.varin + self.bias) / (self.scale + 1e-10)
def _print_str(self):
return " (" + self.__class__.__name__ + ")"
| {
"content_hash": "0f35f749d897a44dfdacaeab4f42f21a",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 80,
"avg_line_length": 38.39511201629328,
"alnum_prop": 0.5637598132824103,
"repo_name": "hantek/NeuroBricks",
"id": "42bb782dae4bbb49de1be1aa013d54c452973fce",
"size": "18852",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neurobricks/preprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "196076"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/bio_engineer/creature/shared_creature_guf_drolg.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "abad20f53d830681e363aea66a328423",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 95,
"avg_line_length": 24.923076923076923,
"alnum_prop": 0.7037037037037037,
"repo_name": "anhstudios/swganh",
"id": "6b49d1a14677794a557473b1a2f131d1e3bd90eb",
"size": "469",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/bio_engineer/creature/shared_creature_guf_drolg.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
"""
Interpolate OpenType Layout tables (GDEF / GPOS / GSUB).
"""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.ttLib import TTFont
from fontTools.varLib import models, VarLibError, load_designspace
from fontTools.varLib.merger import InstancerMerger
import os.path
import logging
from pprint import pformat
log = logging.getLogger("fontTools.varLib.interpolate_layout")
def interpolate_layout(designspace_filename, loc, master_finder=lambda s:s, mapped=False):
"""
Interpolate GPOS from a designspace file and location.
If master_finder is set, it should be a callable that takes a master
filename as found in the designspace file and maps it to the master
font binary to be opened (e.g. .ttf or .otf).
If mapped is False (default), then location is mapped using the
map element of the axes in designspace file. If mapped is True,
it is assumed that location is in designspace's internal space and
no mapping is performed.
"""
axes, internal_axis_supports, base_idx, normalized_master_locs, masters, instances = load_designspace(designspace_filename)
log.info("Building interpolated font")
log.info("Loading master fonts")
basedir = os.path.dirname(designspace_filename)
master_ttfs = [master_finder(os.path.join(basedir, m['filename'])) for m in masters]
master_fonts = [TTFont(ttf_path) for ttf_path in master_ttfs]
#font = master_fonts[base_idx]
font = TTFont(master_ttfs[base_idx])
log.info("Location: %s", pformat(loc))
if not mapped:
loc = {name:axes[name].map_forward(v) for name,v in loc.items()}
log.info("Internal location: %s", pformat(loc))
loc = models.normalizeLocation(loc, internal_axis_supports)
log.info("Normalized location: %s", pformat(loc))
# Assume single-model for now.
model = models.VariationModel(normalized_master_locs)
assert 0 == model.mapping[base_idx]
merger = InstancerMerger(font, model, loc)
log.info("Building interpolated tables")
merger.mergeTables(font, master_fonts, ['GPOS'])
return font
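# A short programmatic example (illustrative: the designspace path, the
# location, and the output name are made up; see main() below for the
# command-line equivalent):
#
#   finder = lambda s: s.replace('master_ufo',
#                                'master_ttf_interpolatable').replace('.ufo', '.ttf')
#   font = interpolate_layout('MyFamily.designspace', {'wght': 700}, finder)
#   font.save('MyFamily-instance.ttf')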
def main(args=None):
from fontTools import configLogger
import sys
if args is None:
args = sys.argv[1:]
designspace_filename = args[0]
locargs = args[1:]
outfile = os.path.splitext(designspace_filename)[0] + '-instance.ttf'
# TODO: allow user to configure logging via command-line options
configLogger(level="INFO")
finder = lambda s: s.replace('master_ufo', 'master_ttf_interpolatable').replace('.ufo', '.ttf')
loc = {}
for arg in locargs:
tag,val = arg.split('=')
loc[tag] = float(val)
font = interpolate_layout(designspace_filename, loc, finder)
log.info("Saving font %s", outfile)
font.save(outfile)
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
sys.exit(main())
import doctest
sys.exit(doctest.testmod().failed)
| {
"content_hash": "d5df27bfeb35e9309e0f4aed0676cd11",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 124,
"avg_line_length": 30.78021978021978,
"alnum_prop": 0.7361656551231703,
"repo_name": "Pal3love/otRebuilder",
"id": "ca9ccfeb0eac9e89b11b0dd7f6f7aecea442527f",
"size": "2801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Package/otRebuilder/Dep/fontTools/varLib/interpolate_layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2756220"
}
],
"symlink_target": ""
} |
from fellar.models import FacebookGroupPost, FacebookGroup
from flask import Blueprint, render_template
from glask import redirect_for
app = Blueprint('cellar', __name__)
@app.route('/')
def index():
return redirect_for('.group_list')
@app.route('/group/')
def group_list():
groups = FacebookGroup.query.all()
return render_template('cellar/group/list.html', groups=groups)
@app.route('/group/<int:id>/post')
def post_list(id):
group = FacebookGroup.query.get_or_404(id)
posts = FacebookGroupPost.query.with_parent(group).all()
return render_template('cellar/post/list.html', group=group, posts=posts)
| {
"content_hash": "3c698490b3163090edf5052889fe61d2",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 27.52173913043478,
"alnum_prop": 0.717219589257504,
"repo_name": "sungmin-park/fellar",
"id": "c5f100b16657ef83bedd3d132792b2e81d3bd8ca",
"size": "633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fellar/controllers/cellar.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1889"
}
],
"symlink_target": ""
} |
from django.db import models
# Create your models here.
class Drivers(models.Model):
name = models.CharField(max_length=100, blank=False)
address = models.CharField(max_length=200, blank=False)
scca_number = models.IntegerField(null=True)
emergency_name = models.CharField(max_length=100, blank=False)
emergency_phone = models.CharField(max_length=10, blank=False)
novice = models.BooleanField(default=True)
default_car = models.ForeignKey('Cars', null=True)
default_number = models.ForeignKey('Numbers', null=True)
def __str__(self):
return self.name
class Cars(models.Model):
make = models.CharField(max_length=50, blank=False)
model = models.CharField(max_length=200, blank=False)
year = models.CharField(max_length=4)
car_class = models.ForeignKey('Classes')
driver_list = models.ManyToManyField('Drivers')
def __str__(self):
return "{0} {1} {2} ({3})".format(self.year, self.make, self.model, self.car_class.name)
class Numbers(models.Model):
number = models.IntegerField(blank=False)
driver = models.ForeignKey('Drivers')
def __str__(self):
return str(self.number)
class Events(models.Model):
location = models.CharField(max_length=200, blank=False)
name = models.CharField(max_length=200, blank=False)
date = models.DateField()
def __str__(self):
return "{0} ({1})".format(self.name, self.date)
class Runs(models.Model):
time = models.DecimalField(max_digits=10, decimal_places=3, blank=False)
cones = models.IntegerField()
gates = models.IntegerField()
driver = models.ForeignKey('EventDrivers')
class Classes(models.Model):
name = models.CharField(max_length=20, blank=False)
abbr = models.CharField(max_length=4, null=True)
parent = models.ForeignKey('self', blank=True, null=True)
def __str__(self):
return self.name
class EventDrivers(models.Model):
event = models.ForeignKey('Events')
driver = models.ForeignKey('Drivers')
car = models.ForeignKey('Cars')
number = models.ForeignKey('Numbers')
class Meta:
unique_together = ('event', 'driver')
def __str__(self):
return "{0} - {1}".format(self.number.number, self.driver.name)
| {
"content_hash": "2cc01bfb6ae93eebc63cd045a8f84d9c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 96,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.6782222222222222,
"repo_name": "tahosa/openracer-python",
"id": "837f1389019698ac5e07a949b15bbc91720a9315",
"size": "2250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "852"
},
{
"name": "HTML",
"bytes": "12635"
},
{
"name": "JavaScript",
"bytes": "15335"
},
{
"name": "Python",
"bytes": "17003"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('general', '0005_tag'),
('finance', '0004_auto_20180424_1009'),
]
operations = [
migrations.RenameField(
model_name='financeentity',
old_name='linked_person',
new_name='canonical_person',
),
migrations.AddField(
model_name='financeentity',
name='canonical_organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='finance_entities', to='general.Organization'),
),
migrations.AddField(
model_name='financeentity',
name='related_people',
field=models.ManyToManyField(blank=True, related_name='finance_entities_all', to='general.Person'),
),
]
| {
"content_hash": "9ece045b8beb020a3bd945eeef4c5f87",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 164,
"avg_line_length": 32.266666666666666,
"alnum_prop": 0.6157024793388429,
"repo_name": "access-missouri/am-django-project",
"id": "9849031ab0a490d0553103c86c4a68bca30cb63b",
"size": "1039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "am/finance/migrations/0005_auto_20180507_1055.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "208381"
},
{
"name": "HTML",
"bytes": "75543"
},
{
"name": "JavaScript",
"bytes": "68836"
},
{
"name": "Makefile",
"bytes": "803"
},
{
"name": "Python",
"bytes": "241729"
},
{
"name": "Ruby",
"bytes": "105"
}
],
"symlink_target": ""
} |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Settings specific to chartforge:
CHART_FORGE = {
# list of all the apps containing charts
'chart_apps': [
'tests'
],
'chart_templates': [
os.path.join(BASE_DIR, 'tests/templates')
]
}
SECRET_KEY = 'secret test key'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'chartforge',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'tests', 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, '.tests.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
ROOT_URLCONF = 'tests.urls'
| {
"content_hash": "6823c2200f30bf66aebeb1541b33a25c",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 70,
"avg_line_length": 25.064935064935064,
"alnum_prop": 0.6362694300518135,
"repo_name": "featherweightweb/django-chartforge",
"id": "455e8edb81a74d3e83d6e6aca66d07510a12ce73",
"size": "1930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22663"
}
],
"symlink_target": ""
} |
import helper
# print("hello")
if __name__ == '__main__':
helper.greeting("hello")
| {
"content_hash": "a8ab45d61515e78174192e5ca4c83732",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 28,
"avg_line_length": 11.75,
"alnum_prop": 0.5425531914893617,
"repo_name": "tmg6hk/cs3240-labdemo",
"id": "a053eaecb3d5e1836743a61ec707d53cf26ab91f",
"size": "94",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143"
}
],
"symlink_target": ""
} |
"""
@file reboottime.py
"""
##
# @addtogroup pnp pnp
# @brief This is pnp component
# @{
# @addtogroup reboottime reboottime
# @brief This is reboottime module
# @{
##
import os
import time
from datetime import datetime
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.helper import collect_pnp_log
class RebootTimeTest(oeRuntimeTest):
"""The case will measure system reboot time
@class RebootTimeTest
"""
def test_reboottime(self):
"""Measure system reboot time
@fn test_reboottime
@param self
@return
"""
filename = os.path.basename(__file__)
casename = os.path.splitext(filename)[0]
(status, out)=self.target.run('date +"%m-%d %H:%M:%S"; reboot &')
print "\nReboot start time: %s\n" % (out)
start_t = datetime.strptime(out,"%m-%d %H:%M:%S")
#print start_t
time.sleep(60)
(status, out)=self.target.run("journalctl -b -a >/tmp/system.log")
##
# TESTPOINT: #1, test_reboottime
#
self.assertEqual(status, 0, msg="Error messages: %s" % out)
(status, out)=self.target.run(" cat /tmp/system.log | "
"grep 'Starting Login' | "
"awk '{print $1, $2, $3}'")
##
# TESTPOINT: #2, test_reboottime
#
self.assertEqual(status, 0, msg="Error messages: %s" % out)
print "\nReboot end time: %s\n" % (out)
end_t = datetime.strptime(out,"%b %d %H:%M:%S")
#print end_t
used_t = end_t -start_t
reboot_time = used_t.total_seconds()
reboot_time_str = str(reboot_time) + "s"
if(reboot_time <= 0):
print "please check system date:\n"
print reboot_time_str
##
# TESTPOINT: #3, test_reboottime
#
self.assertEqual(-1, 0, reboot_time_str)
else:
collect_pnp_log(casename, casename, reboot_time_str)
print "\n%s:%s\n" % (casename, reboot_time_str)
##
# TESTPOINT: #4, test_reboottime
#
self.assertEqual(status, 0, reboot_time_str)
##
# @}
# @}
##
| {
"content_hash": "3d99fffb2abd12db4ac24eae48568316",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 74,
"avg_line_length": 26.97530864197531,
"alnum_prop": 0.5395881006864989,
"repo_name": "daweiwu/meta-iotqa-1",
"id": "ddcca5ceb5b21ed84aac7d2f91d0a56df0961547",
"size": "2230",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/oeqa/runtime/pnp/reboottime.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "BitBake",
"bytes": "6677"
},
{
"name": "C",
"bytes": "5625"
},
{
"name": "Java",
"bytes": "504"
},
{
"name": "JavaScript",
"bytes": "32196"
},
{
"name": "M4",
"bytes": "5945"
},
{
"name": "Makefile",
"bytes": "392"
},
{
"name": "Python",
"bytes": "524122"
},
{
"name": "Shell",
"bytes": "10369"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('frontend', '0056_auto_20190903_1301'),
]
operations = [
migrations.AddField(
model_name='measure',
name='denominator_bnf_codes',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=15), default=[], size=None),
preserve_default=False,
),
migrations.AddField(
model_name='measure',
name='denominator_bnf_codes_query',
field=models.CharField(max_length=10000, null=True),
),
migrations.AddField(
model_name='measure',
name='denominator_is_list_of_bnf_codes',
field=models.BooleanField(default=True),
),
]
| {
"content_hash": "483f1a935ea0baaadeb3f54bf42f9d27",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 127,
"avg_line_length": 30.366666666666667,
"alnum_prop": 0.6070252469813392,
"repo_name": "annapowellsmith/openpresc",
"id": "8a8f1758ca6a6c5a602604eede9e09daf7557b12",
"size": "985",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "openprescribing/frontend/migrations/0057_auto_20190904_1924.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "95907"
},
{
"name": "HTML",
"bytes": "68653"
},
{
"name": "JavaScript",
"bytes": "14332669"
},
{
"name": "Makefile",
"bytes": "103"
},
{
"name": "Python",
"bytes": "352287"
},
{
"name": "Shell",
"bytes": "3537"
}
],
"symlink_target": ""
} |
"""Metadata request handler."""
import hashlib
import hmac
import os
from oslo_config import cfg
from oslo_log import log as logging
import six
import webob.dec
import webob.exc
from nova.api.metadata import base
from nova import conductor
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import memorycache
from nova import utils
from nova import wsgi
CACHE_EXPIRATION = 15 # in seconds
CONF = cfg.CONF
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
metadata_proxy_opts = [
cfg.BoolOpt(
'service_metadata_proxy',
default=False,
help='Set flag to indicate Neutron will proxy metadata requests and '
'resolve instance ids.'),
cfg.StrOpt(
'metadata_proxy_shared_secret',
default='', secret=True,
help='Shared secret used to validate proxied Neutron metadata requests'),
]
CONF.register_opts(metadata_proxy_opts, 'neutron')
LOG = logging.getLogger(__name__)
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
self._cache = memorycache.get_client()
self.conductor_api = conductor.API()
def get_metadata_by_remote_address(self, address):
if not address:
raise exception.FixedIpNotFoundForAddress(address=address)
cache_key = 'metadata-%s' % address
data = self._cache.get(cache_key)
if data:
return data
try:
data = base.get_metadata_by_address(self.conductor_api, address)
except exception.NotFound:
return None
self._cache.set(cache_key, data, CACHE_EXPIRATION)
return data
def get_metadata_by_instance_id(self, instance_id, address):
cache_key = 'metadata-%s' % instance_id
data = self._cache.get(cache_key)
if data:
return data
try:
data = base.get_metadata_by_instance_id(self.conductor_api,
instance_id, address)
except exception.NotFound:
return None
self._cache.set(cache_key, data, CACHE_EXPIRATION)
return data
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if os.path.normpath(req.path_info) == "/":
resp = base.ec2_md_print(base.VERSIONS + ["latest"])
req.response.body = resp
req.response.content_type = base.MIME_TYPE_TEXT_PLAIN
return req.response
if CONF.neutron.service_metadata_proxy:
meta_data = self._handle_instance_id_request(req)
else:
if req.headers.get('X-Instance-ID'):
LOG.warning(
_LW("X-Instance-ID present in request headers. The "
"'service_metadata_proxy' option must be "
"enabled to process this header."))
meta_data = self._handle_remote_ip_request(req)
if meta_data is None:
raise webob.exc.HTTPNotFound()
try:
data = meta_data.lookup(req.path_info)
except base.InvalidMetadataPath:
raise webob.exc.HTTPNotFound()
if callable(data):
return data(req, meta_data)
resp = base.ec2_md_print(data)
if isinstance(resp, six.text_type):
req.response.text = resp
else:
req.response.body = resp
req.response.content_type = meta_data.get_mimetype()
return req.response
def _handle_remote_ip_request(self, req):
remote_address = req.remote_addr
if CONF.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
try:
meta_data = self.get_metadata_by_remote_address(remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for ip: %s'),
remote_address)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for ip: %s'),
remote_address)
return meta_data
def _handle_instance_id_request(self, req):
instance_id = req.headers.get('X-Instance-ID')
tenant_id = req.headers.get('X-Tenant-ID')
signature = req.headers.get('X-Instance-ID-Signature')
remote_address = req.headers.get('X-Forwarded-For')
# Ensure each required header is present and was passed exactly once
if instance_id is None:
msg = _('X-Instance-ID header is missing from request.')
elif signature is None:
msg = _('X-Instance-ID-Signature header is missing from request.')
elif tenant_id is None:
msg = _('X-Tenant-ID header is missing from request.')
elif not isinstance(instance_id, six.string_types):
msg = _('Multiple X-Instance-ID headers found within request.')
elif not isinstance(tenant_id, six.string_types):
msg = _('Multiple X-Tenant-ID headers found within request.')
else:
msg = None
if msg:
raise webob.exc.HTTPBadRequest(explanation=msg)
expected_signature = hmac.new(
CONF.neutron.metadata_proxy_shared_secret,
instance_id,
hashlib.sha256).hexdigest()
if not utils.constant_time_compare(expected_signature, signature):
if instance_id:
LOG.warning(_LW('X-Instance-ID-Signature: %(signature)s does '
'not match the expected value: '
'%(expected_signature)s for id: '
'%(instance_id)s. Request From: '
'%(remote_address)s'),
{'signature': signature,
'expected_signature': expected_signature,
'instance_id': instance_id,
'remote_address': remote_address})
msg = _('Invalid proxy request signature.')
raise webob.exc.HTTPForbidden(explanation=msg)
try:
meta_data = self.get_metadata_by_instance_id(instance_id,
remote_address)
except Exception:
LOG.exception(_LE('Failed to get metadata for instance id: %s'),
instance_id)
msg = _('An unknown error has occurred. '
'Please try your request again.')
raise webob.exc.HTTPInternalServerError(
explanation=six.text_type(msg))
if meta_data is None:
LOG.error(_LE('Failed to get metadata for instance id: %s'),
instance_id)
elif meta_data.instance.project_id != tenant_id:
LOG.warning(_LW("Tenant_id %(tenant_id)s does not match tenant_id "
"of instance %(instance_id)s."),
{'tenant_id': tenant_id, 'instance_id': instance_id})
# causes a 404 to be raised
meta_data = None
return meta_data
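# Illustrative note (not part of nova): for the signature check in
# _handle_instance_id_request() above to pass, the metadata proxy (e.g. the
# Neutron metadata agent) must sign the instance id with the same shared
# secret before forwarding the request, along the lines of:
#
#   signature = hmac.new(shared_secret, instance_id,
#                        hashlib.sha256).hexdigest()
#   headers['X-Instance-ID-Signature'] = signature
#
# where shared_secret matches CONF.neutron.metadata_proxy_shared_secret.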
| {
"content_hash": "ce31dc4c32623e88f696a42fbb245c30",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 35.50239234449761,
"alnum_prop": 0.5654986522911051,
"repo_name": "affo/nova",
"id": "ff2e684bbc8adc5631af68bc22d28f9ef743f645",
"size": "8152",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/api/metadata/handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3223"
},
{
"name": "Python",
"bytes": "15659662"
},
{
"name": "Shell",
"bytes": "20716"
}
],
"symlink_target": ""
} |
__author__ = 'Jeff'
import re
import os
import zipfile
import smtplib
import getpass
from email import encoders
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
userEmail = ''
toEmail = ''
while 1:
userEmail = input("Please enter your email address: ")
if not re.match("[^@]+@[^@]+\.[^@]+", userEmail):
print("Invalid Email! try Again!")
else:
break
while 1:
toEmail = input("Who are you sending this to?:")
if not re.match("[^@]+@[^@]+\.[^@]+", toEmail):
print("Invalid Email! try Again!")
else:
break
passw = getpass.getpass('Enter your password:')
subject = 'Jeff Registre - All assignments'
body = 'Hello!! This is python 3.4, on behalf of Jeff Registre, ' \
'I am sending you this message.\n Attached to this email ' \
'are my assignments for this class in a nicely generated zip file'
print("Processing.............")
def getallpaths():
listdir = os.listdir("csc344")
checked = []
for curdir in listdir:
if os.path.isdir("csc344/" + curdir):
checked.append(curdir)
return checked
def getallfilenames(subdir):
filepath = "csc344/" + subdir
filenames = os.listdir(filepath)
return filenames
def getallfiles(subdir):
filepath = "csc344/" + subdir
filenames = os.listdir(filepath)
files = []
for name in filenames:
files.append(name + "\n" + open(filepath + "/" + name).read())
return files
def getregexed(regex, source):
return re.findall(regex, source)
def postprocessfile(filepath, name):
file = open(filepath, "r")
lines = file.readlines()
file.close()
file = open(filepath, "w")
added = []
if name == "a1C":
for line in lines:
if line[7:14] != "include" and line not in added:
file.write(line)
added.append(line)
elif name == "a5Python":
for line in lines:
if line[13:19] != "import" and line not in added:
file.write(line)
added.append(line)
else:
for line in lines:
line = line.strip()
if line not in added:
added.append(line)
file.write(line + "\n")
file.close()
def file_len(fname):
with open(fname) as f:
return sum(1 for _ in f)
def generatehtml():
filename = os.path.join("csc344/" + "a5/" + "index.html")
html = open(filename, "w")
html.write("<!DOCTYPE html>\n")
html.write("<html>\n")
html.write("<head lang=\"en\">\n")
html.write("<meta charset=\"UTF-8\">\n")
html.write("<title>Jeff R CSC344 Assignments</title>\n")
html.write("<link rel=\"stylesheet\" href=\"bootstrap.min.css\">\n")
html.write("</head>\n")
html.write("<body class=\"container\" style=\"width:95%; min-height:100vh; border:2px solid black\">\n")
alldirs = getallpaths()
for currentdir in alldirs:
allfilesindir = getallfilenames(currentdir)
rowtitle = ''
if currentdir == "a1":
rowtitle = "A1-C Assignment"
elif currentdir == "a2":
rowtitle = "A2-Lisp Assignment"
elif currentdir == "a3":
rowtitle = "A3-Scala Assignment"
elif currentdir == "a4":
rowtitle = "A4-Prolog Assignment"
elif currentdir == "a5":
rowtitle = "A5-Python Assignment"
html.write("<table class=\"table table-striped table-bordered table-hover \"> \n")
html.write("<thead style=\"font-family:Algerian; font-weight:bold \"> \n")
html.write("<tr>")
html.write("<th class=\"col-xs-8\">\n")
html.write(rowtitle + "\n")
html.write("</th>\n")
html.write("<th class= \"col-xs-4\"> \n")
html.write("Line Count \n")
html.write("</th> \n")
html.write("</tr> \n")
html.write("</thead> \n")
html.write("<tbody style=\"font-family:Arial \"> \n")
for filename in allfilesindir:
html.write("<tr> \n")
filepath = ""
filelinecount = 0
fullname = "csc344/" + currentdir + "/" + filename
filelinecount = file_len(fullname)
if currentdir == "a5":
filepath = filename
else:
filepath = "../" + currentdir + "/" + filename
if filename != "index.html":
html.write("<td class=\"col-xs-8\">\n")
html.write("<a " + "href=" + "\"" + filepath + "\"" + ">")
html.write(filename)
html.write("</a>\n")
html.write("</td>")
html.write("<td class=\"col-xs-4\">\n")
html.write(str(filelinecount))
html.write("</td>")
html.write("</tr>\n")
html.write("</tbody>\n")
html.write("</table>\n")
html.write("</body>\n")
html.write("</html>")
html.close()
def makezips():
alldirs = getallpaths()
archive = zipfile.ZipFile("csc344/archive.zip", "w", zipfile.ZIP_DEFLATED)
for currentdir in alldirs:
allfilesindir = getallfilenames(currentdir)
for file in allfilesindir:
archive.write("csc344/" + currentdir + "/" + file)
archive.close()
return "archive.zip"
def sendemail(zipname):
messagehead = MIMEMultipart()
messagehead['From'] = userEmail
messagehead['to'] = toEmail
messagehead['subject'] = subject
messagebody = MIMEText(body)
zipattachment = MIMEBase("Assignments", "zip")
ziped = open("csc344/" + zipname, "rb")
zipattachment.set_payload(ziped.read())
encoders.encode_base64(zipattachment)
zipattachment.add_header("Content-Disposition", "Attachment; filename= " + zipname)
messagehead.attach(messagebody)
messagehead.attach(zipattachment)
mailserver = smtplib.SMTP("smtp.gmail.com:587")
mailserver.starttls()
mailserver.login(userEmail, passw)
mailserver.sendmail(userEmail, toEmail, messagehead.as_string())
mailserver.quit()
def processprojects():
alldirs = getallpaths()
allregex = "[_a-zA-Z][_a-zA-z0-9]*"
lispregex = "[-*+/a-zA-Z_][-a-zA-Z0-9_]*"
for currentdir in alldirs:
allfilesindir = getallfiles(currentdir)
for currentfile in allfilesindir:
regexcurrent = getregexed(allregex, currentfile)
first = regexcurrent.pop(0)
second = regexcurrent.pop(0)
fscon = first + "." + second
if first == "a2Lisp":
regexcurrent = getregexed(lispregex, currentfile)
if second == "c" or second == "lsp" or second == "scala" or second == "pl" or second == "py":
filename = os.path.join("csc344/" + currentdir, first + "-SymbolsFile.txt")
file = open(filename, "w")
file.write("Symbols file for " + fscon)
for curgex in regexcurrent:
file.write("\n" + "[" + fscon + "," + curgex + "]")
file.close()
postprocessfile(filename, first)
generatehtml()
archivename = makezips()
sendemail(archivename)
print("done, check your email!")
processprojects()
| {
"content_hash": "04f591f395df345caa263984757ef72c",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 108,
"avg_line_length": 30.58823529411765,
"alnum_prop": 0.5677197802197802,
"repo_name": "jregistr/Academia",
"id": "13f44a995b6784a37da7d960fa4877d0aa7344e7",
"size": "7280",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CSC344-Programming-Languages/A5-Python/csc344/a5/a5Python.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9640"
},
{
"name": "C",
"bytes": "26783"
},
{
"name": "CSS",
"bytes": "161326"
},
{
"name": "Common Lisp",
"bytes": "4503"
},
{
"name": "FreeMarker",
"bytes": "31427"
},
{
"name": "GLSL",
"bytes": "3859"
},
{
"name": "Groovy",
"bytes": "314959"
},
{
"name": "HTML",
"bytes": "426918"
},
{
"name": "Java",
"bytes": "948334"
},
{
"name": "JavaScript",
"bytes": "708937"
},
{
"name": "Kotlin",
"bytes": "20648"
},
{
"name": "Makefile",
"bytes": "438"
},
{
"name": "Prolog",
"bytes": "1130"
},
{
"name": "Python",
"bytes": "24771"
},
{
"name": "Scala",
"bytes": "103751"
},
{
"name": "Shell",
"bytes": "20351"
},
{
"name": "TypeScript",
"bytes": "67347"
}
],
"symlink_target": ""
} |
from flask import render_template, request
from . import music
from . service import recommend
@music.route('/search', methods=['GET'])
def search():
return render_template('cloudmusic/search.html')
@music.route('/result', methods=['POST'])
def result():
data = request.values.get('username')
user = recommend(data)
return render_template('cloudmusic/result.html', user=user)
| {
"content_hash": "4c6f694ac0a610974c8e3f0755e556dc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 24.8125,
"alnum_prop": 0.707808564231738,
"repo_name": "JR--Chen/flasky",
"id": "aa9e853be4a0dedd51856f8b57a69a08aeae1997",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/cloudmusic/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "332831"
},
{
"name": "HTML",
"bytes": "40033"
},
{
"name": "JavaScript",
"bytes": "139994"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "113702"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name="apollo",
version = '0.2',
maintainer='Luminoso, LLC',
maintainer_email='dev@lumino.so',
license = "LICENSE",
url = 'http://github.com/LuminosoInsight/apollo',
platforms = ["any"],
description = "A library for monitoring queues from an Apache Apollo message broker",
packages=find_packages(),
install_requires=[
'gevent',
'requests >= 1.0',
'credservice',
'syncstomp >= 0.5.5'
],
entry_points={
'console_scripts':
['apollo-monitor = apollo.cli:start_monitor',
'apollo-purge-unused-queues = apollo.cli:purge_queues',
]},
)
| {
"content_hash": "8530ad7ccab50c43da10f0aa89293a6f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 89,
"avg_line_length": 29,
"alnum_prop": 0.5977011494252874,
"repo_name": "pombredanne/apollo-1",
"id": "6b124de5a7c565b00aad553f6a59af985b65702b",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""A server node for the key value store."""
from __future__ import absolute_import
import ctypes
import sys
import pickle
import logging
from .base import _LIB, check_call
from .kvstore import create
class KVStoreServer(object):
"""The key-value store server."""
def __init__(self, kvstore):
"""Initialize a new KVStoreServer.
Parameters
----------
kvstore : KVStore
"""
self.kvstore = kvstore
self.handle = kvstore.handle
self.init_logging = False
def _controller(self):
"""Return the server controller."""
def server_controller(cmd_id, cmd_body, _):
"""Server controler."""
if not self.init_logginig:
# the reason put the codes here is because we cannot get
# kvstore.rank earlier
head = '%(asctime)-15s Server[' + str(
self.kvstore.rank) + '] %(message)s'
logging.basicConfig(level=logging.DEBUG, format=head)
self.init_logginig = True
if cmd_id == 0: # what is the meaning?
try:
optimizer = pickle.loads(cmd_body) # deserialize the python object
except:
raise
self.kvstore.set_optimizer(optimizer)
else:
print ("server %d, unknown command (%d, %s)" % (
self.kvstore.rank, cmd_id, cmd_body))
return server_controller
def run(self):
"""Run the server, whose behavior is like.
>>> while receive(x):
... if is_command x: controller(x)
... else if is_key_value x: updater(x)
"""
_ctrl_proto = ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p, ctypes.c_void_p)
check_call(_LIB.MXKVStoreRunServer(self.handle, _ctrl_proto(self._controller()), None))
def _init_kvstore_server_module():
"""Start server/scheduler."""
is_worker = ctypes.c_int()
check_call(_LIB.MXKVStoreIsWorkerNode(ctypes.byref(is_worker)))
if is_worker.value == 0:
kvstore = create('dist') # start distributed parameter servers, whether sync or async will be specified by the master worker
server = KVStoreServer(kvstore)
server.run()
sys.exit()
_init_kvstore_server_module()
| {
"content_hash": "840e92aea10dba9512a125c70a0d976b",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 133,
"avg_line_length": 35,
"alnum_prop": 0.5735607675906184,
"repo_name": "yhpeng-git/mxnet",
"id": "d19c14ba2e750f339cae07ddacbb64fc5708ef43",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/kvstore_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "11053"
},
{
"name": "C",
"bytes": "88476"
},
{
"name": "C++",
"bytes": "4755504"
},
{
"name": "CMake",
"bytes": "147856"
},
{
"name": "Cuda",
"bytes": "3403191"
},
{
"name": "Java",
"bytes": "86766"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "141324"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "575202"
},
{
"name": "Perl 6",
"bytes": "21768"
},
{
"name": "Protocol Buffer",
"bytes": "78574"
},
{
"name": "Python",
"bytes": "2825418"
},
{
"name": "R",
"bytes": "255240"
},
{
"name": "Scala",
"bytes": "828520"
},
{
"name": "Shell",
"bytes": "120692"
}
],
"symlink_target": ""
} |
from flask_wtf import Form
from wtforms import StringField, SelectField, RadioField
from wtforms.validators import DataRequired
class AddAlarmForm(Form):
time_as_tuple = lambda arr: [(e, str(e) if len(str(e)) == 2 else '0' + str(e)) for e in arr]
mode_as_tuple = lambda arr: [(e.lower(), e) for e in arr]
name = StringField("Name")
hour = SelectField("Hour", choices=time_as_tuple(range(0, 24)))
minute = SelectField("Minutes", choices=time_as_tuple([0,15,30,45]))
mode = RadioField("Mode", choices=mode_as_tuple(["Weekdays", "Weekend", "All"]))
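# A minimal usage sketch (illustrative: the route, the template name and the
# create_alarm() helper are assumptions, not part of this module). A Flask
# view would typically validate the form on POST and read the coerced values:
#
#   @app.route('/alarms/new', methods=['GET', 'POST'])
#   def add_alarm():
#       form = AddAlarmForm()
#       if form.validate_on_submit():
#           create_alarm(form.name.data, int(form.hour.data),
#                        int(form.minute.data), form.mode.data)
#       return render_template('add_alarm.html', form=form)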
| {
"content_hash": "1d7c3856530d95061ee801e83b06182c",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 98,
"avg_line_length": 48.166666666666664,
"alnum_prop": 0.6833910034602076,
"repo_name": "simongarnier/rpi-alarm-web-ui",
"id": "f590a9b71e3aa533522ef2fe986d5e5c626ac2b9",
"size": "578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "735"
},
{
"name": "Python",
"bytes": "1759"
}
],
"symlink_target": ""
} |
import numpy as np
def learn_model(k, features, labels):
return k, features.copy(), labels.copy()
def plurality(xs):
from collections import defaultdict
counts = defaultdict(int)
for x in xs:
counts[x] += 1
maxv = max(counts.values())
for k, v in counts.items():
if v == maxv:
return k
def apply_model(features, model):
k, train_feats, labels = model
results = []
for f in features:
label_dist = []
for t, ell in zip(train_feats, labels):
label_dist.append((np.linalg.norm(f - t), ell))
label_dist.sort(key=lambda d_ell: d_ell[0])
label_dist = label_dist[:k]
results.append(plurality([ell for _, ell in label_dist]))
return np.array(results)
def accuracy(features, labels, model):
preds = apply_model(features, model)
return np.mean(preds == labels)
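# A tiny self-contained check (illustrative only: the toy points below are
# made up). It exercises learn_model / apply_model / accuracy end to end and
# runs only when this file is executed directly.
if __name__ == '__main__':
    feats = np.array([[0.0, 0.0], [0.1, 0.0], [1.0, 1.0], [0.9, 1.1]])
    labs = np.array([0, 0, 1, 1])
    model = learn_model(3, feats, labs)
    print(apply_model(np.array([[0.05, 0.05], [1.0, 0.9]]), model))  # [0 1]
    print(accuracy(feats, labs, model))                              # 1.0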
| {
"content_hash": "122fa9277a4ea952e7aa1780e5d36c6e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 65,
"avg_line_length": 26.058823529411764,
"alnum_prop": 0.6049661399548533,
"repo_name": "Nelca/buildMLSystem",
"id": "75519766935cf127cd660a3fcde82ea4d2413213",
"size": "1103",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ch02/knn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "69135856"
},
{
"name": "Makefile",
"bytes": "4816"
},
{
"name": "Python",
"bytes": "210634"
},
{
"name": "Shell",
"bytes": "635"
}
],
"symlink_target": ""
} |
import os
GITHUB_OAUTH_TOKEN = 'GITHUB_OAUTH_TOKEN'
GITHUB_REPOSITORIES = [
{
'org': 'codein',
'repo': 'poc'
},
]
##################
# LOCAL SETTINGS #
##################
# DO NOT PLACE ANYTHING BELOW THIS!!!!!!!
# Allow any settings to be defined in local_settings.py, which should be
# ignored in your version control system, allowing for settings to be
# defined per machine.
try:
from local_settings import * # NOQA
except ImportError as e:
if "local_settings" not in str(e):
raise e | {
"content_hash": "8dfc13e7f900df7d8982e400699c7d85",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 20.53846153846154,
"alnum_prop": 0.6067415730337079,
"repo_name": "codein/poc",
"id": "3dd427bdf5a652997e8aaf10276e45942edcc31c",
"size": "534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39529"
},
{
"name": "Shell",
"bytes": "388"
}
],
"symlink_target": ""
} |