text stringlengths 4 1.02M | meta dict |
|---|---|
from contextlib import contextmanager
from errno import EACCES, EAGAIN
from fcntl import lockf, LOCK_NB
import os
import socket
def openlock(filename, operation, wait=True):
"""
Returns a file-like object that gets a fnctl() lock.
`operation` should be one of LOCK_SH or LOCK_EX for shared or
exclusive locks.
If `wait` is False, then openlock() will not block on trying to
acquire the lock.
"""
f = os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT, 0666), "r+")
if not wait:
operation |= LOCK_NB
try:
lockf(f.fileno(), operation)
except IOError, err:
if not wait and err.errno in (EACCES, EAGAIN):
from django.core.management.base import CommandError
raise CommandError("Could not acquire lock on '%s' held by %s." %
(filename, f.readline().strip()))
raise
print >>f, "%s:%d" % (socket.gethostname(), os.getpid())
f.truncate()
f.flush()
return f
@contextmanager
def lockfile(filename, operation, wait=True):
    """
    Context manager yielding a file-like object that holds an fcntl() lock.

    Automatically closes and removes the lock file upon completion, even
    when the managed block raises.
    `operation` should be one of LOCK_SH or LOCK_EX for shared or
    exclusive locks.
    If `wait` is False, then openlock() will not block on trying to
    acquire the lock.
    """
    path = os.path.abspath(filename)
    f = openlock(filename=filename, operation=operation, wait=wait)
    try:
        yield f
    finally:
        # Previously cleanup ran only on the success path, so an exception
        # in the managed block left a stale lock file and an open fd.
        os.unlink(path)
        f.close()
| {
"content_hash": "48635c72cb960df442f0277a8517226c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 30.45098039215686,
"alnum_prop": 0.6445589182227945,
"repo_name": "e-loue/django-lean",
"id": "ecf138b3a1b0a0354c3df50d2d6f6cd6d0cde96e",
"size": "1553",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django_lean/lockfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "2013"
},
{
"name": "Python",
"bytes": "300892"
}
],
"symlink_target": ""
} |
import math
import time
import random
class Trace(object):
    """
    An L{ITrace} provider which delegates to zero or more L{ITracers} and
    allows setting a default L{IEndpoint} to associate with L{IAnnotation}s.

    @ivar _tracers: C{list} of L{ITracer} providers, or C{None}.
    @ivar _endpoint: An L{IEndpoint} provider, or C{None} until set.
    """

    def __init__(self, name, trace_id=None, span_id=None,
                 parent_span_id=None, tracers=None):
        """
        @param name: C{str} describing the current span.
        @param trace_id: C{int} or C{None}
        @param span_id: C{int} or C{None}
        @param parent_span_id: C{int} or C{None}; C{None} means no parent.
        @param tracers: C{list} of L{ITracer} providers, primarily useful
            for unit testing.
        """
        self.name = name
        self.trace_id = trace_id
        self.span_id = span_id
        self.parent_span_id = parent_span_id
        self._tracers = tracers
        # No default endpoint until set_endpoint() is called.
        self._endpoint = None

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return '%s(%r, trace_id=%r, span_id=%r, parent_span_id=%r)' % (
            self.__class__.__name__, self.name, self.trace_id,
            self.span_id, self.parent_span_id)

    def set_endpoint(self, endpoint):
        """
        Set a default L{IEndpoint} provider for the current L{Trace}.

        All annotations recorded after this endpoint is set will use it,
        unless they provide their own endpoint.
        """
        self._endpoint = endpoint
class Endpoint(object):
    """A network endpoint (address, port, service name) for annotations."""

    def __init__(self, ipv4, port, service_name):
        """
        @param ipv4: C{str} ipv4 address.
        @param port: C{int} port number.
        @param service_name: C{str} service name.
        """
        self.ipv4 = ipv4
        self.port = port
        self.service_name = service_name

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        return '%s(%r, %r, %r)' % (self.__class__.__name__, self.ipv4,
                                   self.port, self.service_name)
class Annotation(object):
    """
    A single typed key/value annotation, optionally tied to an endpoint.

    NOTE(review): the client_send/client_recv/server_send/server_recv
    constructors reference a ``constants`` module that is not imported in
    this file -- confirm where it is expected to come from.
    """

    def __init__(self, name, value, annotation_type, endpoint=None):
        """
        @param name: C{str} name of this annotation.
        @param value: A value of the appropriate type based on
            C{annotation_type}.
        @param annotation_type: C{str} the expected type of our C{value}.
        @param endpoint: An optional L{IEndpoint} provider to associate with
            this annotation or C{None}
        """
        self.name = name
        self.value = value
        self.annotation_type = annotation_type
        self.endpoint = endpoint

    def __ne__(self, other):
        return not (self == other)

    def __repr__(self):
        # endpoint is deliberately rendered with str(), not repr().
        return '%s(%r, %r, %r, %s)' % (
            self.__class__.__name__, self.name, self.value,
            self.annotation_type, self.endpoint)

    @classmethod
    def timestamp(cls, name, timestamp=None):
        """Build a 'timestamp' annotation; default is now, in microseconds."""
        if timestamp is None:
            timestamp = math.trunc(time.time() * 1000 * 1000)
        return cls(name, timestamp, 'timestamp')

    @classmethod
    def client_send(cls, timestamp=None):
        return cls.timestamp(constants.CLIENT_SEND, timestamp)

    @classmethod
    def client_recv(cls, timestamp=None):
        return cls.timestamp(constants.CLIENT_RECV, timestamp)

    @classmethod
    def server_send(cls, timestamp=None):
        return cls.timestamp(constants.SERVER_SEND, timestamp)

    @classmethod
    def server_recv(cls, timestamp=None):
        return cls.timestamp(constants.SERVER_RECV, timestamp)

    @classmethod
    def string(cls, name, value):
        """Build a 'string' annotation."""
        return cls(name, value, 'string')

    @classmethod
    def bytes(cls, name, value):
        """Build a 'bytes' annotation."""
        return cls(name, value, 'bytes')
| {
"content_hash": "3867d0783487518249fe6f4efe34e5a1",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 30.445255474452555,
"alnum_prop": 0.5900263725725245,
"repo_name": "marioskogias/blkin",
"id": "e0a3ed2c3a3ed76eecf722150e11cbc944338adc",
"size": "4171",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "babeltrace-plugins/zipkin/src/zipkin_logic/trace.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "34208"
},
{
"name": "C++",
"bytes": "17717"
},
{
"name": "Python",
"bytes": "28614"
}
],
"symlink_target": ""
} |
from rest_framework.exceptions import APIException
from rest_framework import status
# AC Service exceptions
class ACException(Exception):
    """
    Base class for Audio Commons service-level errors.

    Instances always carry a ``msg`` (str, default "") and a ``status``
    (int, default -1).  Subclasses may override the class attributes to
    change the defaults; constructor arguments override both.
    """
    msg = None
    status = None

    def __init__(self, msg=None, status=None):
        # Set defaults (if not set in subclass)
        if self.msg is None:
            self.msg = ""
        if self.status is None:
            # Bug fix: this previously assigned -1 to self.msg, so status
            # stayed None when no subclass/argument supplied one.
            self.status = -1
        # Override with arguments (if any)
        if msg is not None:
            self.msg = msg
        if status is not None:
            self.status = status

    def __repr__(self):
        return self.msg
class ImproperlyConfiguredACService(ACException):
    """Raised when a service definition lacks required configuration."""
    pass


class ACServiceDoesNotExist(ACException):
    """Raised when a requested service is not registered."""
    pass


class ACFieldTranslateException(ACException):
    """Raised when a field cannot be translated between representations."""
    pass


class ACFilterParsingException(ACException):
    """Raised when a filter expression cannot be parsed."""
    pass
# AC API Exceptions
class ACAPIException(APIException):
    """
    Base class for errors surfaced through the Audio Commons HTTP API.

    Exposes DRF's exception fields under the same ``msg``/``status`` names
    used by ACException, so callers can treat both uniformly.
    """

    @property
    def msg(self):
        # Fall back to the class-level default when detail is empty/unset.
        if self.detail:
            return self.detail
        return self.default_detail

    @property
    def status(self):
        return self.status_code
class ACAPIInvalidUrl(ACAPIException):
    """400: the provided URL is invalid."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = 'Invalid URL.'
    default_code = 'invalid_url'


class ACAPIServiceDoesNotExist(ACAPIException):
    """404: the named third-party service is not registered."""
    status_code = status.HTTP_404_NOT_FOUND
    default_detail = 'Requested service does not exist.'
    default_code = 'service_does_not_exist'


class ACAPINoServiceAvailable(ACAPIException):
    """400: no registered service can handle the request."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = 'No service is able to answer your request.'
    default_code = 'no_service_available'


class ACAPIPageNotFound(ACAPIException):
    """404: the requested result page does not exist."""
    status_code = status.HTTP_404_NOT_FOUND
    default_detail = 'Page not found.'
    default_code = 'page_not_found'


class ACAPIResourceDoesNotExist(ACAPIException):
    """404: the requested resource does not exist."""
    status_code = status.HTTP_404_NOT_FOUND
    default_detail = 'Requested resource does not exist.'
    default_code = 'resource_not_found'


class ACAPIResponseDoesNotExist(ACAPIException):
    """404: the requested (async) response does not exist."""
    status_code = status.HTTP_404_NOT_FOUND
    default_detail = 'Requested response does not exist.'
    default_code = 'response_not_found'


class ACAPIInvalidACID(ACAPIException):
    """400: malformed Audio Commons unique identifier."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = 'Invalid Audio Commons Unique Identifier.'
    default_code = 'invalid_acid'


class ACAPIInvalidCredentialsForService(ACAPIException):
    """401: credentials for the third-party service are invalid/expired."""
    status_code = status.HTTP_401_UNAUTHORIZED
    default_detail = 'Credentials to access third party service are invalid or have expired.'
    default_code = 'invalid_credentials_for_service'


class ACAPIBadRequest(ACAPIException):
    """400: generic bad request."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = 'Bad request.'
    default_code = 'bad_request'
class ACAPIUnsupportedFormat(ACAPIException):
    """400: the client asked for a response format we cannot produce."""
    status_code = status.HTTP_400_BAD_REQUEST
    default_detail = 'Unsupported response format.'
    # Fix copy-paste error: this previously reused ACAPIBadRequest's
    # 'bad_request' code, making the two errors indistinguishable by code.
    default_code = 'unsupported_format'
| {
"content_hash": "0f3236d98645fb45ac3248b0b5a6f4f2",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 93,
"avg_line_length": 25.321739130434782,
"alnum_prop": 0.7060439560439561,
"repo_name": "AudioCommons/ac-mediator",
"id": "9da87b0350b1ac6ffa3da3d0d533cb0cff3602e5",
"size": "2912",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ac_mediator/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5595"
},
{
"name": "Dockerfile",
"bytes": "1143"
},
{
"name": "HTML",
"bytes": "43305"
},
{
"name": "JavaScript",
"bytes": "337"
},
{
"name": "Python",
"bytes": "201502"
}
],
"symlink_target": ""
} |
"""
Random walk
-----------
"""
def random_walk(game):
    """
    Solves MasterMind by throwing out random codes.

    On average takes about 5 time n guesses, where n
    is the number of possible solutions of the Mastermind Game.
    For 6\ :sup:`4`, this means about 6000 guesses.
    Returns the solution translated back into the game colors.
    """
    guess = []
    attempts = 0
    # Keep drawing random codes until one matches the hidden challenge.
    while guess != game.challenge:
        guess = game.create_code()
        attempts += 1
    # Translate the winning code back into color names, one slot at a time.
    colors = []
    for slot in game.slots:
        colors.append(game.colordict[guess[slot]])
    return colors, attempts
| {
"content_hash": "ae7afc65a55759dd3ff8a583be806dfd",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 27.571428571428573,
"alnum_prop": 0.5803108808290155,
"repo_name": "Eberhofer/MasterMind",
"id": "e1c8fdfb0a0359ae1173d1c09b77a152715d6610",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MasterMind/solvers/random_walk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17144"
}
],
"symlink_target": ""
} |
"""
uru_crm.modules.veggies
~~~~~~~~~~~~~~~~~~
Available_Veggie model definition(s)
"""
from sqlalchemy import Column
from uru_crm.extensions import db
from uru_crm.utils import STRING_LEN
from uru_crm.modules.base import Base
class Available_Veggie(Base):
    """
    Availability record: a vegetable offered by a farm, with a quantity.

    NOTE(review): ``quantity`` is stored as a string like the other two
    columns -- confirm whether a numeric column was intended.
    """
    # Name of the vegetable on offer.
    veggie = db.Column(db.String(STRING_LEN), nullable=False)
    # Farm offering the vegetable.
    farm = db.Column(db.String(STRING_LEN), nullable=False)
    # Quantity available, stored as a string.
    quantity = db.Column(db.String(STRING_LEN), nullable=False)
| {
"content_hash": "e6ca6acedf8193f3ced8e0b93663532e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 63,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.7,
"repo_name": "gitbenji/uru-crm",
"id": "05a7eaf3dc63aee1b283f44ffd07465f9653be1e",
"size": "484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uru_crm/modules/veggies/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16586"
},
{
"name": "HTML",
"bytes": "29055"
},
{
"name": "JavaScript",
"bytes": "1058"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "90564"
}
],
"symlink_target": ""
} |
"""Unittests for completion stages."""
from __future__ import print_function
import mock
from chromite.cbuildbot import commands
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot import prebuilts
from chromite.cbuildbot.stages import completion_stages
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import generic_stages_unittest
from chromite.cbuildbot.stages import sync_stages_unittest
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import builder_status_lib
from chromite.lib import cidb
from chromite.lib import config_lib
from chromite.lib import constants
from chromite.lib import portage_util
from chromite.lib.buildstore import FakeBuildStore
# pylint: disable=protected-access
class ManifestVersionedSyncCompletionStageTest(
    sync_stages_unittest.ManifestVersionedSyncStageTest):
  """Tests the ManifestVersionedSyncCompletion stage."""

  # pylint: disable=abstract-method

  BOT_ID = 'eve-release'

  def setUp(self):
    # A fake BuildStore so no real CIDB access happens in these tests.
    self.buildstore = FakeBuildStore()

  def testManifestVersionedSyncCompletedSuccess(self):
    """Tests basic ManifestVersionedSyncStageCompleted on success"""
    board_runattrs = self._run.GetBoardRunAttrs('eve')
    board_runattrs.SetParallel('success', True)
    update_status_mock = self.PatchObject(manifest_version.BuildSpecsManager,
                                          'UpdateStatus')
    stage = completion_stages.ManifestVersionedSyncCompletionStage(
        self._run, self.buildstore, self.sync_stage, success=True)
    stage.Run()
    # A successful run reports success for this bot and carries no message.
    update_status_mock.assert_called_once_with(
        message=None, success_map={self.BOT_ID: True})

  def testManifestVersionedSyncCompletedFailure(self):
    """Tests basic ManifestVersionedSyncStageCompleted on failure"""
    stage = completion_stages.ManifestVersionedSyncCompletionStage(
        self._run, self.buildstore, self.sync_stage, success=False)
    message = 'foo'
    get_msg_mock = self.PatchObject(
        generic_stages.BuilderStage,
        'GetBuildFailureMessage',
        return_value=message)
    update_status_mock = self.PatchObject(manifest_version.BuildSpecsManager,
                                          'UpdateStatus')
    stage.Run()
    # On failure the stage asks for a failure message and forwards it.
    update_status_mock.assert_called_once_with(
        message='foo', success_map={self.BOT_ID: False})
    get_msg_mock.assert_called_once_with()

  def testManifestVersionedSyncCompletedIncomplete(self):
    """Tests basic ManifestVersionedSyncStageCompleted on incomplete build."""
    stage = completion_stages.ManifestVersionedSyncCompletionStage(
        self._run, self.buildstore, self.sync_stage, success=False)
    # Only verifies the stage runs to completion without raising.
    stage.Run()

  def testGetBuilderSuccessMap(self):
    """Tests that the builder success map is properly created."""
    board_runattrs = self._run.GetBoardRunAttrs('eve')
    board_runattrs.SetParallel('success', True)
    builder_success_map = completion_stages.GetBuilderSuccessMap(
        self._run, True)
    expected_map = {self.BOT_ID: True}
    self.assertEqual(expected_map, builder_success_map)
class MasterSlaveSyncCompletionStageMockConfigTest(
    generic_stages_unittest.AbstractStageTestCase):
  """Tests MasterSlaveSyncCompletionStage with ManifestVersionedSyncStage."""

  BOT_ID = 'master'

  def setUp(self):
    self.source_repo = 'ssh://source/repo'
    self.manifest_version_url = 'fake manifest url'
    self.branch = 'master'
    self.build_type = constants.PFQ_TYPE

    # Use our mocked out SiteConfig for all tests.
    self.test_config = self._GetTestConfig()
    self._Prepare(site_config=self.test_config)
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    # Build a real sync stage (with fake buildstore) to feed the
    # completion stage under test.
    sync_stage = sync_stages.ManifestVersionedSyncStage(self._run,
                                                        self.buildstore)
    return completion_stages.MasterSlaveSyncCompletionStage(
        self._run, self.buildstore, sync_stage, success=True)

  def _GetTestConfig(self):
    # Synthetic SiteConfig: one master whose slave_configs name only
    # 'test3' and 'test5'; the other entries vary manifest_version,
    # important, overlays, branch and internal flags.
    test_config = config_lib.SiteConfig()
    test_config.Add(
        'master',
        config_lib.BuildConfig(),
        boards=[],
        build_type=self.build_type,
        master=True,
        slave_configs=['test3', 'test5'],
        manifest_version=True,
    )
    test_config.Add(
        'test1',
        config_lib.BuildConfig(),
        boards=['amd64-generic'],
        manifest_version=True,
        build_type=constants.PFQ_TYPE,
        overlays='public',
        important=False,
        chrome_rev=None,
        branch=False,
        internal=False,
        master=False,
    )
    test_config.Add(
        'test2',
        config_lib.BuildConfig(),
        boards=['amd64-generic'],
        manifest_version=False,
        build_type=constants.PFQ_TYPE,
        overlays='public',
        important=True,
        chrome_rev=None,
        branch=False,
        internal=False,
        master=False,
    )
    test_config.Add(
        'test3',
        config_lib.BuildConfig(),
        boards=['amd64-generic'],
        manifest_version=True,
        build_type=constants.PFQ_TYPE,
        overlays='both',
        important=True,
        chrome_rev=None,
        branch=False,
        internal=True,
        master=False,
    )
    test_config.Add(
        'test4',
        config_lib.BuildConfig(),
        boards=['amd64-generic'],
        manifest_version=True,
        build_type=constants.PFQ_TYPE,
        overlays='both',
        important=True,
        chrome_rev=None,
        branch=True,
        internal=True,
        master=False,
    )
    test_config.Add(
        'test5',
        config_lib.BuildConfig(),
        boards=['amd64-generic'],
        manifest_version=True,
        build_type=constants.PFQ_TYPE,
        overlays='public',
        important=True,
        chrome_rev=None,
        branch=False,
        internal=False,
        master=False,
    )
    return test_config

  def testGetSlavesForMaster(self):
    """Tests that we get the slaves for a fake unified master configuration."""
    stage = self.ConstructStage()
    p = stage._GetSlaveConfigs()
    # Only test3 and test5 are listed in the master's slave_configs above.
    self.assertEqual([self.test_config['test3'], self.test_config['test5']], p)
class CanaryCompletionStageTest(generic_stages_unittest.AbstractStageTestCase):
  """Tests how canary master handles failures in CanaryCompletionStage."""

  BOT_ID = 'master-release'

  # We duplicate __init__ to specify a default for bot_id.
  # pylint: disable=arguments-differ,useless-super-delegation
  def _Prepare(self, bot_id=BOT_ID, **kwargs):
    super(CanaryCompletionStageTest, self)._Prepare(bot_id, **kwargs)

  def setUp(self):
    self.build_type = constants.CANARY_TYPE
    self._Prepare()
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    """Returns a CanaryCompletionStage object."""
    sync_stage = sync_stages.ManifestVersionedSyncStage(self._run,
                                                        self.buildstore)
    return completion_stages.CanaryCompletionStage(
        self._run, self.buildstore, sync_stage, success=True)

  def testGetBuilderStatusesFetcher(self):
    """Test GetBuilderStatusesFetcher."""
    mock_fetcher = mock.Mock()
    self.PatchObject(
        builder_status_lib, 'BuilderStatusesFetcher', return_value=mock_fetcher)
    # Stub out the slave wait so the test doesn't block.
    mock_wait = self.PatchObject(
        completion_stages.MasterSlaveSyncCompletionStage,
        '_WaitForSlavesToComplete')
    stage = self.ConstructStage()
    stage._run.attrs.manifest_manager = mock.Mock()
    # The stage should wait for slaves exactly once, then hand back the
    # (patched) fetcher instance.
    self.assertEqual(stage._GetBuilderStatusesFetcher(), mock_fetcher)
    self.assertEqual(mock_wait.call_count, 1)
class PublishUprevChangesStageTest(
    generic_stages_unittest.AbstractStageTestCase):
  """Tests for the PublishUprevChanges stage."""

  BOT_ID = 'master-vmmst-android-pfq'

  def setUp(self):
    # Patch out everything with side effects: portage env lookup, overlay
    # discovery, binhost writing, uprev push/commit and repo access.
    self.PatchObject(completion_stages.PublishUprevChangesStage,
                     '_GetPortageEnvVar')
    overlays_map = {
        constants.BOTH_OVERLAYS: ['ext', 'int'],
        constants.PUBLIC_OVERLAYS: ['ext'],
        constants.PRIVATE_OVERLAYS: ['int'],
    }
    self.PatchObject(portage_util, 'FindOverlays',
                     side_effect=lambda o, buildroot: overlays_map[o])
    self.PatchObject(prebuilts.BinhostConfWriter, 'Perform')
    # Kept so individual tests can assert on how UprevPush was invoked.
    self.push_mock = self.PatchObject(commands, 'UprevPush')
    self.PatchObject(generic_stages.BuilderStage, 'GetRepoRepository')
    self.PatchObject(commands, 'UprevPackages')

    self._Prepare()
    self.buildstore = FakeBuildStore()

  def ConstructStage(self):
    sync_stage = sync_stages.ManifestVersionedSyncStage(self._run,
                                                        self.buildstore)
    sync_stage.pool = mock.MagicMock()
    return completion_stages.PublishUprevChangesStage(
        self._run, self.buildstore, sync_stage, success=True)

  def testCheckSlaveUploadPrebuiltsTest(self):
    """Tests for CheckSlaveUploadPrebuiltsTest."""
    stage = self.ConstructStage()
    stage._build_stage_id = 'test_build_stage_id'
    mock_cidb = mock.MagicMock()
    cidb.CIDBConnectionFactory.SetupMockCidb(mock_cidb)
    stage_name = 'UploadPrebuilts'

    slave_a = 'slave_a'
    slave_b = 'slave_b'
    slave_c = 'slave_c'

    slave_configs_a = [{'name': slave_a}, {'name': slave_b}]
    slave_stages_a = [{'name': stage_name,
                       'build_config': slave_a,
                       'status': constants.BUILDER_STATUS_PASSED},
                      {'name': stage_name,
                       'build_config': slave_b,
                       'status': constants.BUILDER_STATUS_PASSED}]

    self.PatchObject(
        completion_stages.PublishUprevChangesStage,
        '_GetSlaveConfigs',
        return_value=slave_configs_a)
    self.PatchObject(FakeBuildStore, 'GetBuildStatuses', return_value=[])
    self.PatchObject(FakeBuildStore, 'GetBuildsStages',
                     return_value=slave_stages_a)

    # All important slaves are covered
    self.assertTrue(stage.CheckSlaveUploadPrebuiltsTest())

    slave_stages_b = [{'name': stage_name,
                       'build_config': slave_a,
                       'status': constants.BUILDER_STATUS_FAILED},
                      {'name': stage_name,
                       'build_config': slave_b,
                       'status': constants.BUILDER_STATUS_PASSED}]

    self.PatchObject(
        completion_stages.PublishUprevChangesStage,
        '_GetSlaveConfigs',
        return_value=slave_configs_a)
    self.PatchObject(FakeBuildStore, 'GetBuildsStages',
                     return_value=slave_stages_b)

    # Slave_a didn't pass the stage
    self.assertFalse(stage.CheckSlaveUploadPrebuiltsTest())

    slave_configs_b = [{'name': slave_a}, {'name': slave_b}, {'name': slave_c}]
    self.PatchObject(
        completion_stages.PublishUprevChangesStage,
        '_GetSlaveConfigs',
        return_value=slave_configs_b)
    self.PatchObject(FakeBuildStore, 'GetBuildsStages',
                     return_value=slave_stages_a)

    # No stage information for slave_c
    self.assertFalse(stage.CheckSlaveUploadPrebuiltsTest())

  def testAndroidPush(self):
    """Test values for PublishUprevChanges with Android PFQ."""
    self._Prepare(
        bot_id=constants.PI_ANDROID_PFQ_MASTER,
        extra_config={'push_overlays': constants.PUBLIC_OVERLAYS},
        extra_cmd_args=['--android_rev', constants.ANDROID_REV_LATEST])
    self._run.options.prebuilts = True
    self.RunStage()
    # Public push only; Android (not Chrome) must be marked as uprevved.
    self.push_mock.assert_called_once_with(
        self.build_root, overlay_type='public', dryrun=False)
    self.assertTrue(self._run.attrs.metadata.GetValue('UprevvedAndroid'))
    metadata_dict = self._run.attrs.metadata.GetDict()
    self.assertNotIn('UprevvedChrome', metadata_dict)

  def testPerformStageOnChromePFQ(self):
    """Test PerformStage on ChromePFQ."""
    stage = self.ConstructStage()
    stage.PerformStage()
    self.push_mock.assert_called_once_with(
        self.build_root, overlay_type='both', dryrun=False)
| {
"content_hash": "fed24869f74703c0d0551428cceb3f98",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 80,
"avg_line_length": 35.51632047477745,
"alnum_prop": 0.6642994402205699,
"repo_name": "endlessm/chromium-browser",
"id": "c64a862c12dc8ec4b7524855ee2e0444818e707d",
"size": "12163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/cbuildbot/stages/completion_stages_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.utils import timezone
from sentry.testutils import AcceptanceTestCase
class ProjectIssuesTest(AcceptanceTestCase):
    """Browser-driven snapshot tests of the project issue stream page."""

    def setUp(self):
        super(ProjectIssuesTest, self).setUp()
        # Fixture: one user owning one org/team/project, logged in.
        self.user = self.create_user('foo@example.com')
        self.org = self.create_organization(owner=self.user, name='Rowdy Tiger')
        self.team = self.create_team(organization=self.org, name='Mariachi Band')
        self.project = self.create_project(
            organization=self.org,
            team=self.team,
            name='Bengal',
        )
        self.login_as(self.user)
        self.path = '/{}/{}/'.format(self.org.slug, self.project.slug)

    # TODO(dcramer): abstract fixtures into a basic set that is present for
    # all acceptance tests
    def test_not_setup(self):
        # TODO(dcramer): we should add basic assertions around "i wanted this
        # URL but was sent somewhere else"
        self.browser.get(self.path)
        self.browser.wait_until('.awaiting-events')
        self.browser.snapshot('project issues not configured')

    def test_with_issues(self):
        # Marking first_event makes the project count as "set up".
        self.project.update(first_event=timezone.now())
        self.create_group(
            project=self.project,
            message='Foo bar',
        )
        self.browser.get(self.path)
        self.browser.wait_until('.group-list')
        self.browser.wait_until('.barchart')
        self.browser.snapshot('project issues with issues')

    def test_with_no_issues(self):
        self.project.update(first_event=timezone.now())
        self.browser.get(self.path)
        self.browser.wait_until('.empty-stream')
        self.browser.snapshot('project issues without issues')
| {
"content_hash": "1cdb570f6507036c030e097bfadee8b0",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 81,
"avg_line_length": 37.608695652173914,
"alnum_prop": 0.6473988439306358,
"repo_name": "jean/sentry",
"id": "45f2dfff7986c7a0076a523d705b1d42798c58f9",
"size": "1730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/acceptance/test_project_issues.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
} |
import numpy as np
import os.path as op
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
import mne
from mne import Epochs, read_events, pick_types, create_info, EpochsArray
from mne.io import read_raw_fif
from mne.utils import (_TempDir, run_tests_if_main, slow_test, requires_h5py,
grand_average)
from mne.time_frequency.tfr import (morlet, tfr_morlet, _make_dpss,
tfr_multitaper, AverageTFR, read_tfrs,
write_tfrs, combine_tfr, cwt, _compute_tfr,
EpochsTFR)
from mne.time_frequency import tfr_array_multitaper, tfr_array_morlet
from mne.viz.utils import _fake_click
from itertools import product
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
raw_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
event_fname = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-eve.fif')
def test_morlet():
    """Test morlet with and without zero mean."""
    zero_mean_wavelet = morlet(1000, [10], 2., zero_mean=True)
    plain_wavelet = morlet(1000, [10], 2., zero_mean=False)
    # The zero-mean variant should have (near) zero mean; the default
    # variant's mean stays clearly away from zero.
    assert_true(np.abs(np.mean(np.real(zero_mean_wavelet[0]))) < 1e-5)
    assert_true(np.abs(np.mean(np.real(plain_wavelet[0]))) > 1e-3)
def test_time_frequency():
    """Test the to-be-deprecated time-frequency transform (PSD and ITC)."""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing

    # Setup for reading the raw data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_fname)

    include = []
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more

    # picks MEG gradiometers; only 2 channels to keep the test fast
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=include, exclude=exclude)
    picks = picks[:2]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    data = epochs.get_data()
    times = epochs.times
    nave = len(data)

    epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)

    freqs = np.arange(6, 20, 5)  # define frequencies of interest
    n_cycles = freqs / 4.

    # Test first with a single epoch
    power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)

    # Now compute evoked
    evoked = epochs.average()
    power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
                              return_itc=False)
    # ITC is undefined for an averaged (single-trial) signal
    assert_raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                            use_fft=True, return_itc=True)
    power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
                              use_fft=True, return_itc=True, decim=slice(0, 2))
    # Test picks argument and average parameter
    assert_raises(ValueError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, return_itc=True, average=False)
    power_picks, itc_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=True, picks=picks, average=True)
    epochs_power_picks = \
        tfr_morlet(epochs_nopicks,
                   freqs=freqs, n_cycles=n_cycles, use_fft=True,
                   return_itc=False, picks=picks, average=False)
    power_picks_avg = epochs_power_picks.average()
    # the actual data arrays here are equivalent, too...
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_picks_avg.data)
    assert_array_almost_equal(itc.data, itc_picks.data)
    assert_array_almost_equal(power.data, power_evoked.data)

    print(itc)  # test repr
    print(itc.ch_names)  # test property
    itc += power  # test add
    itc -= power  # test sub

    power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')

    # channel-type membership checks on the AverageTFR container
    assert_true('meg' in power)
    assert_true('grad' in power)
    assert_false('mag' in power)
    assert_false('eeg' in power)

    assert_equal(power.nave, nave)
    assert_equal(itc.nave, nave)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(power_.data.shape == (len(picks), len(freqs), 2))
    assert_true(power_.data.shape == itc_.data.shape)
    # ITC values must lie strictly within (0, 1)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    # grand average
    itc2 = itc.copy()
    itc2.info['bads'] = [itc2.ch_names[0]]  # test channel drop
    gave = grand_average([itc2, itc])
    assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
                                   itc2.data.shape[1],
                                   itc2.data.shape[2]))
    assert_equal(itc2.ch_names[1:], gave.ch_names)
    assert_equal(gave.nave, 2)
    itc2.drop_channels(itc2.info["bads"])
    assert_array_almost_equal(gave.data, itc2.data)
    # combine_tfr with nave-weighted data: (2*1 + 1*0) / 3 == 2/3
    itc2.data = np.ones(itc2.data.shape)
    itc.data = np.zeros(itc.data.shape)
    itc2.nave = 2
    itc.nave = 1
    itc.drop_channels([itc.ch_names[0]])
    combined_itc = combine_tfr([itc2, itc])
    assert_array_almost_equal(combined_itc.data,
                              np.ones(combined_itc.data.shape) * 2 / 3)

    # more tests
    power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
                            return_itc=True)
    assert_true(power.data.shape == (len(picks), len(freqs), len(times)))
    assert_true(power.data.shape == itc.data.shape)
    assert_true(np.sum(itc.data >= 1) == 0)
    assert_true(np.sum(itc.data <= 0) == 0)

    tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,
                     return_itc=False).data[0]
    assert_true(tfr.shape == (len(picks), len(freqs), len(times)))
    tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,
                      decim=slice(0, 2), average=False,
                      return_itc=False).data[0]
    assert_true(tfr2.shape == (len(picks), len(freqs), 2))

    single_power = tfr_morlet(epochs, freqs, 2, average=False,
                              return_itc=False).data
    single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),
                               average=False, return_itc=False).data
    single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),
                               average=False, return_itc=False).data
    single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),
                               average=False, return_itc=False).data

    # averaging single-trial power must equal the averaged-power result,
    # including for the various decim slices
    assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
    assert_array_almost_equal(np.mean(single_power2, axis=0),
                              power.data[:, :, :2])
    assert_array_almost_equal(np.mean(single_power3, axis=0),
                              power.data[:, :, 1:3])
    assert_array_almost_equal(np.mean(single_power4, axis=0),
                              power.data[:, :, 2:4])

    power_pick = power.pick_channels(power.ch_names[:10:2])
    assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
    assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
    power_drop = power.drop_channels(power.ch_names[1:10:2])
    assert_equal(power_drop.ch_names, power_pick.ch_names)
    assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))

    mne.equalize_channels([power_pick, power_drop])
    assert_equal(power_pick.ch_names, power_drop.ch_names)
    assert_equal(power_pick.data.shape, power_drop.data.shape)

    # Test decimation:
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in [2, 3, 8, 9]:
        for use_fft in [True, False]:
            power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
                                    use_fft=use_fft, return_itc=True,
                                    decim=decim)
            assert_equal(power.data.shape[2],
                         np.ceil(float(len(times)) / decim))
    freqs = list(range(50, 55))
    decim = 2
    _, n_chan, n_time = data.shape
    tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,
                     return_itc=False).data[0]
    assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))

    # Test cwt modes
    Ws = morlet(512, [10, 20], n_cycles=2)
    assert_raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
    for use_fft in [True, False]:
        for mode in ['same', 'valid', 'full']:
            # XXX JRK: full wavelet decomposition needs to be implemented
            if (not use_fft) and mode == 'full':
                assert_raises(ValueError, cwt, data[0, :, :], Ws,
                              use_fft=use_fft, mode=mode)
                continue
            cwt(data[0, :, :], Ws, use_fft=use_fft, mode=mode)

    # Test decim parameter checks
    assert_raises(TypeError, tfr_morlet, epochs, freqs=freqs,
                  n_cycles=n_cycles, use_fft=True, return_itc=True,
                  decim='decim')
def test_dpsswavelet():
    """Test DPSS tapers."""
    test_freqs = np.arange(5, 25, 3)
    wavelets = _make_dpss(1000, freqs=test_freqs, n_cycles=test_freqs / 2.,
                          time_bandwidth=4.0, zero_mean=True)
    # A time-bandwidth product of 4.0 should yield exactly three tapers.
    assert_true(len(wavelets) == 3)
    # zero_mean=True must leave the first wavelet numerically mean-free.
    mean_real_part = np.mean(np.real(wavelets[0][0]))
    assert_true(np.abs(mean_real_part) < 1e-5)
    # One wavelet per requested frequency for each taper.
    assert_true(len(wavelets[0]) == len(test_freqs))
@slow_test
def test_tfr_multitaper():
    """Test tfr_multitaper."""
    # Build a 2-channel, 3-epoch synthetic dataset: white noise plus a
    # Hanning-ramped 50 Hz burst between 0.45 s and 0.55 s.
    sfreq = 200.0
    ch_names = ['SIM0001', 'SIM0002']
    ch_types = ['grad', 'grad']
    info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
    n_times = int(sfreq)  # Second long epochs
    n_epochs = 3
    seed = 42
    rng = np.random.RandomState(seed)
    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
    t = np.arange(n_times, dtype=np.float) / sfreq
    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
    on_time = np.logical_and(t >= 0.45, t <= 0.55)
    signal[on_time] *= np.hanning(on_time.sum())  # Ramping
    dat = noise + signal
    reject = dict(grad=4000.)
    events = np.empty((n_epochs, 3), int)
    first_event_sample = 100
    event_id = dict(sin50hz=1)
    for k in range(n_epochs):
        events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
    epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
                         reject=reject)
    freqs = np.arange(35, 70, 5, dtype=np.float)
    # Compute the same TFR several ways: default, decimated, with explicit
    # picks, per-epoch (averaged afterwards), and on the evoked average.
    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
                                time_bandwidth=4.0)
    power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
                                  time_bandwidth=4.0, decim=slice(0, 2))
    picks = np.arange(len(ch_names))
    power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
                                            n_cycles=freqs / 2.,
                                            time_bandwidth=4.0, picks=picks)
    power_epochs = tfr_multitaper(epochs, freqs=freqs,
                                  n_cycles=freqs / 2., time_bandwidth=4.0,
                                  return_itc=False, average=False)
    power_averaged = power_epochs.average()
    power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
                                  n_cycles=freqs / 2., time_bandwidth=4.0,
                                  return_itc=False, average=False).average()
    print(power_evoked)  # test repr for EpochsTFR
    # ITC is undefined for unaveraged output, so this must raise.
    assert_raises(ValueError, tfr_multitaper, epochs,
                  freqs=freqs, n_cycles=freqs / 2.,
                  return_itc=True, average=False)
    # test picks argument
    assert_array_almost_equal(power.data, power_picks.data)
    assert_array_almost_equal(power.data, power_averaged.data)
    assert_array_almost_equal(power.times, power_epochs.times)
    assert_array_almost_equal(power.times, power_averaged.times)
    assert_equal(power.nave, power_averaged.nave)
    assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
    assert_array_almost_equal(itc.data, itc_picks.data)
    # one is squared magnitude of the average (evoked) and
    # the other is average of the squared magnitudes (epochs PSD)
    # so values shouldn't match, but shapes should
    assert_array_equal(power.data.shape, power_evoked.data.shape)
    assert_raises(AssertionError, assert_array_almost_equal,
                  power.data, power_evoked.data)
    # The ITC/power peak must land near the injected burst (t ~ 0.5 s,
    # f = 50 Hz).
    tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
    fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
    assert_true(tmax > 0.3 and tmax < 0.7)
    assert_false(np.any(itc.data < 0.))
    assert_true(fmax > 40 and fmax < 60)
    assert_true(power2.data.shape == (len(picks), len(freqs), 2))
    assert_true(power2.data.shape == itc2.data.shape)
    # Test decim parameter checks and compatibility between wavelets length
    # and instance length in the time dimension.
    assert_raises(TypeError, tfr_multitaper, epochs, freqs=freqs,
                  n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))
    assert_raises(ValueError, tfr_multitaper, epochs, freqs=freqs,
                  n_cycles=1000, time_bandwidth=4.0)
def test_crop():
    """Test TFR cropping."""
    n_ch, n_freq, n_time = 3, 2, 3
    sample_times = np.array([.1, .2, .3])
    sample_freqs = np.array([.10, .20])
    meas_info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
                                ['mag', 'mag', 'mag'])
    tfr = AverageTFR(meas_info, data=np.zeros((n_ch, n_freq, n_time)),
                     times=sample_times, freqs=sample_freqs,
                     nave=20, comment='test', method='crazy-tfr')
    # Cropping to [0.2, 0.3] should drop exactly the first time sample.
    tfr.crop(0.2, 0.3)
    assert_array_equal(tfr.times, [0.2, 0.3])
    assert_equal(tfr.data.shape[-1], 2)
@requires_h5py
def test_io():
    """Test TFR IO capacities."""
    tempdir = _TempDir()
    fname = op.join(tempdir, 'test-tfr.h5')
    data = np.zeros((3, 2, 3))
    times = np.array([.1, .2, .3])
    freqs = np.array([.10, .20])
    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
                           ['mag', 'mag', 'mag'])
    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
                     nave=20, comment='test', method='crazy-tfr')
    # Round-trip a single AverageTFR and check every stored attribute.
    tfr.save(fname)
    tfr2 = read_tfrs(fname, condition='test')
    assert_array_equal(tfr.data, tfr2.data)
    assert_array_equal(tfr.times, tfr2.times)
    assert_array_equal(tfr.freqs, tfr2.freqs)
    assert_equal(tfr.comment, tfr2.comment)
    assert_equal(tfr.nave, tfr2.nave)
    # Saving to an existing file without overwrite=True must fail.
    assert_raises(IOError, tfr.save, fname)
    tfr.comment = None
    tfr.save(fname, overwrite=True)
    assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
    # Multiple conditions in one file, selected back by comment.
    tfr.comment = 'test-A'
    tfr2.comment = 'test-B'
    fname = op.join(tempdir, 'test2-tfr.h5')
    write_tfrs(fname, [tfr, tfr2])
    tfr3 = read_tfrs(fname, condition='test-A')
    assert_equal(tfr.comment, tfr3.comment)
    assert_true(isinstance(tfr.info, mne.Info))
    # condition=None returns every stored TFR.
    tfrs = read_tfrs(fname, condition=None)
    assert_equal(len(tfrs), 2)
    tfr4 = tfrs[1]
    assert_equal(tfr2.comment, tfr4.comment)
    assert_raises(ValueError, read_tfrs, fname, condition='nonono')
    # Test save of EpochsTFR.
    data = np.zeros((5, 3, 2, 3))
    tfr = EpochsTFR(info, data=data, times=times, freqs=freqs,
                    comment='test', method='crazy-tfr')
    tfr.save(fname, True)
    read_tfr = read_tfrs(fname)[0]
    assert_array_equal(tfr.data, read_tfr.data)
def test_plot():
    """Test TFR plotting."""
    import matplotlib.pyplot as plt
    data = np.zeros((3, 2, 3))
    times = np.array([.1, .2, .3])
    freqs = np.array([.10, .20])
    info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
                           ['mag', 'mag', 'mag'])
    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
                     nave=20, comment='test', method='crazy-tfr')
    # Smoke test the basic plot call and plotting into user-supplied axes.
    tfr.plot([1, 2], title='title', colorbar=False)
    plt.close('all')
    ax = plt.subplot2grid((2, 2), (0, 0))
    ax2 = plt.subplot2grid((2, 2), (1, 1))
    ax3 = plt.subplot2grid((2, 2), (0, 1))
    tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
    plt.close('all')
    tfr.plot_topo(picks=[1, 2])
    plt.close('all')
    # NOTE(review): this plot_topo call duplicates the one above with
    # identical arguments -- confirm whether a different argument set
    # (e.g. a layout) was intended here.
    tfr.plot_topo(picks=[1, 2])
    plt.close('all')
    # Exercise the interactive colorbar: keyboard, drag, and scroll events.
    fig = tfr.plot(picks=[1], cmap='RdBu_r')  # interactive mode on by default
    fig.canvas.key_press_event('up')
    fig.canvas.key_press_event(' ')
    fig.canvas.key_press_event('down')
    cbar = fig.get_axes()[0].CB  # Fake dragging with mouse.
    ax = cbar.cbar.ax
    _fake_click(fig, ax, (0.1, 0.1))
    _fake_click(fig, ax, (0.1, 0.2), kind='motion')
    _fake_click(fig, ax, (0.1, 0.3), kind='release')
    _fake_click(fig, ax, (0.1, 0.1), button=3)
    _fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
    _fake_click(fig, ax, (0.1, 0.3), kind='release')
    fig.canvas.scroll_event(0.5, 0.5, -0.5)  # scroll down
    fig.canvas.scroll_event(0.5, 0.5, 0.5)  # scroll up
    plt.close('all')
def test_add_channels():
    """Test tfr splitting / re-appending channel types."""
    data = np.zeros((6, 2, 3))
    times = np.array([.1, .2, .3])
    freqs = np.array([.10, .20])
    info = mne.create_info(
        ['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
        1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
    tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
                     nave=20, comment='test', method='crazy-tfr')
    # Split by channel type, then re-assemble and check nothing is lost.
    tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)
    tfr_meg = tfr.copy().pick_types(meg=True)
    tfr_stim = tfr.copy().pick_types(meg=False, stim=True)
    tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)
    tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])
    assert_true(all(ch in tfr_new.ch_names
                    for ch in tfr_stim.ch_names + tfr_meg.ch_names))
    tfr_new = tfr_meg.copy().add_channels([tfr_eeg])
    # BUG FIX: the original asserted a bare generator expression, which is
    # always truthy, so this check could never fail.  Wrap in all() and
    # compare against the channels actually expected here (MEG + EEG; the
    # stim channel is intentionally absent and checked below).
    assert_true(all(ch in tfr_new.ch_names
                    for ch in tfr_eeg.ch_names + tfr_meg.ch_names))
    assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
    assert_true(all(ch not in tfr_new.ch_names
                    for ch in tfr_stim.ch_names))
    # Now test errors
    tfr_badsf = tfr_eeg.copy()
    tfr_badsf.info['sfreq'] = 3.1415927  # mismatched sampling frequency
    tfr_eeg = tfr_eeg.crop(-.1, .1)  # mismatched time axis
    assert_raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
    assert_raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
    assert_raises(ValueError, tfr_meg.add_channels, [tfr_meg])
    assert_raises(AssertionError, tfr_meg.add_channels, tfr_badsf)
def test_compute_tfr():
    """Test _compute_tfr function."""
    # Set parameters
    event_id = 1
    tmin = -0.2
    tmax = 0.498  # Allows exhaustive decimation testing
    # Setup for reading the raw data
    raw = read_raw_fif(raw_fname)
    events = read_events(event_fname)
    exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053']  # bads + 2 more
    # picks MEG gradiometers
    picks = pick_types(raw.info, meg='grad', eeg=False,
                       stim=False, include=[], exclude=exclude)
    picks = picks[:2]  # keep the test fast: only two channels
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
    data = epochs.get_data()
    sfreq = epochs.info['sfreq']
    freqs = np.arange(10, 20, 3).astype(float)
    # Check all combination of options
    for func, use_fft, zero_mean, output in product(
        (tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True),
        ('complex', 'power', 'phase',
         'avg_power_itc', 'avg_power', 'itc')):
        # Check exception
        if (func == tfr_array_multitaper) and (output == 'phase'):
            assert_raises(NotImplementedError, func, data, sfreq=sfreq,
                          frequencies=freqs, output=output)
            continue
        # Check runs
        out = func(data, sfreq=sfreq, frequencies=freqs, use_fft=use_fft,
                   zero_mean=zero_mean, n_cycles=2., output=output)
        # Check shapes
        shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
        if ('avg' in output) or ('itc' in output):
            # Averaged outputs drop the epochs dimension.
            assert_array_equal(shape[1:], out.shape)
        else:
            assert_array_equal(shape, out.shape)
        # Check types
        if output in ('complex', 'avg_power_itc'):
            assert_equal(np.complex, out.dtype)
        else:
            assert_equal(np.float, out.dtype)
        assert_true(np.all(np.isfinite(out)))
    # Check errors params
    for _data in (None, 'foo', data[0]):
        assert_raises(ValueError, _compute_tfr, _data, freqs, sfreq)
    for _freqs in (None, 'foo', [[0]]):
        assert_raises(ValueError, _compute_tfr, data, _freqs, sfreq)
    for _sfreq in (None, 'foo'):
        assert_raises(ValueError, _compute_tfr, data, freqs, _sfreq)
    for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
        for value in (None, 'foo'):
            kwargs = {key: value}  # FIXME pep8
            assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
                          **kwargs)
    # No time_bandwidth param in morlet
    assert_raises(ValueError, _compute_tfr, data, freqs, sfreq,
                  method='morlet', time_bandwidth=1)
    # No phase in multitaper XXX Check ?
    assert_raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
                  method='multitaper', output='phase')
    # Inter-trial coherence tests
    out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
    # ITC must lie strictly inside (0, 1).
    assert_true(np.sum(out >= 1) == 0)
    assert_true(np.sum(out <= 0) == 0)
    # Check decim shapes
    # 2: multiple of len(times) even
    # 3: multiple odd
    # 8: not multiple, even
    # 9: not multiple, odd
    for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
        # Integer decim is equivalent to a step slice over the time axis.
        _decim = slice(None, None, decim) if isinstance(decim, int) else decim
        n_time = len(np.arange(data.shape[2])[_decim])
        shape = np.r_[data.shape[:2], len(freqs), n_time]
        for method in ('multitaper', 'morlet'):
            # Single trials
            out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
                               n_cycles=2.)
            assert_array_equal(shape, out.shape)
            # Averages
            out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
                               output='avg_power', n_cycles=2.)
            assert_array_equal(shape[1:], out.shape)
run_tests_if_main()  # Run this module's tests when executed as a script.
| {
"content_hash": "268a97848b16446c412706ad59d55e85",
"timestamp": "",
"source": "github",
"line_count": 563,
"max_line_length": 79,
"avg_line_length": 40.38721136767318,
"alnum_prop": 0.5876066496613599,
"repo_name": "jaeilepp/mne-python",
"id": "6080075b7948f14b2dbe8eb18fb8a3bcf476d6aa",
"size": "22738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mne/time_frequency/tests/test_tfr.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6113850"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
import io
import os
import sys
import time
import base64
from IOST_Prepare import *
from IOST_Config import *
from IOST_Basic import *
from IOST_Host import *
import vte
import ConfigParser
import pango
#======================================================================
# Mapping from a key name (as produced by IOST_Basic.GetKeyName) to the
# action bound to it.  Empty here; presumably populated elsewhere (e.g.
# from configuration) -- not visible in this file.
shortcuts={}
# Sentinel action values stored in `shortcuts`.  Each is a one-element
# list so it compares by value in on_Terminal_keypress; plain string
# values are instead fed directly to the child process.
_COPY = ["copy"]
_PASTE = ["paste"]
_COPY_ALL = ["copy_all"]
_SAVE = ["save"]
_FIND = ["find"]
_CLEAR = ["reset"]
_FIND_NEXT = ["find_next"]
_FIND_BACK = ["find_back"]
_CONSOLE_PREV = ["console_previous"]
_CONSOLE_NEXT = ["console_next"]
# Direct jump to console tab N (1-based).
_CONSOLE_1 = ["console_1"]
_CONSOLE_2 = ["console_2"]
_CONSOLE_3 = ["console_3"]
_CONSOLE_4 = ["console_4"]
_CONSOLE_5 = ["console_5"]
_CONSOLE_6 = ["console_6"]
_CONSOLE_7 = ["console_7"]
_CONSOLE_8 = ["console_8"]
_CONSOLE_9 = ["console_9"]
_CONSOLE_CLOSE = ["console_close"]
_CONSOLE_RECONNECT = ["console_reconnect"]
_CONNECT = ["connect"]
#======================================================================
class IOST_Terminal(IOST_Basic, IOST_Define, IOST_Host):
    """
    Register and control a terminal in python using the vte module.

    The wrapped widget is exposed as ``self.IOST_vte``.  Appearance and
    behaviour (colors, font, transparency, key bindings) come from the
    IOST_Define constants and the per-host settings loaded via IOST_Host.
    """
    #----------------------------------------------------------------------
    def __init__(self, notebook, host_name):
        """
        Create and configure a vte terminal for the given host.

        notebook  -- notebook widget the terminal tab lives in (kept for
                     callers; not used directly here).
        host_name -- name of the host whose saved settings (colors,
                     backspace/delete bindings) should be applied.
        """
        try:
            self.IOST_vte = vte.Terminal()
            self.IOST_vte.set_word_chars(IOST_Define.WORD_SEPARATORS)
            self.IOST_vte.set_scrollback_lines(IOST_Define.BUFFER_LINES)
            # Keep $TERM in sync with the emulation vte reports so child
            # processes pick the matching terminfo entry.
            if self.IOST_vte.get_emulation() != os.getenv("TERM"):
                os.environ['TERM'] = self.IOST_vte.get_emulation()
            # Load the per-host settings.  Initialise the colors first so
            # a non-string host_name cannot leave them undefined.
            fcolor = bcolor = None
            if isinstance(host_name, basestring):
                iost_host = IOST_Host('', host_name)
                iost_host.__print_arg__()
                fcolor = iost_host.font_color
                bcolor = iost_host.back_color
            # Fall back to the global defaults when the host defines no
            # colors of its own.
            if not fcolor or not bcolor:
                fcolor = IOST_Define.FONT_COLOR
                bcolor = IOST_Define.BACKGROUND_COLOR
            if len(fcolor) > 0 and len(bcolor) > 0:
                self.IOST_vte.set_colors(gtk.gdk.Color(fcolor),
                                         gtk.gdk.Color(bcolor), [])
            # NOTE(review): when FONT is empty, 'monospace' is recorded but
            # never applied via set_font(); vte's built-in default is used
            # instead.  Kept as-is to preserve behaviour -- confirm intent.
            if len(IOST_Define.FONT) == 0:
                IOST_Define.FONT = 'monospace'
            else:
                self.IOST_vte.set_font(pango.FontDescription(IOST_Define.FONT))
            # Wire up the event handlers defined below.
            self.IOST_vte.connect("child-exited", lambda object: None)
            self.IOST_vte.connect("focus", self.on_Terminal_Tab_focus)
            self.IOST_vte.connect("button_press_event", self.on_Terminal_click)
            self.IOST_vte.connect("key_press_event", self.on_Terminal_keypress)
            self.IOST_vte.connect("selection-changed",
                                  self.on_Terminal_selection)
            # Background transparency: use real (composited) transparency
            # when available, otherwise fake it with background saturation.
            self.real_transparency = False
            if IOST_Define.TRANSPARENCY > 0:
                # `get_real_transparency` is not defined in this class --
                # presumably inherited from a mixin; TODO confirm.
                self.real_transparency = self.get_real_transparency()
                # BUG FIX: was `self.self.real_transparency`, which raised
                # AttributeError and aborted every transparent setup.
                if not self.real_transparency:
                    self.IOST_vte.set_background_transparent(True)
                    self.IOST_vte.set_background_saturation(
                        IOST_Define.TRANSPARENCY / 100.0)
                    if len(bcolor) > 0:
                        self.IOST_vte.set_background_tint_color(
                            gtk.gdk.Color(bcolor))
                else:
                    # BUG FIX: `conf` was an undefined name here; the
                    # transparency constant lives on IOST_Define.
                    self.IOST_vte.set_opacity(
                        int((100 - IOST_Define.TRANSPARENCY) / 100.0 * 65535))
            self.IOST_vte.set_backspace_binding(iost_host.backspace_key)
            self.IOST_vte.set_delete_binding(iost_host.delete_key)
        except Exception:
            iost_basic = IOST_Basic()
            iost_basic.MsgBox("%s: %s" % (("Error to create a Terminal"),
                                          sys.exc_info()[1]))
    #----------------------------------------------------------------------
    def on_Terminal_Tab_focus(self, object, *args):
        """Remember the last focused terminal as the current one."""
        if isinstance(object, vte.Terminal):
            self.current = object
    #----------------------------------------------------------------------
    def on_Terminal_click(self, object, event, *args):
        """Handle mouse buttons; right-click optionally pastes."""
        if event.type == gtk.gdk.BUTTON_PRESS and event.button == 3:
            if IOST_Define.PASTE_ON_RIGHT_CLICK:
                # BUG FIX: vte.Terminal has no `pass_clipboard` method;
                # the intended call is `paste_clipboard`.
                object.paste_clipboard()
            else:
                pass
                # Need add code more here (e.g. a context menu).
            return True
    #----------------------------------------------------------------------
    def on_Terminal_keypress(self, object, event, *args):
        """Dispatch a key press to the action bound in `shortcuts`.

        Returns True when the key was consumed, False otherwise so GTK
        propagates it to the terminal.
        """
        IOST_Basic.__init__(self)
        key_name = self.GetKeyName(event)
        if key_name in shortcuts:
            cmd = shortcuts[key_name]
            if type(cmd) == list:
                if cmd == _COPY:
                    self.Terminal_Copy(object)
                elif cmd == _PASTE:
                    self.Terminal_Paste(object)
                elif cmd == _COPY_ALL:
                    self.Terminal_CopyAll(object)
                elif cmd == _SAVE:
                    self.Terminal_ShowSaveBuffer(object)
                elif cmd == _FIND:
                    pass
                elif cmd == _FIND_NEXT:
                    if hasattr(self, 'search'):
                        self.Terminal_FindWord()
                elif cmd == _CLEAR:
                    object.reset(True, True)
                elif cmd == _FIND_BACK:
                    # BUG FIX: the attribute name was misspelled 'seatch',
                    # so find-backwards could never fire.
                    if hasattr(self, 'search'):
                        self.Terminal_FindWord(backwards=True)
                elif cmd == _CONSOLE_PREV:
                    object.get_parent().get_parent().prev_page()
                elif cmd == _CONSOLE_NEXT:
                    object.get_parent().get_parent().next_page()
                elif cmd == _CONSOLE_CLOSE:
                    obj_id = object.get_parent()
                    page = object.get_parent().get_parent().page_num(obj_id)
                    if page != -1:
                        object.get_parent().get_parent().remove_page(page)
                        obj_id.destroy()
                elif cmd == _CONSOLE_RECONNECT:
                    # Re-run the original command (or a plain shell) and,
                    # once the child is up, replay the stored login data.
                    if not hasattr(object, "command"):
                        object.fork_command(SHELL)
                    else:
                        object.fork_command(object.command[0],
                                            object.command[1])
                        while gtk.events_pending():
                            gtk.main_iteration(False)
                        if object.command[2] != None and object.command[2] != '':
                            gobject.timeout_add(2000, self.Terminal_SendData,
                                                object, object.command[2])
                    object.get_parent().get_parent().get_tab_label(
                        object.get_parent()).Terminal_MarkTabActive()
                elif cmd == _CONNECT:
                    pass
                elif cmd[0][0:8] == "console_":
                    # Jump straight to console N (bindings are 1-based).
                    page = int(cmd[0][8:]) - 1
                    object.get_parent().get_parent().set_current_page(page)
            else:
                # Plain string bindings are typed into the child process.
                object.feed_child(cmd)
            return True
        return False
    #----------------------------------------------------------------------
    def on_Terminal_selection(self, object, *args):
        """Optionally copy every new selection straight to the clipboard."""
        if IOST_Define.AUTO_COPY_SELECTION:
            self.Terminal_Copy(object)
        return True
    #----------------------------------------------------------------------
    def Terminal_Copy(self, terminal):
        """Copy the current selection to the clipboard."""
        terminal.copy_clipboard()
    #----------------------------------------------------------------------
    def Terminal_Paste(self, terminal):
        """Paste the clipboard content into the terminal."""
        terminal.paste_clipboard()
    #----------------------------------------------------------------------
    def Terminal_CopyAndPaste(self, terminal):
        """Copy the selection, then immediately paste it back."""
        terminal.copy_clipboard()
        terminal.paste_clipboard()
    #----------------------------------------------------------------------
    def Terminal_SelectAll(self, terminal):
        """Select the whole terminal buffer."""
        terminal.select_all()
    #----------------------------------------------------------------------
    def Terminal_CopyAll(self, terminal):
        """Copy the whole buffer to the clipboard, then clear selection."""
        terminal.select_all()
        terminal.copy_clipboard()
        terminal.select_none()
    #----------------------------------------------------------------------
    def Terminal_ShowSaveBuffer(self, terminal):
        """Placeholder: saving the scrollback buffer is not implemented."""
    #----------------------------------------------------------------------
    def Terminal_FindWord(self, backwards=False):
        """Placeholder: in-buffer search is not implemented."""
        pass
    #----------------------------------------------------------------------
    def Terminal_SendData(self, terminal, data):
        """Feed `data` plus a carriage return to the child process."""
        terminal.feed_child('%s\r' % (data))
        return False  # one-shot when used as a gobject timeout callback
    #----------------------------------------------------------------------
    def Terminal_MarkTabActive(self):
        """Placeholder: highlight the tab label of an active console."""
        pass
    #----------------------------------------------------------------------
    def Terminal_GetRealTransparency(self):
        """Return whether real (composited) transparency was detected."""
        return self.real_transparency
| {
"content_hash": "d6ef03fc56656d043e913483278e6dc6",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 112,
"avg_line_length": 41.4169741697417,
"alnum_prop": 0.3776728439059159,
"repo_name": "HPPTECH/hpp_IOSTressTest",
"id": "af56318f9964109d3dd183bf2b420d524af9a664",
"size": "11746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Refer/IOST_OLD_SRC/IOST_0.13/IOST_Terminal.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5571"
},
{
"name": "C",
"bytes": "5083"
},
{
"name": "CSS",
"bytes": "53608"
},
{
"name": "HTML",
"bytes": "2732176"
},
{
"name": "JavaScript",
"bytes": "945408"
},
{
"name": "Makefile",
"bytes": "5568"
},
{
"name": "Python",
"bytes": "5810318"
},
{
"name": "Shell",
"bytes": "21948"
}
],
"symlink_target": ""
} |
"""
Tests for Timestamp timezone-related methods
"""
from datetime import datetime, date, timedelta
from distutils.version import LooseVersion
import pytest
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
import dateutil
from dateutil.tz import gettz, tzoffset
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import Timestamp, NaT
from pandas.errors import OutOfBoundsDatetime
class TestTimestampTZOperations(object):
    """Tests for timezone-related Timestamp methods and constructors."""
    # --------------------------------------------------------------
    # Timestamp.tz_localize
    def test_tz_localize_pushes_out_of_bounds(self):
        # GH#12677
        # tz_localize that pushes away from the boundary is OK
        pac = Timestamp.min.tz_localize('US/Pacific')
        assert pac.value > Timestamp.min.value
        pac.tz_convert('Asia/Tokyo')  # tz_convert doesn't change value
        with pytest.raises(OutOfBoundsDatetime):
            Timestamp.min.tz_localize('Asia/Tokyo')
        # tz_localize that pushes away from the boundary is OK
        tokyo = Timestamp.max.tz_localize('Asia/Tokyo')
        assert tokyo.value < Timestamp.max.value
        tokyo.tz_convert('US/Pacific')  # tz_convert doesn't change value
        with pytest.raises(OutOfBoundsDatetime):
            Timestamp.max.tz_localize('US/Pacific')
    def test_tz_localize_ambiguous_bool(self):
        # make sure that we are correctly accepting bool values as ambiguous
        # GH#14402
        ts = Timestamp('2015-11-01 01:00:03')
        expected0 = Timestamp('2015-11-01 01:00:03-0500', tz='US/Central')
        expected1 = Timestamp('2015-11-01 01:00:03-0600', tz='US/Central')
        with pytest.raises(pytz.AmbiguousTimeError):
            ts.tz_localize('US/Central')
        result = ts.tz_localize('US/Central', ambiguous=True)
        assert result == expected0
        result = ts.tz_localize('US/Central', ambiguous=False)
        assert result == expected1
    def test_tz_localize_ambiguous(self):
        # 2014-11-02 01:00 occurs twice in US/Eastern (fall-back transition).
        ts = Timestamp('2014-11-02 01:00')
        ts_dst = ts.tz_localize('US/Eastern', ambiguous=True)
        ts_no_dst = ts.tz_localize('US/Eastern', ambiguous=False)
        assert (ts_no_dst.value - ts_dst.value) / 1e9 == 3600
        with pytest.raises(ValueError):
            ts.tz_localize('US/Eastern', ambiguous='infer')
        # GH#8025
        with tm.assert_raises_regex(TypeError,
                                    'Cannot localize tz-aware Timestamp, '
                                    'use tz_convert for conversions'):
            Timestamp('2011-01-01', tz='US/Eastern').tz_localize('Asia/Tokyo')
        with tm.assert_raises_regex(TypeError,
                                    'Cannot convert tz-naive Timestamp, '
                                    'use tz_localize to localize'):
            Timestamp('2011-01-01').tz_convert('Asia/Tokyo')
    @pytest.mark.parametrize('stamp, tz', [
        ('2015-03-08 02:00', 'US/Eastern'),
        ('2015-03-08 02:30', 'US/Pacific'),
        ('2015-03-29 02:00', 'Europe/Paris'),
        ('2015-03-29 02:30', 'Europe/Belgrade')])
    def test_tz_localize_nonexistent(self, stamp, tz):
        # GH#13057
        ts = Timestamp(stamp)
        with pytest.raises(NonExistentTimeError):
            ts.tz_localize(tz)
        with pytest.raises(NonExistentTimeError):
            ts.tz_localize(tz, errors='raise')
        assert ts.tz_localize(tz, errors='coerce') is NaT
    def test_tz_localize_errors_ambiguous(self):
        # GH#13057
        # errors='coerce' does not silence an ambiguous (repeated) time.
        ts = Timestamp('2015-11-1 01:00')
        with pytest.raises(AmbiguousTimeError):
            ts.tz_localize('US/Pacific', errors='coerce')
    @pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00',
                                       '2014-11-01 17:00', '2014-11-05 00:00'])
    def test_tz_localize_roundtrip(self, stamp, tz_aware_fixture):
        tz = tz_aware_fixture
        ts = Timestamp(stamp)
        localized = ts.tz_localize(tz)
        assert localized == Timestamp(stamp, tz=tz)
        with pytest.raises(TypeError):
            localized.tz_localize(tz)
        # tz_localize(None) strips the tz and restores the naive stamp.
        reset = localized.tz_localize(None)
        assert reset == ts
        assert reset.tzinfo is None
    def test_tz_localize_ambiguous_compat(self):
        # validate that pytz and dateutil are compat for dst
        # when the transition happens
        naive = Timestamp('2013-10-27 01:00:00')
        pytz_zone = 'Europe/London'
        dateutil_zone = 'dateutil/Europe/London'
        result_pytz = naive.tz_localize(pytz_zone, ambiguous=0)
        result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=0)
        assert result_pytz.value == result_dateutil.value
        assert result_pytz.value == 1382835600000000000
        if LooseVersion(dateutil.__version__) < LooseVersion('2.6.0'):
            # dateutil 2.6 buggy w.r.t. ambiguous=0
            # see gh-14621
            # see https://github.com/dateutil/dateutil/issues/321
            assert (result_pytz.to_pydatetime().tzname() ==
                    result_dateutil.to_pydatetime().tzname())
            assert str(result_pytz) == str(result_dateutil)
        elif LooseVersion(dateutil.__version__) > LooseVersion('2.6.0'):
            # fixed ambiguous behavior
            assert result_pytz.to_pydatetime().tzname() == 'GMT'
            assert result_dateutil.to_pydatetime().tzname() == 'BST'
            assert str(result_pytz) != str(result_dateutil)
        # 1 hour difference
        result_pytz = naive.tz_localize(pytz_zone, ambiguous=1)
        result_dateutil = naive.tz_localize(dateutil_zone, ambiguous=1)
        assert result_pytz.value == result_dateutil.value
        assert result_pytz.value == 1382832000000000000
        # dateutil < 2.6 is buggy w.r.t. ambiguous timezones
        if LooseVersion(dateutil.__version__) > LooseVersion('2.5.3'):
            # see gh-14621
            assert str(result_pytz) == str(result_dateutil)
            assert (result_pytz.to_pydatetime().tzname() ==
                    result_dateutil.to_pydatetime().tzname())
    @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
                                    gettz('US/Eastern'),
                                    'US/Eastern', 'dateutil/US/Eastern'])
    def test_timestamp_tz_localize(self, tz):
        stamp = Timestamp('3/11/2012 04:00')
        result = stamp.tz_localize(tz)
        expected = Timestamp('3/11/2012 04:00', tz=tz)
        assert result.hour == expected.hour
        assert result == expected
    # ------------------------------------------------------------------
    # Timestamp.tz_convert
    @pytest.mark.parametrize('stamp', ['2014-02-01 09:00', '2014-07-08 09:00',
                                       '2014-11-01 17:00', '2014-11-05 00:00'])
    def test_tz_convert_roundtrip(self, stamp, tz_aware_fixture):
        tz = tz_aware_fixture
        ts = Timestamp(stamp, tz='UTC')
        converted = ts.tz_convert(tz)
        # tz_convert(None) drops the tz after converting back through UTC.
        reset = converted.tz_convert(None)
        assert reset == Timestamp(stamp)
        assert reset.tzinfo is None
        assert reset == converted.tz_convert('UTC').tz_localize(None)
    @pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
    def test_astimezone(self, tzstr):
        # astimezone is an alias for tz_convert, so keep it with
        # the tz_convert tests
        utcdate = Timestamp('3/11/2012 22:00', tz='UTC')
        expected = utcdate.tz_convert(tzstr)
        result = utcdate.astimezone(tzstr)
        assert expected == result
        assert isinstance(result, Timestamp)
    @td.skip_if_windows
    def test_tz_convert_utc_with_system_utc(self):
        from pandas._libs.tslibs.timezones import maybe_get_tz
        # from system utc to real utc
        ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
        # check that the time hasn't changed.
        assert ts == ts.tz_convert(dateutil.tz.tzutc())
        # from system utc to real utc
        ts = Timestamp('2001-01-05 11:56', tz=maybe_get_tz('dateutil/UTC'))
        # check that the time hasn't changed.
        assert ts == ts.tz_convert(dateutil.tz.tzutc())
    # ------------------------------------------------------------------
    # Timestamp.__init__ with tz str or tzinfo
    def test_timestamp_constructor_tz_utc(self):
        utc_stamp = Timestamp('3/11/2012 05:00', tz='utc')
        assert utc_stamp.tzinfo is pytz.utc
        assert utc_stamp.hour == 5
        utc_stamp = Timestamp('3/11/2012 05:00').tz_localize('utc')
        assert utc_stamp.hour == 5
    def test_timestamp_to_datetime_tzoffset(self):
        # Round-trip through a fixed-offset tzinfo (+02:00).
        tzinfo = tzoffset(None, 7200)
        expected = Timestamp('3/11/2012 04:00', tz=tzinfo)
        result = Timestamp(expected.to_pydatetime())
        assert expected == result
    def test_timestamp_constructor_near_dst_boundary(self):
        # GH#11481 & GH#15777
        # Naive string timestamps were being localized incorrectly
        # with tz_convert_single instead of tz_localize_to_utc
        for tz in ['Europe/Brussels', 'Europe/Prague']:
            result = Timestamp('2015-10-25 01:00', tz=tz)
            expected = Timestamp('2015-10-25 01:00').tz_localize(tz)
            assert result == expected
            with pytest.raises(pytz.AmbiguousTimeError):
                Timestamp('2015-10-25 02:00', tz=tz)
        result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
        expected = Timestamp('2017-03-26 01:00').tz_localize('Europe/Paris')
        assert result == expected
        with pytest.raises(pytz.NonExistentTimeError):
            Timestamp('2017-03-26 02:00', tz='Europe/Paris')
        # GH#11708
        naive = Timestamp('2015-11-18 10:00:00')
        result = naive.tz_localize('UTC').tz_convert('Asia/Kolkata')
        expected = Timestamp('2015-11-18 15:30:00+0530', tz='Asia/Kolkata')
        assert result == expected
        # GH#15823
        result = Timestamp('2017-03-26 00:00', tz='Europe/Paris')
        expected = Timestamp('2017-03-26 00:00:00+0100', tz='Europe/Paris')
        assert result == expected
        result = Timestamp('2017-03-26 01:00', tz='Europe/Paris')
        expected = Timestamp('2017-03-26 01:00:00+0100', tz='Europe/Paris')
        assert result == expected
        with pytest.raises(pytz.NonExistentTimeError):
            Timestamp('2017-03-26 02:00', tz='Europe/Paris')
        result = Timestamp('2017-03-26 02:00:00+0100', tz='Europe/Paris')
        naive = Timestamp(result.value)
        expected = naive.tz_localize('UTC').tz_convert('Europe/Paris')
        assert result == expected
        result = Timestamp('2017-03-26 03:00', tz='Europe/Paris')
        expected = Timestamp('2017-03-26 03:00:00+0200', tz='Europe/Paris')
        assert result == expected
    @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
                                    gettz('US/Eastern'),
                                    'US/Eastern', 'dateutil/US/Eastern'])
    def test_timestamp_constructed_by_date_and_tz(self, tz):
        # GH#2993, Timestamp cannot be constructed by datetime.date
        # and tz correctly
        result = Timestamp(date(2012, 3, 11), tz=tz)
        expected = Timestamp('3/11/2012', tz=tz)
        assert result.hour == expected.hour
        assert result == expected
    @pytest.mark.parametrize('tz', [pytz.timezone('US/Eastern'),
                                    gettz('US/Eastern'),
                                    'US/Eastern', 'dateutil/US/Eastern'])
    def test_timestamp_add_timedelta_push_over_dst_boundary(self, tz):
        # GH#1389
        # 4 hours before DST transition
        stamp = Timestamp('3/10/2012 22:00', tz=tz)
        result = stamp + timedelta(hours=6)
        # spring forward, + "7" hours
        expected = Timestamp('3/11/2012 05:00', tz=tz)
        assert result == expected
    def test_timestamp_timetz_equivalent_with_datetime_tz(self,
                                                          tz_naive_fixture):
        # GH21358
        if tz_naive_fixture is not None:
            tz = dateutil.tz.gettz(tz_naive_fixture)
        else:
            tz = None
        stamp = Timestamp('2018-06-04 10:20:30', tz=tz)
        _datetime = datetime(2018, 6, 4, hour=10,
                             minute=20, second=30, tzinfo=tz)
        result = stamp.timetz()
        expected = _datetime.timetz()
        assert result == expected
| {
"content_hash": "6bf6b0bb009b6fc718f5333a1b941be4",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 79,
"avg_line_length": 40.55194805194805,
"alnum_prop": 0.5910328262610088,
"repo_name": "cython-testbed/pandas",
"id": "8cebfafeae82a3c80097e690e7120e7b0d181683",
"size": "12514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/scalar/timestamp/test_timezones.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14136208"
},
{
"name": "Shell",
"bytes": "27731"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
"""Power cycle devices using the 'Mbed TAS RM REST API'."""
import os
import json
import time
import requests
from .host_test_plugins import HostTestPluginBase
class HostTestPluginPowerCycleResetMethod(HostTestPluginBase):
    """Plugin interface adaptor for Mbed TAS RM REST API."""

    # Registration metadata consumed by the host-test plugin framework.
    name = "HostTestPluginPowerCycleResetMethod"
    type = "ResetMethod"
    stable = True
    capabilities = ["power_cycle"]
    required_parameters = ["target_id", "device_info"]

    def __init__(self):
        """Initialise plugin."""
        HostTestPluginBase.__init__(self)

    def setup(self, *args, **kwargs):
        """Configure plugin.

        This function should be called before plugin execute() method is used.
        """
        return True

    def execute(self, capability, *args, **kwargs):
        """Power cycle a device using the TAS RM API.

        If the "capability" name is not "power_cycle" this method will just fail.

        Args:
            capability: Capability name.
            args: Additional arguments.
            kwargs: Additional arguments; must contain a non-empty
                'target_id' string and a 'device_info' dict.

        Returns:
            True if the power cycle succeeded, otherwise False.
        """
        if "target_id" not in kwargs or not kwargs["target_id"]:
            self.print_plugin_error("Error: This plugin requires unique target_id")
            return False

        # isinstance() instead of an exact type() check also accepts dict
        # subclasses (e.g. OrderedDict) from the caller.
        if "device_info" not in kwargs or not isinstance(kwargs["device_info"], dict):
            self.print_plugin_error(
                "Error: This plugin requires dict parameter 'device_info' passed by "
                "the caller."
            )
            return False

        result = False
        if self.check_parameters(capability, *args, **kwargs) is True:
            if capability in HostTestPluginPowerCycleResetMethod.capabilities:
                target_id = kwargs["target_id"]
                device_info = kwargs["device_info"]
                ret = self.__get_mbed_tas_rm_addr()
                if ret:
                    ip, port = ret
                    result = self.__hw_reset(ip, port, target_id, device_info)

        return result

    def __get_mbed_tas_rm_addr(self):
        """Get IP and Port of mbed tas rm service.

        Returns:
            (ip, port) tuple read from the environment, or None when either
            MBED_TAS_RM_IP or MBED_TAS_RM_PORT is not set.
        """
        try:
            ip = os.environ["MBED_TAS_RM_IP"]
            port = os.environ["MBED_TAS_RM_PORT"]
            return ip, port
        except KeyError as e:
            self.print_plugin_error(
                "HOST: Failed to read environment variable ("
                + str(e)
                + "). Can't perform hardware reset."
            )

        return None

    def __hw_reset(self, ip, port, target_id, device_info):
        """Reset target device using TAS RM API.

        Switches the platform OFF, waits for the OFF state, switches it
        back ON and waits until it reports ON with a valid mount point.
        On success, copies the reported resource fields into |device_info|.

        Returns:
            True on success, False otherwise.
        """
        switch_off_req = {
            "name": "switchResource",
            "sub_requests": [
                {
                    "resource_type": "mbed_platform",
                    "resource_id": target_id,
                    "switch_command": "OFF",
                }
            ],
        }

        switch_on_req = {
            "name": "switchResource",
            "sub_requests": [
                {
                    "resource_type": "mbed_platform",
                    "resource_id": target_id,
                    "switch_command": "ON",
                }
            ],
        }

        result = False

        # reset target
        switch_off_req = self.__run_request(ip, port, switch_off_req)
        if switch_off_req is None:
            self.print_plugin_error("HOST: Failed to communicate with TAS RM!")
            return result

        if "error" in switch_off_req["sub_requests"][0]:
            self.print_plugin_error(
                "HOST: Failed to reset target. error = %s"
                % switch_off_req["sub_requests"][0]["error"]
            )
            return result

        def poll_state(required_state):
            # Poll the resource state (up to ~300 s, every 2 s) until it
            # reaches |required_state|; for "ON" also wait until a mount
            # point is reported.
            switch_state_req = {
                "name": "switchResource",
                "sub_requests": [
                    {
                        "resource_type": "mbed_platform",
                        "resource_id": target_id,
                        "switch_command": "STATE",
                    }
                ],
            }
            resp = self.__run_request(ip, port, switch_state_req)
            start = time.time()
            while (
                resp
                and (
                    resp["sub_requests"][0]["state"] != required_state
                    or (
                        required_state == "ON"
                        and resp["sub_requests"][0]["mount_point"] == "Not Connected"
                    )
                )
                and (time.time() - start) < 300
            ):
                time.sleep(2)
                # NOTE(review): the previous response is sent back as the
                # next request body (original behaviour, kept as-is) --
                # presumably the service echoes a pollable document; confirm.
                resp = self.__run_request(ip, port, resp)
            return resp

        poll_state("OFF")
        self.__run_request(ip, port, switch_on_req)
        resp = poll_state("ON")
        if (
            resp
            and resp["sub_requests"][0]["state"] == "ON"
            and resp["sub_requests"][0]["mount_point"] != "Not Connected"
        ):
            # BUG FIX: dict.viewitems() is Python 2 only and raises
            # AttributeError on Python 3; items() works on both.
            for k, v in resp["sub_requests"][0].items():
                device_info[k] = v
            result = True
        else:
            self.print_plugin_error("HOST: Failed to reset device %s" % target_id)

        return result

    @staticmethod
    def __run_request(ip, port, request):
        # Issue one JSON request to the TAS RM service and return the
        # decoded JSON body, or None on a non-200 status.
        headers = {"Content-type": "application/json", "Accept": "text/plain"}
        get_resp = requests.get(
            "http://%s:%s/" % (ip, port), data=json.dumps(request), headers=headers
        )
        resp = get_resp.json()
        if get_resp.status_code == 200:
            return resp
        else:
            return None
def load_plugin():
    """Return plugin available in this module."""
    plugin = HostTestPluginPowerCycleResetMethod()
    return plugin
| {
"content_hash": "079e0c941238322bce1e9493cb2f179b",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 85,
"avg_line_length": 32.6536312849162,
"alnum_prop": 0.5011120615911036,
"repo_name": "ARMmbed/greentea",
"id": "8b22bc055554d9cbbe0d9bc46f4f4d825db98f11",
"size": "5959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/htrun/host_tests_plugins/module_power_cycle_target.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "226269"
}
],
"symlink_target": ""
} |
import logging as _logging
from scli.constants import ParameterName as Name
from scli.terminal.base import TerminalBase
from scli.terminal.beanstalk_terminal import BeanstalkTerminal
from scli.terminal.service_terminal import ServiceTerminal
from scli.terminal.rds_terminal import RdsTerminal
log = _logging.getLogger('cli')
class Terminal(object):
    """Terminal front-end that prompts the user for parameter values.

    Most parameters are asked through the generic TerminalBase prompt; a
    fixed set of parameters needing special handling is dispatched to
    dedicated ask_* helpers on the service/beanstalk/rds terminals.
    """

    # Kept for backward compatibility with any code that reads the class
    # attribute directly; instances build their own table in __init__.
    _handlers = dict()  # mapping for parameters need special handling

    def __init__(self):
        # BUG FIX: this used to insert into the shared class-level dict, so
        # every instance (and the class itself) mutated the same mapping.
        # An instance-level dict keeps instances independent.
        self._handlers = {
            Name.AwsAccessKeyId: ServiceTerminal.ask_aws_access_key_id,
            Name.AwsSecretAccessKey: ServiceTerminal.ask_aws_secret_access_key,
            Name.Region: ServiceTerminal.ask_region,
            Name.ServiceEndpoint: ServiceTerminal.ask_service_endpoint,
            Name.ApplicationName: BeanstalkTerminal.ask_application_name,
            Name.EnvironmentName: BeanstalkTerminal.ask_environment_name,
            Name.SolutionStack: BeanstalkTerminal.ask_solution_stack,
            Name.BranchMapping: BeanstalkTerminal.ask_branch,
            Name.RdsEnabled: RdsTerminal.ask_rds_creation,
            Name.RdsMasterPassword: RdsTerminal.ask_master_password,
            Name.RdsSourceSnapshotName: RdsTerminal.ask_snapshot_name,
            Name.RdsDeletionPolicy: RdsTerminal.ask_delete_to_snapshot,
        }

    def ask_parameters(self, parameter_pool, parameter_names, skip_exist):
        '''
        Ask customer input of specified parameters via terminal interface.
        if skip_exist is set to True, then any parameters having value in pool
        will be skipped.
        '''
        # Sort parameter list by priority before prompting.
        sorted_list = self._sort_list(parameter_names, parameter_pool) \
            if skip_exist else self._sort_list(parameter_names, None)

        for parameter_name in sorted_list:
            if parameter_pool.has(parameter_name) and skip_exist:
                continue
            if parameter_name in self._handlers:
                self._handlers[parameter_name](parameter_pool)
            else:
                TerminalBase.ask_parameter(parameter_pool, parameter_name)

    @classmethod
    def _sort_list(cls, parameter_names, parameter_pool=None):
        '''
        Return sorted list of parameter names according to their priority.
        if parameter_pool is not None, returned list will not contain parameters
        which already have value.
        '''
        # Simple insertion sort driven by Name.is_ahead.
        sorted_list = []
        for parameter_name in parameter_names:
            if parameter_pool is not None and parameter_pool.has(parameter_name):
                continue  # skip: parameter already present in parameter pool
            if len(sorted_list) < 1:
                sorted_list.append(parameter_name)
            else:
                index = cls._find_index(sorted_list, parameter_name)
                sorted_list.insert(index, parameter_name)
        return sorted_list

    @classmethod
    def _find_index(cls, sorted_list, parameter_name):
        # Insertion point: index of the first element NOT ahead of
        # |parameter_name|; append at the end when every element is ahead.
        for index, name in enumerate(sorted_list):
            if not Name.is_ahead(name, parameter_name):
                return index
        else:
            return len(sorted_list)
| {
"content_hash": "f759f647382060aadd94a7d689665f1f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 91,
"avg_line_length": 40.470588235294116,
"alnum_prop": 0.6494186046511627,
"repo_name": "JoaoVasques/aws-devtool",
"id": "aee34190fed56f75785d68fa938424972fa8aa4a",
"size": "4173",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "eb/macosx/python2.7/scli/terminal/terminal.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "31565"
},
{
"name": "Python",
"bytes": "6266200"
},
{
"name": "Ruby",
"bytes": "159495"
},
{
"name": "Shell",
"bytes": "1895"
}
],
"symlink_target": ""
} |
from collections.abc import Sequence
import numpy as np
import pytest
try:
import rowan
skip_rowan = False
except ImportError:
skip_rowan = True
import hoomd
import hoomd.md as md
skip_rowan = pytest.mark.skipif(skip_rowan, reason="rowan cannot be imported.")
@pytest.fixture
def valid_body_definition():
    """Rigid-body definition dict shared by the tests below."""
    half_inv_sqrt2 = 1 / (2**(1. / 2.))
    tetrahedron = [
        [1, 0, -half_inv_sqrt2],
        [-1, 0, -half_inv_sqrt2],
        [0, -1, half_inv_sqrt2],
        [0, 1, half_inv_sqrt2],
    ]
    return {
        "constituent_types": ["B"] * 4,
        "positions": tetrahedron,
        "orientations": [(1.0, 0.0, 0.0, 0.0)] * 4,
        "charges": [0.0, 1.0, 2.0, 3.5],
        "diameters": [1.0, 1.5, 0.5, 1.0]
    }
def test_body_setting(valid_body_definition):
    """A valid body definition round-trips through Rigid.body; any single
    invalid value is rejected with a TypeConversionError."""
    # For each key, a list of values that must be rejected.
    invalid_body_definitions = {
        "constituent_types": [[4], "hello", ("A", 4)],
        "positions": [[(1, 2)], [(1.0, 4.0, "foo")], 1.0, "hello"],
        "orientations": [[(1, 2, 3)], [(1.0, 4.0, 5.0, "foo")], [1.0], 1.0,
                         "foo"],
        "charges": [0.0, ["foo"]],
        "diameters": [1.0, "foo", ["foo"]]
    }

    rigid = md.constrain.Rigid()
    current_body_definition = {**valid_body_definition}
    rigid.body["A"] = current_body_definition

    # Numeric sequences are compared with allclose; strings and scalars
    # must round-trip exactly.
    for key, value in rigid.body["A"].items():
        if (isinstance(value, Sequence) and len(value) > 0
                and not isinstance(value[0], str)):
            assert np.allclose(value, current_body_definition[key])
        else:
            assert value == current_body_definition[key]

    # Test dictionaries with a single invalid input
    for key, values in invalid_body_definitions.items():
        for invalid_value in values:
            current_body_definition[key] = invalid_value
            with pytest.raises(hoomd.data.typeconverter.TypeConversionError):
                rigid.body["A"] = current_body_definition
            # Reset the body definition to a valid state to ensure only one
            # key is ever invalid.
            current_body_definition[key] = valid_body_definition[key]
def check_bodies(snapshot, definition):
    """Non-general assumes a snapshot from two_particle_snapshot_factory.

    This is just to prevent duplication of code from test_create_bodies and
    test_running_simulation.

    Expected layout: 2 central particles at tags 0-1, plus 4 constituents
    per body at tags 2-5 (body 0) and 6-9 (body 1).
    """
    assert snapshot.particles.N == 10
    assert all(snapshot.particles.typeid[3:] == 1)
    # Each constituent carries the tag of its central particle in 'body'.
    assert snapshot.particles.body[0] == 0
    assert all(snapshot.particles.body[2:6] == 0)
    assert snapshot.particles.body[1] == 1
    assert all(snapshot.particles.body[6:] == 1)

    # check charges
    for i in range(4):
        assert snapshot.particles.charge[i + 2] == definition["charges"][i]
        assert snapshot.particles.charge[i + 6] == definition["charges"][i]

    # check diameters
    for i in range(4):
        assert snapshot.particles.diameter[i + 2] == definition["diameters"][i]
        assert snapshot.particles.diameter[i + 6] == definition["diameters"][i]

    # (position, orientation) of the two central particles.
    particle_one = (snapshot.particles.position[0],
                    snapshot.particles.orientation[0])
    particle_two = (snapshot.particles.position[1],
                    snapshot.particles.orientation[1])

    # Check positions
    def check_position(central_position, central_orientation,
                       constituent_position, local_position):
        # Constituent position = central position + local offset rotated by
        # the central particle's orientation.
        d_pos = rowan.rotate(central_orientation, local_position)
        assert np.allclose(central_position + d_pos, constituent_position)

    for i in range(4):
        check_position(*particle_one, snapshot.particles.position[i + 2],
                       definition["positions"][i])
        check_position(*particle_two, snapshot.particles.position[i + 6],
                       definition["positions"][i])

    # check orientation
    def check_orientation(central_orientation, constituent_orientation,
                          local_orientation):
        # NOTE(review): this compares the expected orientation against
        # local_orientation, not constituent_orientation -- with identity
        # orientations in the fixture both pass, but this looks like it was
        # meant to check constituent_orientation; confirm.
        expected_orientation = rowan.normalize(
            rowan.multiply(central_orientation, local_orientation))
        assert np.allclose(expected_orientation, local_orientation)

    for i in range(4):
        check_orientation(particle_one[1],
                          snapshot.particles.orientation[i + 2],
                          definition["orientations"][i])
        check_orientation(particle_two[1],
                          snapshot.particles.orientation[i + 6],
                          definition["orientations"][i])
@skip_rowan
def test_create_bodies(simulation_factory, two_particle_snapshot_factory,
                       lattice_snapshot_factory, valid_body_definition):
    """create_bodies() must append correctly-placed constituents for every
    central particle, regardless of type ordering with respect to tags."""
    rigid = md.constrain.Rigid()
    rigid.body["A"] = valid_body_definition

    initial_snapshot = two_particle_snapshot_factory()
    if initial_snapshot.communicator.rank == 0:
        initial_snapshot.particles.types = ["A", "B"]
    sim = simulation_factory(initial_snapshot)

    rigid.create_bodies(sim.state)
    snapshot = sim.state.get_snapshot()
    if snapshot.communicator.rank == 0:
        check_bodies(snapshot, valid_body_definition)

    sim.operations.integrator = hoomd.md.Integrator(dt=0.005, rigid=rigid)
    # Ensure validate bodies passes
    sim.run(0)

    # Second test with more general testing
    # detach rigid
    sim.operations.integrator.rigid = None

    initial_snapshot = lattice_snapshot_factory(n=10)
    if initial_snapshot.communicator.rank == 0:
        initial_snapshot.particles.types = ["C", "A", "B"]
        # Grab the middle particles and a random one to ensure that particle
        # type ordering with respect to particle tag does not matter for
        # create_bodies.
        initial_snapshot.particles.typeid[100:800] = 1
        initial_snapshot.particles.typeid[55] = 1

    sim = simulation_factory(initial_snapshot)
    rigid.create_bodies(sim.state)
    snapshot = sim.state.get_snapshot()
    if snapshot.communicator.rank == 0:
        # Check central particles
        central_tags = np.empty(701, dtype=int)
        central_tags[0] = 55
        central_tags[1:] = np.arange(100, 800)
        # BUG FIX: removed a stray bare `print` expression left over from
        # debugging (it referenced the builtin without calling it).
        assert np.all(snapshot.particles.body[central_tags] == central_tags)
        # Check free bodies
        assert np.all(snapshot.particles.body[:55] == -1)
        assert np.all(snapshot.particles.body[56:100] == -1)
        assert np.all(snapshot.particles.body[800:1000] == -1)
        # Check constituent_particles
        assert np.all(
            snapshot.particles.body[1000:] == np.repeat(central_tags, 4))

    sim.operations.integrator = hoomd.md.Integrator(dt=0.005, rigid=rigid)
    # Ensure validate bodies passes
    sim.run(0)
def test_attaching(simulation_factory, two_particle_snapshot_factory,
                   valid_body_definition):
    """The body definition must survive attaching the rigid constraint."""
    constraint = md.constrain.Rigid()
    constraint.body["A"] = valid_body_definition
    method = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
    integrator = md.Integrator(dt=0.005, methods=[method])
    integrator.rigid = constraint

    snapshot = two_particle_snapshot_factory()
    if snapshot.communicator.rank == 0:
        snapshot.particles.types = ["A", "B"]
    sim = simulation_factory(snapshot)
    constraint.create_bodies(sim.state)
    sim.operations += integrator
    sim.run(0)

    for key, stored in constraint.body["A"].items():
        expected = valid_body_definition[key]
        is_numeric_sequence = (isinstance(stored, Sequence)
                               and len(stored) > 0
                               and not isinstance(stored[0], str))
        if is_numeric_sequence:
            assert np.allclose(stored, expected)
        else:
            assert stored == expected
@pytest.mark.serial
def test_error_on_invalid_body(simulation_factory,
                               two_particle_snapshot_factory,
                               valid_body_definition):
    """Running without create_bodies() must raise during validation."""
    constraint = md.constrain.Rigid()
    constraint.body["A"] = valid_body_definition
    method = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
    integrator = md.Integrator(dt=0.005, methods=[method])
    integrator.rigid = constraint

    snapshot = two_particle_snapshot_factory()
    if snapshot.communicator.rank == 0:
        snapshot.particles.types = ["A", "B"]
    sim = simulation_factory(snapshot)
    sim.operations += integrator
    # No constituent particles were created, so body validation fails.
    with pytest.raises(RuntimeError):
        sim.run(0)
@skip_rowan
def test_running_simulation(simulation_factory, two_particle_snapshot_factory,
                            valid_body_definition):
    """Bodies must stay intact after integrating a few steps with LJ forces."""
    constraint = md.constrain.Rigid()
    constraint.body["A"] = valid_body_definition
    method = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
    lj = hoomd.md.pair.LJ(nlist=md.nlist.Cell(), mode="shift")
    lj.params.default = {"epsilon": 0.0, "sigma": 1}
    lj.params[("A", "A")] = {"epsilon": 1.0}
    lj.params[("B", "B")] = {"epsilon": 1.0}
    lj.r_cut.default = 2**(1.0 / 6.0)
    integrator = md.Integrator(dt=0.005, methods=[method], forces=[lj])
    integrator.rigid = constraint

    snapshot = two_particle_snapshot_factory()
    if snapshot.communicator.rank == 0:
        snapshot.particles.types = ["A", "B"]
    sim = simulation_factory(snapshot)
    sim.seed = 5
    constraint.create_bodies(sim.state)
    sim.operations += integrator
    sim.run(5)

    final_snapshot = sim.state.get_snapshot()
    if sim.device.communicator.rank == 0:
        check_bodies(final_snapshot, valid_body_definition)
def test_running_without_body_definition(simulation_factory,
                                         two_particle_snapshot_factory):
    """A rigid constraint with no body definitions must not break a run."""
    constraint = md.constrain.Rigid()
    method = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
    lj = hoomd.md.pair.LJ(nlist=md.nlist.Cell(), mode="shift")
    lj.params.default = {"epsilon": 0.0, "sigma": 1}
    lj.params[("A", "A")] = {"epsilon": 1.0}
    lj.params[("B", "B")] = {"epsilon": 1.0}
    lj.r_cut.default = 2**(1.0 / 6.0)
    integrator = md.Integrator(dt=0.005, methods=[method], forces=[lj])
    integrator.rigid = constraint

    snapshot = two_particle_snapshot_factory()
    if snapshot.communicator.rank == 0:
        snapshot.particles.types = ["A", "B"]
    sim = simulation_factory(snapshot)
    sim.seed = 5
    sim.operations += integrator
    sim.run(1)
@pytest.mark.serial
def test_setting_body_after_attaching(simulation_factory,
                                      two_particle_snapshot_factory,
                                      valid_body_definition):
    """Setting a body definition after the integrator is attached must be
    accepted, but running again without recreating bodies must raise."""
    rigid = md.constrain.Rigid()
    langevin = md.methods.Langevin(kT=2.0, filter=hoomd.filter.Rigid())
    lj = hoomd.md.pair.LJ(nlist=md.nlist.Cell(), mode="shift")
    lj.params.default = {"epsilon": 0.0, "sigma": 1}
    lj.params[("A", "A")] = {"epsilon": 1.0}
    lj.params[("B", "B")] = {"epsilon": 1.0}
    lj.r_cut.default = 2**(1.0 / 6.0)
    integrator = md.Integrator(dt=0.005, methods=[langevin], forces=[lj])
    integrator.rigid = rigid

    initial_snapshot = two_particle_snapshot_factory()
    if initial_snapshot.communicator.rank == 0:
        initial_snapshot.particles.types = ["A", "B"]
    sim = simulation_factory(initial_snapshot)
    sim.seed = 5
    sim.operations += integrator

    # First run with an empty body definition succeeds.
    sim.run(1)
    rigid.body["A"] = valid_body_definition
    # This should error because the bodies have not been updated, but the
    # setting should be fine.
    with pytest.raises(RuntimeError):
        sim.run(1)
| {
"content_hash": "6097592350fad3fb9e88190e4a4e37d5",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 79,
"avg_line_length": 37.82333333333333,
"alnum_prop": 0.6198995329161893,
"repo_name": "joaander/hoomd-blue",
"id": "642e26ef6552f4649a35294c5f6766ab33b60852",
"size": "11347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hoomd/md/pytest/test_rigid.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7257"
},
{
"name": "C",
"bytes": "32300"
},
{
"name": "C++",
"bytes": "7343253"
},
{
"name": "CMake",
"bytes": "157407"
},
{
"name": "Cuda",
"bytes": "1360644"
},
{
"name": "HTML",
"bytes": "1804"
},
{
"name": "Makefile",
"bytes": "7678"
},
{
"name": "Perl",
"bytes": "4439"
},
{
"name": "Python",
"bytes": "2162907"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
from .composition_parts import DebugInfo
from .composition_parts import WithIdentifier
class Proxy(object):
    """
    Proxies attribute access on this object to the target object.
    """

    def __init__(self,
                 target_object=None,
                 target_attrs=None,
                 target_attrs_with_priority=None):
        """
        Creates a new proxy to |target_object|.

        Args:
            target_object: The object to which attribute access is proxied.
                This can be set later by set_target_object.
            target_attrs: None or list of attribute names to be proxied. If
                None, all the attribute access is proxied.
            target_attrs_with_priority: None or list of attribute names to be
                unconditionally proxied with priority over attributes defined on
                |self|. If None, no attribute has priority over own attributes.
        """
        if target_attrs is not None:
            assert isinstance(target_attrs, (list, set, tuple))
            assert all(isinstance(attr, str) for attr in target_attrs)
        self._target_object = target_object
        self._target_attrs = target_attrs
        self._target_attrs_with_priority = target_attrs_with_priority

    def __getattr__(self, attribute):
        # Only invoked when normal lookup on |self| fails: forward the
        # access to the target object, restricted to |_target_attrs| when
        # that allow-list is given.
        try:
            target_object = object.__getattribute__(self, '_target_object')
            target_attrs = object.__getattribute__(self, '_target_attrs')
        except AttributeError:
            # When unpickling, __init__ does not get called. _target_object is
            # not defined yet during unpickling. Then, just fallback to the
            # default access.
            return object.__getattribute__(self, attribute)

        assert target_object is not None
        if target_attrs is None or attribute in target_attrs:
            return getattr(target_object, attribute)

        raise AttributeError

    def __getattribute__(self, attribute):
        # Invoked for EVERY attribute access: attributes listed in
        # |_target_attrs_with_priority| are forwarded to the target even
        # when |self| defines an attribute of the same name.
        try:
            target_object = object.__getattribute__(self, '_target_object')
            target_attrs = object.__getattribute__(
                self, '_target_attrs_with_priority')
        except AttributeError:
            # When unpickling, __init__ does not get called. _target_object is
            # not defined yet during unpickling. Then, just fallback to the
            # default access.
            return object.__getattribute__(self, attribute)

        # It's okay to access own attributes, such as 'identifier', even when
        # the target object is not yet resolved.
        if target_object is None:
            return object.__getattribute__(self, attribute)

        if target_attrs is not None and attribute in target_attrs:
            return getattr(target_object, attribute)

        return object.__getattribute__(self, attribute)

    @staticmethod
    def get_all_attributes(target_class):
        """
        Returns all the attributes of |target_class| including its ancestors'
        attributes. Protected attributes (starting with an underscore,
        including two underscores) are excluded.
        """

        def collect_attrs_recursively(target_class):
            # Union of this class's own attribute names with those of all
            # its base classes (recursively).
            attrs_sets = [set(vars(target_class).keys())]
            for base_class in target_class.__bases__:
                attrs_sets.append(collect_attrs_recursively(base_class))
            return set.union(*attrs_sets)

        assert isinstance(target_class, type)
        return sorted([
            attr for attr in collect_attrs_recursively(target_class)
            if not attr.startswith('_')
        ])

    def make_copy(self, memo):
        # Proxies are deliberately shared rather than duplicated.
        # NOTE(review): presumably part of a project-wide make_copy(memo)
        # protocol -- confirm against its callers.
        return self

    def set_target_object(self, target_object):
        # One-shot setter: the target may only be resolved once.
        assert self._target_object is None
        assert isinstance(target_object, object)
        self._target_object = target_object

    @property
    def target_object(self):
        # Raises (via assert) when the target has not been resolved yet.
        assert self._target_object is not None
        return self._target_object
# Module-private sentinel: RefById.__init__ asserts it receives this exact
# object, so references can only be constructed through RefByIdFactory (or
# other code in this module).
_REF_BY_ID_PASS_KEY = object()
class RefById(Proxy, WithIdentifier):
    """
    Represents a reference to an object specified with the given identifier,
    which reference will be resolved later.

    This reference is also a proxy to the object for convenience so that you
    can treat this reference as if the object itself.
    """

    def __init__(self,
                 identifier,
                 debug_info=None,
                 target_attrs=None,
                 target_attrs_with_priority=None,
                 pass_key=None):
        # |pass_key| must be the module-private sentinel; this restricts
        # construction to RefByIdFactory and module-internal code.
        assert debug_info is None or isinstance(debug_info, DebugInfo)
        assert pass_key is _REF_BY_ID_PASS_KEY

        Proxy.__init__(
            self,
            target_attrs=target_attrs,
            target_attrs_with_priority=target_attrs_with_priority)
        WithIdentifier.__init__(self, identifier)
        self._ref_own_debug_info = debug_info

    @property
    def ref_own_debug_info(self):
        """This reference's own DebugInfo."""
        return self._ref_own_debug_info
class RefByIdFactory(object):
    """
    Creates a group of references that are later resolvable.

    Every reference created through one factory is tracked together, so a
    single |for_each| call can visit (e.g. resolve) all of them at the very
    end of the compilation phases.
    """

    def __init__(self, target_attrs=None, target_attrs_with_priority=None):
        self._references = []
        # Once |for_each| runs, the factory is frozen and refuses to hand
        # out any further references.
        self._is_frozen = False
        self._target_attrs = target_attrs
        self._target_attrs_with_priority = target_attrs_with_priority

    def create(self, identifier, debug_info=None):
        """
        Returns a new RefById tracked by this factory.

        Args:
            identifier: An identifier to be resolved later.
            debug_info: Where the reference is created, which is useful
                especially when the reference is unresolvable.
        """
        assert not self._is_frozen
        reference = RefById(
            identifier,
            debug_info=debug_info,
            target_attrs=self._target_attrs,
            target_attrs_with_priority=self._target_attrs_with_priority,
            pass_key=_REF_BY_ID_PASS_KEY)
        self._references.append(reference)
        return reference

    def init_subclass_instance(self, instance, identifier, debug_info=None):
        """
        Initializes |instance|, an instance of a strict subclass of RefById,
        and tracks it exactly like a reference created by this factory.
        """
        assert type(instance) is not RefById
        assert isinstance(instance, RefById)
        assert not self._is_frozen
        RefById.__init__(
            instance,
            identifier,
            debug_info=debug_info,
            target_attrs=self._target_attrs,
            target_attrs_with_priority=self._target_attrs_with_priority,
            pass_key=_REF_BY_ID_PASS_KEY)
        self._references.append(instance)

    def for_each(self, callback):
        """
        Applies |callback| to every reference created so far and freezes the
        factory: no new reference can be created afterwards.

        Args:
            callback: A callable that takes a reference as only the argument.
                Return value is not used.
        """
        assert callable(callback)
        self._is_frozen = True
        for reference in self._references:
            callback(reference)
| {
"content_hash": "70ee681ae4b941495dcc6ccf557acbff",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 80,
"avg_line_length": 37.71356783919598,
"alnum_prop": 0.6131912058627582,
"repo_name": "chromium/chromium",
"id": "8de5e54524c977fe14585aeff44fb8f92cbd79f7",
"size": "7646",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/blink/renderer/bindings/scripts/web_idl/reference.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import sys
import json
import time
import requests
from uai.utils.utils import _verfy_ac
from uai.utils.logger import uai_logger
from uaitrain.arch_conf.base_conf import *
from uai.utils.retcode_checker import *
MAX_POLL_STEPS = 200
DEPLOY_ID_FILE = './deploy_id.log'
UCLOUD_API_URL = 'http://api.ucloud.cn'
PARAMS_DEFAULT_REGION = 'cn-bj2'
PARAMS_DEFAULT_ZONE ='cn-bj2-04'
PARAMS_DEFAULT_BUSINESSGROUP = "Default"
PACKAGE_TYPE = {'os':'OS', 'language':'Python', 'ai_arch_v':'AIFrame', 'accelerator':'Accelerator'}
class UaiCmdTool(object):
""" The Base Create Tool Class with UAI
"""
    def __init__(self, parser):
        """Bind the tool to an argparse-style |parser| and prepare the
        (initially empty) CLI/API parameter dicts."""
        self.parser = parser
        self.conf_params = {}
        self.cmd_params = {}
        self._add_args()
    def _add_args(self):
        # Build the argument/config holder; its params are populated later
        # by _load_args().
        self.config = ArchJsonConf('', self.parser)
    def cmd(self):
        """Load CLI parameters, build the API request and send it."""
        self._load_args()
        self._format_request_param()
        self._cmd_request()
    def _load_args(self):
        # Pull parsed parameter values from the config object into
        # |conf_params|.
        self.config.load_params()
        self.conf_params = self.config.params
def _format_request_param(self):
self._format_account_param()
if self.conf_params['commands'] == 'checkbase':
self._format_checkbase_param()
self.cmd_url = UCLOUD_API_URL
if self.conf_params['commands'] == 'create':
self._format_create_param()
self.cmd_url = UCLOUD_API_URL
else:
uai_logger.error("Command is not valid: {0} ".format(self.conf_params['commands']))
raise RuntimeError("Command is not valid: {0} ".format(self.conf_params['commands']))
def _format_account_param(self):
self.cmd_params['PublicKey'] = self.conf_params['public_key']
if self.conf_params['project_id']:
self.cmd_params['ProjectId'] = self.conf_params['project_id']
    def _format_create_param(self):
        """Fill |cmd_params| for the CreateUAITrainJob API call."""
        self.cmd_params['Region'] = PARAMS_DEFAULT_REGION
        self.cmd_params['Zone'] = PARAMS_DEFAULT_ZONE
        self.cmd_params['TrainJobName'] = self.conf_params['job_name']
        self.cmd_params['TrainPublicKey'] = self.conf_params['public_key']
        self.cmd_params['TrainPrivateKey'] = self.conf_params['private_key']
        self.cmd_params['TrainWorkId'] = self.conf_params['worker_id']
        self.cmd_params['CodeUhubPath'] = self.conf_params['uhub_path']
        self.cmd_params['DataUfilePath'] = self.conf_params['ufile_datapath']
        self.cmd_params['OutputUfilePath'] = self.conf_params['ufile_outputpath']
        self.cmd_params['DockerCmd'] = self.conf_params['docker_cmd']
        self.cmd_params['MaxExecuteTime'] = self.conf_params['max_exectime']
        # Action must be applied at last.
        self.cmd_params['Action'] = 'CreateUAITrainJob'
def _format_checkbase_param(self):
self.cmd_params['OSVersion'] = self.conf_params['os']
self.cmd_params['PythonVersion'] = self.conf_params['language']
self.cmd_params['AIFrameVersion'] = self.conf_params['ai_arch_v']
self.cmd_params['AcceleratorID'] = self.conf_params['accelerator']
# #Action must be applied at last
self.cmd_params['Action'] = 'CheckUAITrainBaseImgExists'
    def _format_availableenv_param(self, type):
        """Fill |cmd_params| for the GetUAITrainEnvPkg API call.

        Note: |type| (shadows the builtin) must be a PACKAGE_TYPE key, i.e.
        one of 'os', 'language', 'ai_arch_v', 'accelerator'.
        """
        self.cmd_params['PkgType'] = PACKAGE_TYPE[type]
        # #Action must be applied at last
        self.cmd_params['Action'] = 'GetUAITrainEnvPkg'
def _cmd_request(self):
if self.conf_params['commands'] == 'availableenv':
self._cmd_writefile_package(self.conf_params['pkg_type'])
else:
self._cmd_common_request()
def _cmd_common_request(self):
if ('Signature' in self.cmd_params) is True:
self.cmd_params.pop('Signature')
self.cmd_params['Signature'] = _verfy_ac(self.conf_params['private_key'],
self.cmd_params)
uai_logger.info("Call http request: {0} ".format(get_request(self.cmd_url, params=self.cmd_params)))
r = requests.get(self.cmd_url, params=self.cmd_params)
self.rsp = json.loads(r.text, encoding='utf-8')
if self.rsp["RetCode"] != 0:
uai_logger.error("{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], self.rsp["RetCode"], self.rsp["Message"].encode('utf-8')))
raise RuntimeError("{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], self.rsp["RetCode"], self.rsp["Message"].encode('utf-8')))
else:
del self.rsp['Action']
uai_logger.info("{0} Success: {1}".format(self.cmd_params["Action"], get_response(self.rsp,0)))
def _cmd_writefile_package(self, filepath):
if ('Signature' in self.cmd_params) is True:
self.cmd_params.pop('Signature')
self.cmd_params['Signature'] = _verfy_ac(self.conf_params['private_key'],
self.cmd_params)
uai_logger.info("Call http request: {0} ".format(get_request(self.cmd_url, params=self.cmd_params)))
r = requests.get(self.cmd_url, params=self.cmd_params)
rsp = json.loads(r.text, encoding='utf-8')
if rsp["RetCode"] != 0:
uai_logger.error("{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], rsp["RetCode"], rsp["Message"].encode('utf-8')))
raise RuntimeError(
"{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], rsp["RetCode"], rsp["Message"].encode('utf-8')))
else:
with open(filepath, 'w') as f:
json.dump(rsp["PkgSet"], f)
def translate_pkg_params(self):
if self.conf_params['os'] and type(self.conf_params['os']) is str:
self.conf_params['os'] = \
self._translate_pkg_to_id('os', self.conf_params['os'].split(','))[0]
if self.conf_params['language'] and type(self.conf_params['language']) is str:
self.conf_params['language'] = \
self._translate_pkg_to_id('language', self.conf_params['language'].split(','))[0]
if self.conf_params['ai_arch_v'] and type(self.conf_params['ai_arch_v']) is str:
self.conf_params['ai_arch_v'] = \
self._translate_pkg_to_id('ai_arch_v', self.conf_params['ai_arch_v'].split(','))[0]
if self.conf_params['accelerator'] and type(self.conf_params['accelerator']) is str:
self.conf_params['accelerator'] = \
self._translate_pkg_to_id('accelerator', self.conf_params['accelerator'].split(','))[0]
def _translate_pkg_to_id(self, pkgtype, pkglist):
if not os.path.exists(pkgtype):
# raise RuntimeError("{0} file doesn't found, please download from github "
# "and put it under the same directory as deploy tool".format(pkgtype))
uai_logger.info("Start download {0} package info".format(pkgtype))
self.conf_params['pkg_type'] = pkgtype
self._format_account_param()
self._format_availableenv_param(pkgtype)
self.cmd_url = UCLOUD_API_URL
self._cmd_writefile_package(pkgtype)
resultlist = []
uai_logger.info("Start translate {0} package to their id, packages: {1}".format(pkgtype, pkglist))
for avpkg in json.load(open(pkgtype), 'utf-8'):
for pkg in pkglist:
if pkgtype == 'os' or pkgtype == 'language' or pkgtype == 'ai_arch_v':
versionsplit = pkg.rfind('-')
if versionsplit >= 0:
if avpkg["PkgName"] == pkg[:versionsplit] and (
avpkg["PkgVersion"] == "" or avpkg["PkgVersion"] == pkg[versionsplit + 1:]):
pkglist.remove(pkg)
resultlist.append(avpkg["PkgId"])
elif versionsplit < 0:
if avpkg["PkgName"] == pkg:
pkglist.remove(pkg)
resultlist.append(avpkg["PkgId"])
else:
if avpkg["PkgName"] == pkg:
pkglist.remove(pkg)
resultlist.append(avpkg["PkgId"])
if len(pkglist) != 0:
uai_logger.error("Some {0} package is not supported: {1}".format(pkgtype, pkglist))
raise RuntimeError("Some {0} package is not supported: {1}".format(pkgtype, pkglist))
uai_logger.info("End translate {0} package to their id, result: {1}".format(pkgtype, resultlist))
return resultlist
def get_base_image(self, conf_params):
self.conf_params = conf_params
self.conf_params["commands"] = "checkbase"
self._format_account_param()
self._format_checkbase_param()
self.cmd_url = UCLOUD_API_URL
self._cmd_common_request()
return self.rsp["BimgName"][0] | {
"content_hash": "38796a23279b71f49e6f62ea3aa6266c",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 144,
"avg_line_length": 47.73118279569893,
"alnum_prop": 0.5839152962378914,
"repo_name": "ucloud/uai-sdk",
"id": "ce4515946289ce8cdf8ac5f5e1dc5fb71ecc58e4",
"size": "9565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uaitrain/cmd/base_cmd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "470557"
}
],
"symlink_target": ""
} |
import ftplib
import os
import paramiko
from nova import exception as nova_exception
from nova.openstack.common import log as logging
from nova.virt.powervm import exception
# Module-level logger for the PowerVM ssh/ftp helper functions below.
LOG = logging.getLogger(__name__)
class Connection(object):
    """Credentials bundle for a remote PowerVM manager.

    Consumed by the ssh/ftp helpers in this module; carries only plain
    attributes, no behavior.
    """

    def __init__(self, host, username, password, port=22):
        """Store host, credentials and the ssh port (default 22)."""
        self.port = port
        self.host = host
        self.username = username
        self.password = password
def ssh_connect(connection):
    """Open an ssh session to the remote system described by *connection*.

    :param connection: a Connection object.
    :returns: paramiko.SSHClient -- an active ssh connection.
    :raises: PowerVMConnectionFailed
    """
    try:
        client = paramiko.SSHClient()
        # Accept unknown host keys automatically.
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(connection.host,
                       username=connection.username,
                       password=connection.password,
                       port=connection.port)
    except Exception:
        LOG.exception(_('Connection error connecting PowerVM manager'))
        raise exception.PowerVMConnectionFailed()
    return client
def ssh_command_as_root(ssh_connection, cmd, check_exit_code=True):
    """Method to execute remote command as root.

    :param ssh_connection: an active paramiko.SSHClient connection.
    :param cmd: string containing the command to run.
    :returns: Tuple -- a tuple of (stdout, stderr) file-like objects.
    :raises: nova.exception.ProcessExecutionError
    """
    LOG.debug(_('Running cmd (SSH-as-root): %s') % cmd)
    chan = ssh_connection._transport.open_session()
    # This command is required to be executed
    # in order to become root.
    chan.exec_command('ioscli oem_setup_env')
    bufsize = -1
    stdin = chan.makefile('wb', bufsize)
    stdout = chan.makefile('rb', bufsize)
    stderr = chan.makefile_stderr('rb', bufsize)
    # We run the command and then call 'exit' to exit from
    # super user environment.
    stdin.write('%s\n%s\n' % (cmd, 'exit'))
    stdin.flush()
    exit_status = chan.recv_exit_status()

    # Lets handle the error just like nova.utils.ssh_execute does.
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            # ``cmd`` is already a string here; the previous
            # ``' '.join(cmd)`` spliced a space between every character.
            raise nova_exception.ProcessExecutionError(exit_code=exit_status,
                                                       stdout=stdout,
                                                       stderr=stderr,
                                                       cmd=cmd)

    return (stdout, stderr)
def ftp_put_command(connection, local_path, remote_dir):
    """Method to transfer a file via ftp.

    :param connection: a Connection object.
    :param local_path: path to the local file
    :param remote_dir: path to remote destination
    :raises: PowerVMFileTransferFailed
    """
    try:
        ftp = ftplib.FTP(host=connection.host,
                         user=connection.username,
                         passwd=connection.password)
        try:
            ftp.cwd(remote_dir)
            name = os.path.split(local_path)[1]
            # Context manager closes the local file even when the
            # transfer fails (the old code leaked it on error).
            with open(local_path, "rb") as f:
                ftp.storbinary("STOR " + name, f)
        finally:
            # Always release the FTP control connection.
            ftp.close()
    except Exception:
        LOG.error(_('File transfer to PowerVM manager failed'))
        raise exception.PowerVMFTPTransferFailed(ftp_cmd='PUT',
                source_path=local_path, dest_path=remote_dir)
def ftp_get_command(connection, remote_path, local_path):
    """Retrieve a file via FTP

    :param connection: a Connection object.
    :param remote_path: path to the remote file
    :param local_path: path to local destination
    :raises: PowerVMFileTransferFailed
    """
    try:
        ftp = ftplib.FTP(host=connection.host,
                         user=connection.username,
                         passwd=connection.password)
        try:
            ftp.cwd(os.path.dirname(remote_path))
            name = os.path.basename(remote_path)
            LOG.debug(_("ftp GET %(remote_path)s to: %(local_path)s")
                      % {'remote_path': remote_path, 'local_path': local_path})
            # Binary mode: retrbinary delivers raw bytes and text mode
            # ('w') would mangle them.
            with open(local_path, 'wb') as ftpfile:
                ftpcmd = 'RETR %s' % name
                ftp.retrbinary(ftpcmd, ftpfile.write)
        finally:
            # Always release the FTP control connection.
            ftp.close()
    except Exception:
        LOG.error(_("File transfer from PowerVM manager failed"))
        raise exception.PowerVMFTPTransferFailed(ftp_cmd='GET',
                source_path=remote_path, dest_path=local_path)
def aix_path_join(path_one, path_two):
    """Join two path fragments with exactly one '/' between them.

    :param path_one: string of the first file path
    :param path_two: string of the second file path
    :returns: a uniform path constructed from both strings
    """
    # rstrip/lstrip are no-ops when there is no slash to trim, so the
    # explicit endswith/startswith checks are unnecessary.
    return path_one.rstrip('/') + '/' + path_two.lstrip('/')
| {
"content_hash": "3bf82085e47d282b16452fa1ad5170d2",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 77,
"avg_line_length": 34.77857142857143,
"alnum_prop": 0.6181967549804888,
"repo_name": "fajoy/nova",
"id": "bf69be84e03d9a73564b871b20ac4971d537ce9a",
"size": "5510",
"binary": false,
"copies": "2",
"ref": "refs/heads/grizzly-2",
"path": "nova/virt/powervm/common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7567423"
},
{
"name": "Shell",
"bytes": "15428"
}
],
"symlink_target": ""
} |
"""
Support for switches which integrates with other components.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.template/
"""
import logging
from homeassistant.components.switch import ENTITY_ID_FORMAT, SwitchDevice
from homeassistant.const import (
ATTR_FRIENDLY_NAME, CONF_VALUE_TEMPLATE, STATE_OFF, STATE_ON,
ATTR_ENTITY_ID, MATCH_ALL)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.script import Script
from homeassistant.helpers import template
from homeassistant.helpers.event import track_state_change
from homeassistant.util import slugify
# Configuration key holding the per-switch definitions.
CONF_SWITCHES = 'switches'
# Per-switch configuration keys for the on/off action sequences.
ON_ACTION = 'turn_on'
OFF_ACTION = 'turn_off'
_LOGGER = logging.getLogger(__name__)
# Template results accepted as a valid switch state (case-folded).
_VALID_STATES = [STATE_ON, STATE_OFF, 'true', 'false']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Setup the Template switch."""
    switch_configs = config.get(CONF_SWITCHES)
    if switch_configs is None:
        _LOGGER.error("Missing configuration data for switch platform")
        return False

    switches = []
    for device, device_config in switch_configs.items():
        # Keys must already be slugs so they map to valid entity ids.
        if device != slugify(device):
            _LOGGER.error("Found invalid key for switch.template: %s. "
                          "Use %s instead", device, slugify(device))
            continue

        if not isinstance(device_config, dict):
            _LOGGER.error("Missing configuration data for switch %s", device)
            continue

        state_template = device_config.get(CONF_VALUE_TEMPLATE)
        if state_template is None:
            _LOGGER.error(
                "Missing %s for switch %s", CONF_VALUE_TEMPLATE, device)
            continue

        on_action = device_config.get(ON_ACTION)
        off_action = device_config.get(OFF_ACTION)
        if on_action is None or off_action is None:
            _LOGGER.error(
                "Missing action for switch %s", device)
            continue

        switches.append(SwitchTemplate(
            hass,
            device,
            device_config.get(ATTR_FRIENDLY_NAME, device),
            state_template,
            on_action,
            off_action,
            device_config.get(ATTR_ENTITY_ID, MATCH_ALL)))

    if not switches:
        _LOGGER.error("No switches added")
        return False

    add_devices(switches)
    return True
class SwitchTemplate(SwitchDevice):
    """Representation of a Template switch."""

    # pylint: disable=too-many-arguments
    def __init__(self, hass, device_id, friendly_name, state_template,
                 on_action, off_action, entity_ids):
        """Initialize the Template switch."""
        self.hass = hass
        self.entity_id = generate_entity_id(ENTITY_ID_FORMAT, device_id,
                                            hass=hass)
        self._name = friendly_name
        self._template = state_template
        # Wrap the configured action sequences so they run as scripts.
        self._on_script = Script(hass, on_action)
        self._off_script = Script(hass, off_action)
        self._state = False

        # Render the template once so the entity starts with a real state.
        self.update()

        def template_switch_state_listener(entity, old_state, new_state):
            """Called when the target device changes state."""
            self.update_ha_state(True)

        # Re-evaluate the template whenever any tracked entity changes.
        track_state_change(hass, entity_ids,
                           template_switch_state_listener)

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def available(self):
        """If switch is available."""
        # _state is None when the template last rendered an invalid value.
        return self._state is not None

    def turn_on(self, **kwargs):
        """Fire the on action."""
        self._on_script.run()

    def turn_off(self, **kwargs):
        """Fire the off action."""
        self._off_script.run()

    def update(self):
        """Update the state from the template."""
        try:
            state = template.render(self.hass, self._template).lower()

            if state in _VALID_STATES:
                self._state = state in ('true', STATE_ON)
            else:
                _LOGGER.error(
                    'Received invalid switch is_on state: %s. Expected: %s',
                    state, ', '.join(_VALID_STATES))
                # Mark unavailable until the template renders cleanly again.
                self._state = None

        except TemplateError as ex:
            _LOGGER.error(ex)
            self._state = None
| {
"content_hash": "25ca82d1a94cd4d37fcd2b6655d34e45",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 77,
"avg_line_length": 31.885906040268456,
"alnum_prop": 0.5981898547674174,
"repo_name": "hmronline/home-assistant",
"id": "ebb3cb422589516bb08f20f7e4de9620f63be60f",
"size": "4751",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1308675"
},
{
"name": "JavaScript",
"bytes": "10846"
},
{
"name": "Python",
"bytes": "2667325"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "6430"
}
],
"symlink_target": ""
} |
from django.db import models
class Project(models.Model):
    """A named project plus the saved query strings used to fetch its
    backlog at each level (project / epics / stories / tasks)."""
    name = models.CharField(max_length=10)
    url = models.URLField()
    # One stored query string per backlog level; the query language is
    # whatever the consuming backend expects -- not constrained here.
    project_query = models.TextField()
    epics_query = models.TextField()
    stories_query = models.TextField()
    tasks_query = models.TextField()

    def __unicode__(self):
        return self.name
"content_hash": "c2923234054c5d2e81dc3818423848db",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 42,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.6807228915662651,
"repo_name": "purplecode/probable.backlog",
"id": "2449236410b93213b556f8a74781c7f4d2433533",
"size": "332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backlog/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "115050"
},
{
"name": "HTML",
"bytes": "22085"
},
{
"name": "JavaScript",
"bytes": "5087493"
},
{
"name": "Makefile",
"bytes": "1785"
},
{
"name": "Python",
"bytes": "28720"
},
{
"name": "Ruby",
"bytes": "655"
},
{
"name": "Shell",
"bytes": "304"
}
],
"symlink_target": ""
} |
from flask import current_app, flash, redirect, render_template, request, session, url_for, make_response
from flask_login import current_user, login_required, login_user, logout_user
from sqlalchemy.exc import IntegrityError
from app import login_manager, db
from app.auth import auth
from app.auth.forms import LoginForm, PasswordForm
from app.auth.utils import get_self_url, init_saml_auth, ldap_authentication, prepare_saml_request
from app.models import Users, Roles
from werkzeug.urls import url_parse
@login_manager.user_loader
def user_loader(user_id):
    """Flask-Login callback: load the user for the id stored in the session."""
    query = Users.query.filter_by(id=user_id)
    return query.first()
@auth.route('/saml', methods=['GET', 'POST'])
def saml():
    """
    View function to login users using SAML
    """
    req = prepare_saml_request(request)
    onelogin_saml_auth = init_saml_auth(req)
    # 'sso': start an SP-initiated login, returning to the home page.
    if 'sso' in request.args:
        return redirect(onelogin_saml_auth.login(return_to=url_for('main.index', _external=True)))
    # 'sso2'/'next': login and come back to the originally requested path.
    elif 'sso2' in request.args or 'next' in request.args:
        return_to = '{host_url}{next}'.format(host_url=request.host_url,
                                              next='/'.join(request.args['next'].split('/')[1:]))
        return redirect(onelogin_saml_auth.login(return_to))
    # 'slo': single logout via the IdP, forwarding the stored SAML session.
    elif 'slo' in request.args:
        name_id = None
        session_index = None
        if 'samlNameId' in session:
            name_id = session['samlNameId']
        if 'samlSessionIndex' in session:
            session_index = session['samlSessionIndex']
        return redirect(onelogin_saml_auth.logout(name_id=name_id, session_index=session_index))
    # 'acs': assertion consumer service -- the IdP posts the SAML response here.
    elif 'acs' in request.args:
        onelogin_request = prepare_saml_request(request)
        onelogin_saml_auth = init_saml_auth(onelogin_request)
        onelogin_saml_auth.process_response()
        errors = onelogin_saml_auth.get_errors()
        if len(errors) == 0:
            # Cache the IdP-provided identity in the Flask session.
            session['samlUserdata'] = onelogin_saml_auth.get_attributes()
            session['samlNameId'] = onelogin_saml_auth.get_nameid()
            session['samlSessionIndex'] = onelogin_saml_auth.get_session_index()
            email=session['samlUserdata']['mail'][0]
            user = Users.query.filter_by(email=email.lower()).first()
            Roles.populate()
            # Auto-provision unknown users whose address contains the agency
            # domain.  NOTE(review): find() matches the substring anywhere in
            # the address, not just the domain part -- confirm that is intended.
            if user is None and email.find("records.nyc.gov") >= 0:
                # SECURITY NOTE(review): new accounts are created with a fixed
                # default password; confirm it is unusable under SAML-only login.
                user = Users(
                    first_name=session['samlUserdata']['givenName'][0],
                    middle_initial= session['samlUserdata']['middleName'][0] if session['samlUserdata']['middleName'] else "" ,
                    last_name=session['samlUserdata']['sn'][0],
                    email=session['samlUserdata']['mail'][0],
                    password="Change4me",
                    role_id = Roles.query.filter_by(name="Employee").first().id, #setting it to Employee by default
                    division = "",
                    phone_number="",
                    title="",
                    room="",
                )
                db.session.add(user)
                db.session.commit()
                self_url = get_self_url(onelogin_request)
                login_user(user)
                return redirect((url_for('main.index')))
            elif user:
                self_url = get_self_url(onelogin_request)
                login_user(user)
                return redirect((url_for('main.index')))
            else:
                flash('Sorry, we couldn\'t find your account. Please send an email to <a href="mailto:appsupport@records.nyc.gov">appsupport@records.nyc.gov</a> for assistance.', category='danger')
            # Honor a RelayState redirect only when it points back at this app.
            self_url = get_self_url(onelogin_request)
            if 'RelayState' in request.form and self_url != request.form['RelayState'] and self_url in request.form[
                'RelayState']:
                return redirect(request.form['RelayState'])
            return redirect(url_for('main.index'))
    # 'sls': single logout service callback from the IdP.
    elif 'sls' in request.args:
        dscb = lambda: session.clear()
        url = onelogin_saml_auth.process_slo(delete_session_cb=dscb)
        errors = onelogin_saml_auth.get_errors()
        if len(errors) == 0: #['invalid_logout_response_signature', 'Signature validation failed. Logout Response rejected']
            if url is not None:
                return redirect(url)
            else:
                flash("You have successfully logged out", category='success')
                return redirect(url_for('main.index'))
    # Fallthrough: no recognized query argument -- treat as logout.
    logout_user()
    flash("You have successfully logged out!", category='success')
    return redirect(url_for('main.index'))
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """
    View function to login users using LDAP
    """
    if current_user.is_authenticated and not current_app.config['USE_SAML']:
        return redirect(url_for('main.index'))
    login_form = LoginForm()
    if request.method == 'POST':
        authenticated = False
        user = None
        email = None
        password = None
        # SAML assertion posted back here ('acs' = assertion consumer service).
        if current_app.config['USE_SAML'] and 'acs' in request.args:
            onelogin_request = prepare_saml_request(request)
            onelogin_saml_auth = init_saml_auth(onelogin_request)
            onelogin_saml_auth.process_response()
            errors = onelogin_saml_auth.get_errors()
            not_auth_warn = not onelogin_saml_auth.is_authenticated()
            if len(errors) == 0:
                # Cache the IdP-provided identity in the Flask session.
                session['samlUserdata'] = onelogin_saml_auth.get_attributes()
                session['samlNameId'] = onelogin_saml_auth.get_nameid()
                session['samlSessionIndex'] = onelogin_saml_auth.get_session_index()
                user = Users.query.filter_by(email=session['samlUserdata']['mail'][0]).first()
                authenticated = True
        else:
            # Form-based login; actual credential check happens below.
            email = login_form.email.data
            password = login_form.password.data
            user = Users.query.filter_by(email=email).first()
        if user is not None:
            if current_app.config['LOGIN_REQUIRED']:
                # Determine authentication method
                if current_app.config['USE_LDAP']:
                    authenticated = ldap_authentication(email, password)
                elif current_app.config['USE_LOCAL_AUTH']:
                    authenticated = user.check_password(password)
                if authenticated:
                    login_user(user, remember=login_form.remember_me.data)
                    # check if password has expired or is the default password
                    if current_user.has_invalid_password and current_app.config['USE_LOCAL_AUTH']:
                        return redirect(url_for('auth.password'))
                    return redirect(url_for('main.index'))
                else:
                    # NOTE(review): this branch logs the user in even though
                    # ``authenticated`` is False -- confirm this is intended.
                    login_user(user, remember=login_form.remember_me.data)
                    next_page = request.args.get('next')
                    # Only follow same-site relative 'next' targets
                    # (open-redirect guard).
                    if not next_page or url_parse(next_page).netloc != '':
                        next_page = url_for('main.index')
                    return redirect(next_page)
                flash("Invalid username/password combination.", category="danger")
                return render_template('login.html', login_form=login_form)
        else:
            flash("User not found. Please contact IT to gain access to the system.", category="warning")
            return render_template('login.html', login_form=login_form)
    # GET request (or fallthrough) under SAML: handle sso/slo query args.
    if current_app.config['USE_SAML']:
        onelogin_request = prepare_saml_request(request)
        onelogin_saml_auth = init_saml_auth(onelogin_request)
        errors = []
        not_auth_warn = False
        success_slo = False
        attributes = False
        paint_logout = False
        if 'sso' in request.args:
            return redirect(onelogin_saml_auth.login())
        elif 'sso2' in request.args:
            return_to = '{host_url}/attrs'.format(host_url=request.host_url)
            return redirect(onelogin_saml_auth.login(return_to))
        elif 'slo' in request.args:
            name_id = None
            session_index = None
            if 'samlNameId' in session:
                name_id = session['samlNameId']
            if 'samlSessionIndex' in session:
                session_index = session['samlSessionIndex']
            return redirect(onelogin_saml_auth.logout(name_id=name_id, session_index=session_index))
        elif 'sls' in request.args:
            dscb = lambda: session.clear()
            url = onelogin_saml_auth.process_slo(delete_session_cb=dscb)
            errors = onelogin_saml_auth.get_errors()
            if len(errors) == 0:
                if url is not None:
                    return redirect(url)
                else:
                    return redirect(url_for('main.index'))
    return render_template('login.html', login_form=login_form)
@auth.route('/metadata/')
def metadata():
    """Serve the SAML service-provider metadata XML, or the validation
    errors with status 500 when the configured metadata is invalid."""
    saml_auth = init_saml_auth(prepare_saml_request(request))
    sp_settings = saml_auth.get_settings()
    sp_metadata = sp_settings.get_sp_metadata()
    errors = sp_settings.validate_metadata(sp_metadata)
    if errors:
        return make_response(', '.join(errors), 500)
    resp = make_response(sp_metadata, 200)
    resp.headers['Content-Type'] = 'text/xml'
    return resp
@auth.route('/password', methods=['GET', 'POST'])
@login_required
def password():
    """
    Return the change password page and redirect to the home page
    if password change is successful.
    """
    form = PasswordForm()
    # Guard clause: show the form again unless it validated on submit.
    if not form.validate_on_submit():
        return render_template('change_password.html', password_form=form)
    current_user.update_password(form.current_password.data,
                                 form.new_password.data)
    return redirect('/')
@auth.route('/logout', methods=['GET'])
def logout():
    """Log the active user out and send them to the landing page."""
    logout_user()
    home = url_for('main.index')
    return redirect(home)
| {
"content_hash": "bf479d7829c415d99fdfde01d093f0df",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 197,
"avg_line_length": 43.35807860262009,
"alnum_prop": 0.5956289656561587,
"repo_name": "nycrecords/intranet",
"id": "566c97c3d9f680e6429ce477b6e4928b58152579",
"size": "9929",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "app/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41304"
},
{
"name": "Dockerfile",
"bytes": "1624"
},
{
"name": "HTML",
"bytes": "267962"
},
{
"name": "JavaScript",
"bytes": "57158"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "328971"
},
{
"name": "Ruby",
"bytes": "1984"
},
{
"name": "Shell",
"bytes": "10819"
}
],
"symlink_target": ""
} |
"""
This module holds the quantization config class object that is accessed globally by library modules.
DIRECT USE OF THIS MODULE BY USER IS PROHIBITED.
"""
# List that holds quantization config class object, Length is always one!
# Object is added automatically on class creation
G_CONFIG_OBJECT = []


def add_config_object(config_object: "BaseConfig") -> None:
    """Register a quantize config object in the global one-slot registry.

    Args:
        config_object : Instance of one of four quantize config class
    """
    # The registry may hold at most one live object at a time.
    assert not G_CONFIG_OBJECT, (
        "Looks like previous quatize object is alive. Did you call clear() on the object?"
    )
    G_CONFIG_OBJECT.append(config_object)
def remove_config_object() -> None:
    """Drop any registered quantize config object from the global registry.

    Safe to call when the registry is already empty.
    """
    del G_CONFIG_OBJECT[:]
def get_config_object() -> "BaseConfig":
    """Return the registered quantize config object.

    The assertion guarantees exactly one object is registered, so the
    return is unconditional.
    """
    assert (
        len(G_CONFIG_OBJECT) == 1
    ), "Have you created quantize config object before calling `quantize_model`?"
    return G_CONFIG_OBJECT[0]
def is_config_object_created() -> bool:
    """Sanity check: True when exactly one quantize config object exists."""
    registered = len(G_CONFIG_OBJECT)
    return registered == 1
| {
"content_hash": "a0af0424063ff8ab8262ac122ce5e3b6",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 100,
"avg_line_length": 29.304347826086957,
"alnum_prop": 0.6676557863501483,
"repo_name": "NVIDIA/TensorRT",
"id": "c2595bfa0f307a108c94839e2d0a395b33724274",
"size": "2038",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tools/tensorflow-quantization/tensorflow_quantization/global_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "804"
},
{
"name": "C",
"bytes": "26267"
},
{
"name": "C++",
"bytes": "174835683"
},
{
"name": "CMake",
"bytes": "73882"
},
{
"name": "Cuda",
"bytes": "713094"
},
{
"name": "Dockerfile",
"bytes": "21378"
},
{
"name": "HTML",
"bytes": "266"
},
{
"name": "Jupyter Notebook",
"bytes": "2284036"
},
{
"name": "Makefile",
"bytes": "9128"
},
{
"name": "PowerShell",
"bytes": "162"
},
{
"name": "PureBasic",
"bytes": "388"
},
{
"name": "Python",
"bytes": "2541976"
},
{
"name": "Shell",
"bytes": "20007"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import pytest
from rasa_nlu.utils import relative_normpath, recursively_find_files
def test_relative_normpath():
    """The path is returned relative to the given base directory."""
    expected = "path/file.txt"
    assert relative_normpath("/my/test/path/file.txt", "/my/test") == expected
def test_recursively_find_files_invalid_resource():
    """A non-path argument is rejected with a descriptive ValueError."""
    with pytest.raises(ValueError) as execinfo:
        recursively_find_files(None)
    message = str(execinfo.value)
    assert "must be an existing directory" in message
def test_recursively_find_files_non_existing_dir():
    """A path that does not exist is rejected with a descriptive ValueError."""
    with pytest.raises(ValueError) as execinfo:
        recursively_find_files("my/made_up/path")
    message = str(execinfo.value)
    assert "Could not locate the resource" in message
| {
"content_hash": "7a98474723a3eeae1d8939605a256296",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 32.291666666666664,
"alnum_prop": 0.7380645161290322,
"repo_name": "verloop/rasa_nlu",
"id": "67c5a2c149d70ef0dcf4d1af75bf003123596a13",
"size": "775",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_pytest/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "203895"
},
{
"name": "Shell",
"bytes": "2345"
}
],
"symlink_target": ""
} |
from django.db import models
from django.contrib.auth.models import User, Group
from common.middleware import get_current_user
class Favorite(models.Model):
    """
    Model representing a favorite
    """
    user = models.ForeignKey(User, help_text="Owner user")
    fname = models.CharField(max_length=100, help_text="Favorite name")
    desc = models.CharField(max_length=100, help_text="Favorite Description")
    ftype = models.CharField(max_length=30, help_text="Favorite type")
    fromS = models.CharField(max_length=30, help_text="From station")
    viaS = models.CharField(max_length=30, help_text="Via station")

    def __unicode__(self):
        return u'%s' %(self.fname)

    def save(self, **args):
        """Fill in the owning user from the request context, then save."""
        # NOTE(review): reading ``self.user`` on an instance with no user set
        # may raise rather than return a falsy value in some Django versions
        # -- confirm against the Django release in use.
        if not self.user:
            user = get_current_user()
            if user:
                self.user = user
        # Forward keyword arguments (e.g. ``using=...``) properly: the
        # previous ``save(args)`` passed the kwargs dict positionally as
        # ``force_insert``, which misbehaves whenever kwargs are supplied.
        super(Favorite, self).save(**args)
| {
"content_hash": "173d9385eb70850e5e05104193ae86b3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 77,
"avg_line_length": 35.44,
"alnum_prop": 0.6501128668171557,
"repo_name": "prabhu/pynationalrail",
"id": "adb61a548ef6ed306b742d2d0f327947d8898052",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myrail/railapp/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "36763"
},
{
"name": "Python",
"bytes": "44977"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
import matplotlib.pyplot as plt
import pytest
from astropy import units as u
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
from astropy.visualization.wcsaxes.core import WCSAxes
from astropy.wcs import WCS
# FITS header fixture used to build a WCS for the format/separator tests.
MSX_HEADER = fits.Header.fromtextfile(get_pkg_data_filename('data/msx_header'))
def teardown_function(function):
    """Close every Matplotlib figure after each test to avoid leaking state."""
    plt.close('all')
def test_getaxislabel(ignore_matplotlibrc):
    """Axis labels set on each coordinate are read back unchanged."""
    fig = plt.figure()
    axes = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')

    axes.coords[0].set_axislabel("X")
    axes.coords[1].set_axislabel("Y")
    assert axes.coords[0].get_axislabel() == "X"
    assert axes.coords[1].get_axislabel() == "Y"
@pytest.fixture
def ax():
    """Provide a WCSAxes instance already added to a fresh figure."""
    fig = plt.figure()
    axes = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], aspect='equal')
    fig.add_axes(axes)
    return axes
def assert_label_draw(ax, x_label, y_label):
    """Draw the figure and check whether each axis label got positioned.

    ``set_position`` is called exactly once per label that is actually
    drawn, so the call count doubles as a drawn/not-drawn flag.
    """
    ax.coords[0].set_axislabel("Label 1")
    ax.coords[1].set_axislabel("Label 2")

    with patch.object(ax.coords[0].axislabels, 'set_position') as pos1, \
            patch.object(ax.coords[1].axislabels, 'set_position') as pos2:
        ax.figure.canvas.draw()

    assert pos1.call_count == x_label
    assert pos2.call_count == y_label
def test_label_visibility_rules_default(ignore_matplotlibrc, ax):
    """Both axis labels are drawn under the default visibility rule."""
    assert_label_draw(ax, True, True)
def test_label_visibility_rules_label(ignore_matplotlibrc, ax):
    """Labels vanish when tick labels are hidden or ticks are off-screen."""
    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999] * u.one)

    assert_label_draw(ax, False, False)
def test_label_visibility_rules_ticks(ignore_matplotlibrc, ax):
    """Under the 'ticks' rule only the axis with visible ticks keeps its label."""
    for coord in (ax.coords[0], ax.coords[1]):
        coord.set_axislabel_visibility_rule('ticks')

    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999] * u.one)

    assert_label_draw(ax, True, False)
def test_label_visibility_rules_always(ignore_matplotlibrc, ax):
    """The 'always' rule keeps both labels even without visible tick labels."""
    for coord in (ax.coords[0], ax.coords[1]):
        coord.set_axislabel_visibility_rule('always')

    ax.coords[0].set_ticklabel_visible(False)
    ax.coords[1].set_ticks(values=[-9999] * u.one)

    assert_label_draw(ax, True, True)
def test_format_unit():
    """The format unit defaults to 'deg' and can be switched to 'arcsec'."""
    fig = plt.figure()
    axes = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
    fig.add_axes(axes)

    # format_coord only works after an initial draw.
    axes.figure.canvas.draw()

    assert axes.coords[1].get_format_unit() == "deg"
    axes.coords[1].set_format_unit("arcsec")
    assert axes.coords[1].get_format_unit() == "arcsec"
def test_set_separator():
    """Sexagesimal separators can be customized and reset with None."""
    fig = plt.figure()
    axes = WCSAxes(fig, [0.1, 0.1, 0.8, 0.8], wcs=WCS(MSX_HEADER))
    fig.add_axes(axes)

    # format_coord only works after an initial draw.
    axes.figure.canvas.draw()

    coord = axes.coords[1]
    coord.set_format_unit('deg')
    assert coord.format_coord(4) == '4\xb000\'00\"'
    coord.set_separator((':', ':', ''))
    assert coord.format_coord(4) == '4:00:00'
    coord.set_separator('abc')
    assert coord.format_coord(4) == '4a00b00c'
    coord.set_separator(None)
    assert coord.format_coord(4) == '4\xb000\'00\"'
| {
"content_hash": "105b722bb14381c6ebff435983d886f9",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 27.25423728813559,
"alnum_prop": 0.6632462686567164,
"repo_name": "larrybradley/astropy",
"id": "701bf0d0bfd16c54c2591e74868cbba5ae9eee17",
"size": "3281",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "astropy/visualization/wcsaxes/tests/test_coordinate_helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040101"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12335716"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
} |
from six import PY3
import sys
from robot.utils import NormalizedDict
class Metadata(NormalizedDict):
    """Suite metadata: a dictionary whose key lookups ignore underscores
    (normalization behavior otherwise inherited from NormalizedDict)."""

    def __init__(self, initial=None):
        # ignore='_' makes key matching insensitive to underscores.
        NormalizedDict.__init__(self, initial, ignore='_')

    def __unicode__(self):
        # Render as "{key: value, ...}" in normalized key order.
        return u'{%s}' % ', '.join('%s: %s' % (k, self[k]) for k in self)

    def __str__(self):
        # Python 3: str and unicode are the same type, so delegate directly.
        if PY3:
            return self.__unicode__()
        # Python 2: byte string with non-ASCII characters replaced.
        return unicode(self).encode('ASCII', 'replace')

    #PY3
    def __bytes__(self):
        # Python 3 bytes form, mirroring the Python 2 __str__ encoding.
        return str(self).encode('ASCII', 'replace')
| {
"content_hash": "2a18d08ee2303515d687bfd81e197ff7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 73,
"avg_line_length": 22.91304347826087,
"alnum_prop": 0.5692599620493358,
"repo_name": "userzimmermann/robotframework-python3",
"id": "23f6da6cae72a408979f1bd5baef9b0cf457ae47",
"size": "1135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robot/model/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16539"
},
{
"name": "HTML",
"bytes": "1011996"
},
{
"name": "Java",
"bytes": "58737"
},
{
"name": "JavaScript",
"bytes": "159003"
},
{
"name": "Python",
"bytes": "2018310"
},
{
"name": "RobotFramework",
"bytes": "4288"
},
{
"name": "Shell",
"bytes": "1093"
}
],
"symlink_target": ""
} |
import argparse
import itertools
import logging
import os
import subprocess
import sys
from ..manifest import manifest, update
here = os.path.dirname(__file__)
wpt_root = os.path.abspath(os.path.join(here, os.pardir, os.pardir))
logger = logging.getLogger()
def get_git_cmd(repo_path):
    """Create a function for invoking git commands as a subprocess."""
    def git(cmd, *args):
        # Accept bytes or str arguments; subprocess gets str throughout.
        decoded = [item.decode("utf8") if isinstance(item, bytes) else item
                   for item in args]
        full_cmd = ["git", cmd] + decoded
        try:
            logger.debug(" ".join(full_cmd))
            output = subprocess.check_output(full_cmd, cwd=repo_path,
                                             stderr=subprocess.STDOUT)
            return output.decode("utf8").strip()
        except subprocess.CalledProcessError as e:
            logger.error("Git command exited with status %i" % e.returncode)
            logger.error(e.output)
            sys.exit(1)
    return git
def branch_point():
    """Return the commit hash where the current branch diverged from master.

    On Travis pull-request builds the base branch is taken from the
    environment; otherwise a heuristic over ``git rev-list`` is used, with
    the merge base against origin/master as a fallback.
    """
    git = get_git_cmd(wpt_root)
    if os.environ.get("TRAVIS_PULL_REQUEST", "false") != "false":
        # This is a PR, so the base branch is in TRAVIS_BRANCH
        travis_branch = os.environ.get("TRAVIS_BRANCH")
        assert travis_branch, "TRAVIS_BRANCH environment variable is defined"
        branch_point = git("rev-parse", travis_branch)
    else:
        # Otherwise we aren't on a PR, so we try to find commits that are only in the
        # current branch c.f.
        # http://stackoverflow.com/questions/13460152/find-first-ancestor-commit-in-another-branch
        head = git("rev-parse", "HEAD")
        # Revisions reachable from other refs; the current HEAD is excluded.
        not_heads = [item for item in git("rev-parse", "--not", "--all").split("\n")
                     if item.strip() and head not in item]
        commits = git("rev-list", "HEAD", *not_heads).split("\n")
        branch_point = None
        if len(commits):
            # The last entry is the earliest branch-only commit; its parent
            # is the candidate branch point.
            first_commit = commits[-1]
            if first_commit:
                branch_point = git("rev-parse", first_commit + "^")
        # The above heuristic will fail in the following cases:
        #
        # - The current branch has fallen behind the version retrieved via the above
        #   `fetch` invocation
        # - Changes on the current branch were rebased and therefore do not exist on any
        #   other branch. This will result in the selection of a commit that is earlier
        #   in the history than desired (as determined by calculating the later of the
        #   branch point and the merge base)
        #
        # In either case, fall back to using the merge base as the branch point.
        merge_base = git("merge-base", "HEAD", "origin/master")
        if (branch_point is None or
            (branch_point != merge_base and
             not git("log", "--oneline", "%s..%s" % (merge_base, branch_point)).strip())):
            logger.debug("Using merge-base as the branch point")
            branch_point = merge_base
        else:
            logger.debug("Using first commit on another branch as the branch point")
    logger.debug("Branch point from master: %s" % branch_point)
    return branch_point
def files_changed(revish, ignore_dirs=None, include_uncommitted=False, include_new=False):
    """Get and return files changed since current branch diverged from master,
    excluding those that are located within any directory specifed by
    `ignore_changes`.

    Returns a pair ``(changed, ignored)`` of sorted absolute paths; paths
    whose top-level directory is in ``ignore_dirs`` go into ``ignored``.
    """
    if ignore_dirs is None:
        ignore_dirs = []
    git = get_git_cmd(wpt_root)
    # NUL-separated output is robust against unusual filenames.
    files = git("diff", "--name-only", "-z", revish).split("\0")
    assert not files[-1]
    files = set(files[:-1])
    if include_uncommitted:
        entries = git("status", "-z").split("\0")
        assert not entries[-1]
        entries = entries[:-1]
        for item in entries:
            # NOTE(review): splits on whitespace, so paths containing spaces
            # (and rename entries of the form "R old -> new") would be
            # mis-parsed — confirm against expected repository contents.
            status, path = item.split()
            if status == "??" and not include_new:
                continue
            else:
                if not os.path.isdir(path):
                    files.add(path)
                else:
                    # An untracked directory is reported as a single entry;
                    # expand it to the individual files beneath it.
                    for dirpath, dirnames, filenames in os.walk(path):
                        for filename in filenames:
                            files.add(os.path.join(dirpath, filename))
    if not files:
        return [], []
    changed = []
    ignored = []
    for item in sorted(files):
        fullpath = os.path.join(wpt_root, item)
        topmost_dir = item.split(os.sep, 1)[0]
        if topmost_dir in ignore_dirs:
            ignored.append(fullpath)
        else:
            changed.append(fullpath)
    return changed, ignored
def _in_repo_root(full_path):
    """Return True when *full_path* sits directly in the repository root."""
    rel_path = os.path.relpath(full_path, wpt_root)
    # A root-level file has no directory separator once made relative.
    return os.sep not in rel_path
def _init_manifest_cache():
    """Build a single-entry memoizing loader for the wpt MANIFEST.json."""
    c = {}
    def load(manifest_path=None):
        # Default to the MANIFEST.json at the repository root.
        if manifest_path is None:
            manifest_path = os.path.join(wpt_root, "MANIFEST.json")
        if c.get(manifest_path):
            return c[manifest_path]
        # cache at most one path:manifest
        c.clear()
        wpt_manifest = manifest.load(wpt_root, manifest_path)
        if wpt_manifest is None:
            # No manifest on disk yet: build a fresh one in memory.
            wpt_manifest = manifest.Manifest()
        update.update(wpt_root, wpt_manifest)
        c[manifest_path] = wpt_manifest
        return c[manifest_path]
    return load
# Module-level cached loader; call as load_manifest(manifest_path=None).
load_manifest = _init_manifest_cache()
def affected_testfiles(files_changed, skip_tests, manifest_path=None):
    """Determine and return list of test files that reference changed files.

    :param files_changed: iterable of absolute paths of changed files.
    :param skip_tests: set of top-level directory names to exclude.
    :param manifest_path: optional path of the MANIFEST.json to consult.
    :returns: ``(tests_changed, affected_testfiles)`` — sets of absolute
        paths of tests that changed directly and of tests whose contents
        reference a changed support file.
    """
    affected_testfiles = set()
    # Exclude files that are in the repo root, because
    # they are not part of any test.
    files_changed = [f for f in files_changed if not _in_repo_root(f)]
    nontests_changed = set(files_changed)
    wpt_manifest = load_manifest(manifest_path)
    test_types = ["testharness", "reftest", "wdspec"]
    support_files = {os.path.join(wpt_root, path)
                     for _, path, _ in wpt_manifest.itertypes("support")}
    wdspec_test_files = {os.path.join(wpt_root, path)
                         for _, path, _ in wpt_manifest.itertypes("wdspec")}
    test_files = {os.path.join(wpt_root, path)
                  for _, path, _ in wpt_manifest.itertypes(*test_types)}
    # Only changed *support* files can affect other tests by reference.
    nontests_changed = nontests_changed.intersection(support_files)
    tests_changed = set(item for item in files_changed if item in test_files)
    nontest_changed_paths = set()
    for full_path in nontests_changed:
        rel_path = os.path.relpath(full_path, wpt_root)
        path_components = rel_path.split(os.sep)
        top_level_subdir = path_components[0]
        if top_level_subdir in skip_tests:
            continue
        # Repo-absolute URL form ("/dir/file") used for content matching.
        repo_path = "/" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, "/")
        nontest_changed_paths.add((full_path, repo_path))
    def affected_by_wdspec(test):
        # A wdspec test is affected when a changed support file lives in its
        # directory tree (treating a "support" directory as its parent's).
        affected = False
        if test in wdspec_test_files:
            for support_full_path, _ in nontest_changed_paths:
                # parent of support file or of "support" directory
                parent = os.path.dirname(support_full_path)
                if os.path.basename(parent) == "support":
                    parent = os.path.dirname(parent)
                relpath = os.path.relpath(test, parent)
                if not relpath.startswith(os.pardir):
                    # testfile is in subtree of support file
                    affected = True
                    break
        return affected
    for root, dirs, fnames in os.walk(wpt_root):
        # Walk top_level_subdir looking for test files containing either the
        # relative filepath or absolute filepath to the changed files.
        if root == wpt_root:
            for dir_name in skip_tests:
                dirs.remove(dir_name)
        for fname in fnames:
            test_full_path = os.path.join(root, fname)
            # Skip any file that's not a test file.
            if test_full_path not in test_files:
                continue
            if affected_by_wdspec(test_full_path):
                affected_testfiles.add(test_full_path)
                continue
            with open(test_full_path, "rb") as fh:
                file_contents = fh.read()
            # Bug fix: the file is read in binary mode, so the BOM sniffing
            # must compare against *bytes* prefixes. Comparing
            # bytes.startswith(str) raises TypeError on Python 3.
            if file_contents.startswith(b"\xfe\xff"):
                file_contents = file_contents.decode("utf-16be", "replace")
            elif file_contents.startswith(b"\xff\xfe"):
                file_contents = file_contents.decode("utf-16le", "replace")
            else:
                file_contents = file_contents.decode("utf8", "replace")
            for full_path, repo_path in nontest_changed_paths:
                rel_path = os.path.relpath(full_path, root).replace(os.path.sep, "/")
                if rel_path in file_contents or repo_path in file_contents:
                    affected_testfiles.add(test_full_path)
                    # Already recorded; no need to scan the remaining paths.
                    break
    return tests_changed, affected_testfiles
def get_parser():
    """Build the common argument parser for the files-changed commands."""
    p = argparse.ArgumentParser()
    p.add_argument("revish",
                   default=None,
                   nargs="?",
                   help="Commits to consider. Defaults to the commits on the current branch")
    p.add_argument("--ignore-dirs",
                   nargs="*",
                   type=set,
                   default=set(["resources"]),
                   help="Directories to exclude from the list of changes")
    p.add_argument("--modified",
                   action="store_true",
                   help="Include files under version control that have been modified or staged")
    p.add_argument("--new",
                   action="store_true",
                   help="Include files in the worktree that are not in version control")
    p.add_argument("--show-type",
                   action="store_true",
                   help="Print the test type along with each affected test")
    return p
def get_parser_affected():
    """Extend the shared parser with the ``--metadata`` option."""
    parser = get_parser()
    parser.add_argument("--metadata",
                        action="store",
                        dest="metadata_root",
                        default=wpt_root,
                        help="Directory that will contain MANIFEST.json")
    return parser
def get_revish(**kwargs):
    """Return the revision range to inspect, defaulting to branch-point..HEAD."""
    revish = kwargs["revish"]
    if revish is not None:
        return revish
    return "%s..HEAD" % branch_point()
def run_changed_files(**kwargs):
    """Entry point: print repo-relative paths of files changed in the range."""
    revish = get_revish(**kwargs)
    changed, _ = files_changed(revish, kwargs["ignore_dirs"],
                               include_uncommitted=kwargs["modified"],
                               include_new=kwargs["new"])
    for item in sorted(changed):
        print(os.path.relpath(item, wpt_root))
def run_tests_affected(**kwargs):
    """Entry point: print tests changed or affected by the revision range.

    With ``show_type`` set, each path is followed by a tab and its manifest
    item type(s).
    """
    revish = get_revish(**kwargs)
    changed, _ = files_changed(revish, kwargs["ignore_dirs"],
                               include_uncommitted=kwargs["modified"],
                               include_new=kwargs["new"])
    manifest_path = os.path.join(kwargs["metadata_root"], "MANIFEST.json")
    tests_changed, dependents = affected_testfiles(
        changed,
        set(["conformance-checkers", "docs", "tools"]),
        manifest_path=manifest_path
    )
    message = "{path}"
    if kwargs["show_type"]:
        wpt_manifest = load_manifest(manifest_path)
        message = "{path}\t{item_type}"
    for item in sorted(tests_changed | dependents):
        results = {
            "path": os.path.relpath(item, wpt_root)
        }
        if kwargs["show_type"]:
            item_types = {i.item_type for i in wpt_manifest.iterpath(results["path"])}
            # If a path maps to several item types, join them into one field.
            if len(item_types) != 1:
                item_types = [" ".join(item_types)]
            results["item_type"] = item_types.pop()
        print(message.format(**results))
| {
"content_hash": "9e420cbab49b57de233edd501623205f",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 133,
"avg_line_length": 40.44055944055944,
"alnum_prop": 0.5895728860453052,
"repo_name": "youtube/cobalt",
"id": "89e6e2764aeaed264951f1358c5377ec608856b8",
"size": "11566",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/web_platform_tests/tools/wpt/testfiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
Remove tweets containing labels we don't want.
Constantine Lignos
February 2013
"""
# Copyright (c) 2013 Constantine Lignos
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import codecs
# Hack to allow import of split_token, forces this to be run from subdir.
sys.path.append('..')
from eval_codeswitch import split_token
# Python 2 script: read UTF-8 from stdin, write UTF-8 to stdout.
output = codecs.getwriter('utf_8')(sys.stdout)
for line in codecs.getreader('utf_8')(sys.stdin):
    line = line.rstrip()
    try:
        # Split each token into (word, tag); keep only the tags.
        _, tags = zip(*[split_token(token, False) for token in line.split()])
    except ValueError as err:
        # Malformed token (or empty line): report on stderr and skip it.
        print >> sys.stderr, err
        print >> sys.stderr, "From line:", repr(line)
        continue
    # Skip any tags with multiple annotations
    if any('|' in tag or 'oth' in tag for tag in tags):
        continue
    print >> output, line
| {
"content_hash": "60c6b48897ace7c7f3d3096fb342de2d",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 36.01724137931034,
"alnum_prop": 0.7410244135950216,
"repo_name": "ConstantineLignos/Codeswitchador",
"id": "895766d88d44ff97cfa5849c38cf5775698b6ef8",
"size": "2111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/filter_labels.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "85732"
},
{
"name": "Shell",
"bytes": "2361"
}
],
"symlink_target": ""
} |
from abc import abstractmethod, ABCMeta
from .security import Permissions, require
from .utils import json_response, validate_query
class AbstractResource(metaclass=ABCMeta):
    """Base class for admin REST resources.

    Subclasses implement the CRUD handlers; ``setup`` wires them to routes
    under ``base_url / resource_name``.
    """
    def __init__(self, *, primary_key, resource_name=None):
        default_name = self.__class__.__name__.lower()
        self._resource_name = resource_name if resource_name else default_name
        self._primary_key = primary_key
    @property
    def primary_key(self):
        """Name of the field that uniquely identifies an entity."""
        return self._primary_key
    @abstractmethod
    async def list(self, request):  # pragma: no cover
        """Return a paginated collection of entities."""
        await require(request, Permissions.view)
        query = validate_query(request.GET)
        assert query
        # total number of results should be supplied in separate
        headers = {'X-Total-Count': str(0)}
        return json_response({}, headers=headers)
    @abstractmethod
    async def detail(self, request):  # pragma: no cover
        """Return a single entity identified by the URL's entity_id."""
        await require(request, Permissions.view)
        entity_id = request.match_info['entity_id']
        assert entity_id
        return json_response({})
    @abstractmethod
    async def create(self, request):  # pragma: no cover
        """Create a new entity from the request payload."""
        await require(request, Permissions.add)
        return json_response({})
    @abstractmethod
    async def update(self, request):  # pragma: no cover
        """Update the entity identified by the URL's entity_id."""
        await require(request, Permissions.edit)
        entity_id = request.match_info['entity_id']
        assert entity_id
        return json_response({})
    @abstractmethod
    async def delete(self, request):  # pragma: no cover
        """Delete the entity identified by the URL's entity_id."""
        await require(request, Permissions.delete)
        entity_id = request.match_info['entity_id']
        assert entity_id
        return json_response({})
    def setup(self, app, base_url):
        """Register the CRUD handlers on the application's router."""
        collection_url = str(base_url / self._resource_name)
        entity_url = collection_url + '/{entity_id}'
        router = app.router
        router.add_route('GET', collection_url, self.list)
        router.add_route('GET', entity_url, self.detail)
        router.add_route('POST', collection_url, self.create)
        router.add_route('PUT', entity_url, self.update)
        router.add_route('DELETE', entity_url, self.delete)
| {
"content_hash": "ac02a438ecf9d2834628ed447817c8c4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 64,
"avg_line_length": 33.403225806451616,
"alnum_prop": 0.632061805890874,
"repo_name": "jettify/aiohttp_admin",
"id": "c8ad8277a5d96ff6375522f6a96476c97f88154e",
"size": "2071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aiohttp_admin/resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3192"
},
{
"name": "HTML",
"bytes": "5621"
},
{
"name": "JavaScript",
"bytes": "38981"
},
{
"name": "Makefile",
"bytes": "3565"
},
{
"name": "PLpgSQL",
"bytes": "1742"
},
{
"name": "Python",
"bytes": "105379"
},
{
"name": "Shell",
"bytes": "996"
}
],
"symlink_target": ""
} |
from django.conf.urls import *
from SAMPLEAPP.views import *
# Placeholder URLconf for the SAMPLEAPP template app; uncomment and adapt
# the example route below when adding views. (Uses the pre-Django-1.10
# patterns() helper, consistent with the project's Django version.)
urlpatterns = patterns('',
    #url(r'^SAMPLEAPP/foo/$', SAMPLEAPPView.as_view(), name='SAMPLEAPP_view'),
)
"content_hash": "0b2cb27c8c032b45196c9f95d1d399ee",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 82,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.6878612716763006,
"repo_name": "auzigog/jbrinkerhoff.com",
"id": "4cf3e8fe2cb3fff34507b9b0453568932717a198",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "root/apps/SAMPLEAPP/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "37394"
},
{
"name": "Python",
"bytes": "14239"
}
],
"symlink_target": ""
} |
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_service_reference import V1beta1ServiceReference
class TestV1beta1ServiceReference(unittest.TestCase):
    """ V1beta1ServiceReference unit test stubs

    Auto-generated by swagger-codegen; the single test only smoke-checks
    that the model class can be constructed.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testV1beta1ServiceReference(self):
        """
        Test V1beta1ServiceReference
        """
        # Constructing the model with no arguments must not raise.
        model = kubernetes.client.models.v1beta1_service_reference.V1beta1ServiceReference()
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "3cbf547fbbf83223feffc10f14645764",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 105,
"avg_line_length": 22.65,
"alnum_prop": 0.7130242825607064,
"repo_name": "djkonro/client-python",
"id": "782131f15ad513a1937480c4c9c53a1bd9a19042",
"size": "923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kubernetes/test/test_v1beta1_service_reference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6527154"
},
{
"name": "Shell",
"bytes": "16522"
}
],
"symlink_target": ""
} |
import os
import logging
from dessn.framework.fitter import Fitter
from dessn.framework.models.approx_model import ApproximateModel
from dessn.framework.simulations.snana_sys import SNANASysSimulation
from dessn.framework.simulations.selection_effects import lowz_sel, des_sel
from chainconsumer import ChainConsumer
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    # Output directory/file prefix derived from this script's filename.
    plot_dir = os.path.dirname(os.path.abspath(__file__)) + "/plots/%s/" % os.path.basename(__file__)[:-3]
    pfn = plot_dir + os.path.basename(__file__)[:-3]
    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)
    model = ApproximateModel(global_calibration=0)
    simulation = [SNANASysSimulation(300, sys_index=0, sim="lowz", manual_selection=lowz_sel()),
                  SNANASysSimulation(500, sys_index=0, sim="des", manual_selection=des_sel())]
    filenames = ["approximate_systematic_%d_test" % i for i in range(7)]
    # NOTE(review): this first `names` assignment is dead code — it is
    # immediately overwritten by the shorter labels below.
    names = ["Stat only", "ZP offset .02 mag (Gauss)", r"Filter $\Lambda$ shift 20$\textup{\AA}$ gaus",
             "10\\% Gauss error in biasCor flux errors", "idem, but with incorrect reported fluxErr",
             "MWEBV scale from 20\\% Gauss error", "MW RV shift from 0.2 Gauss error"]
    names = ["Stat only", "ZP offset", r"Filter $\lambda$ shift",
             "Bias corrected flux error", "idem + uncertainty error",
             "MW $E(B-V)$ scale error", "MW $R_V$ shift"]
    dir_names = [os.path.dirname(os.path.abspath(__file__)) + "/plots/%s/output/" % f for f in filenames]
    c = ChainConsumer()
    # Load each fitted chain and add it to the plot under its label.
    for dir_name, filename, name in zip(dir_names, filenames, names):
        print(dir_name)
        fitter = Fitter(dir_name)
        fitter.set_models(model)
        fitter.set_simulations(simulation)
        m, s, chain, truth, weight, old_weight, posterior = fitter.load()
        c.add_chain(chain, weights=weight, posterior=posterior, name=name)
    # Styling: shade only the first (stat-only) chain.
    ls = ["-"] + ["-"] * (len(dir_names) - 1)
    colors = ['k', 'b', 'r', 'g', 'purple', 'o', 'lb']
    alphas = [0.3] + [0.0] * (len(dir_names) - 1)
    c.configure(label_font_size=10, tick_font_size=10, diagonal_tick_labels=False, linestyles=ls,
                colors=colors, shade_alpha=alphas, shade=True, bar_shade=True)
    print("Plotting distributions")
    c.plotter.plot_distributions(filename=pfn + "_dist.png", truth=truth, col_wrap=8)
    params = ['$\\Omega_m$', '$\\alpha$', '$\\beta$', '$\\langle M_B \\rangle$']
    print("Plotting plot")
    c.plotter.plot(filename=[pfn + ".png", pfn + ".pdf"], parameters=params)
    print("Plotting summary")
    c.plotter.plot_summary(filename=[pfn + "_summary.png", pfn + "_summary.pdf"], parameters=['$\\Omega_m$'],
                           extents={'$\\Omega_m$': [0.1, 0.45]}, truth="Stat only", extra_parameter_spacing=1.5)
| {
"content_hash": "72c8b11225bb6e3f2c925017559f3eb6",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 112,
"avg_line_length": 51.833333333333336,
"alnum_prop": 0.6312968917470525,
"repo_name": "dessn/sn-bhm",
"id": "e31d51f675d8940b3ea51a4f18c1975793db1fb5",
"size": "2799",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dessn/configurations/old/approximate_systematic_load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "316"
},
{
"name": "HTML",
"bytes": "140"
},
{
"name": "Python",
"bytes": "342893"
},
{
"name": "Shell",
"bytes": "2079"
},
{
"name": "Stan",
"bytes": "59737"
},
{
"name": "TeX",
"bytes": "595827"
}
],
"symlink_target": ""
} |
"""
This is an example settings/local.py file.
These settings overrides what's in settings/base.py
"""
from . import base
# To extend any settings from settings/base.py here's an example.
# If you don't need to extend any settings from base.py, you do not need
# to import base above
INSTALLED_APPS = base.INSTALLED_APPS + ('django_nose',)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db/development.sqlite3',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
#'OPTIONS': {
# 'init_command': 'SET storage_engine=InnoDB',
# 'charset' : 'utf8',
# 'use_unicode' : True,
#},
#'TEST_CHARSET': 'utf8',
#'TEST_COLLATION': 'utf8_general_ci',
},
# 'slave': {
# ...
# },
}
# Recipients of traceback emails and other notifications.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
}
}
# SECURITY WARNING: don't run with debug turned on in production!
# Debugging displays nice error messages, but leaks memory. Set this to False
# on all server instances and True only for development.
DEBUG = TEMPLATE_DEBUG = True
# Is this a development instance? Set this to True on development/master
# instances and False on stage/prod.
DEV = True
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# SECURITY WARNING: keep the secret key used in production secret!
# Hardcoded values can leak through source control. Consider loading
# the secret key from an environment variable or a file instead.
SECRET_KEY = '=+!9j6yxpogud&-qi8mq&7rr6evuqo@24_xzd$hy+@19u#4#q@'
# Uncomment these to activate and customize Celery:
# CELERY_ALWAYS_EAGER = False # required to activate celeryd
# BROKER_HOST = 'localhost'
# BROKER_PORT = 5672
# BROKER_USER = 'django'
# BROKER_PASSWORD = 'django'
# BROKER_VHOST = 'django'
# CELERY_RESULT_BACKEND = 'amqp'
## Log settings
# Remove this configuration variable to use your custom logging configuration
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'loggers': {
'quizme': {
'level': "DEBUG"
}
}
}
INTERNAL_IPS = ('127.0.0.1')
| {
"content_hash": "c0626688db896309a8fb9d854e4fa8de",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 27.808988764044944,
"alnum_prop": 0.6521212121212121,
"repo_name": "adandan01/quizeme",
"id": "37567617e894c6addb7ec6062bbd75c0ae8f2573",
"size": "2475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quizme/settings/local-dist.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11300"
},
{
"name": "JavaScript",
"bytes": "734"
},
{
"name": "Python",
"bytes": "25081"
},
{
"name": "Shell",
"bytes": "8144"
}
],
"symlink_target": ""
} |
import traceback
from collections import namedtuple, defaultdict
import itertools
import logging
import textwrap
from shutil import get_terminal_size
from .abstract import Callable, DTypeSpec, Dummy, Literal, Type, weakref
from .common import Opaque
from .misc import unliteral
from numba.core import errors, utils, types, config
from numba.core.typeconv import Conversion
_logger = logging.getLogger(__name__)
# terminal color markup
_termcolor = errors.termcolor()
# Record of one failed typing attempt: the template tried, whether it
# matched, the error raised/message, and whether literal args were used.
_FAILURE = namedtuple('_FAILURE', 'template matched error literal')
_termwidth = get_terminal_size().columns
# pull out the lead line as unit tests often use this
_header_lead = "No implementation of function"
_header_template = (_header_lead + " {the_function} found for signature:\n \n "
                    ">>> {fname}({signature})\n \nThere are {ncandidates} "
                    "candidate implementations:")
_reason_template = """
" - Of which {nmatches} did not match due to:\n
"""
def _wrapper(tmp, indent=0):
return textwrap.indent(tmp, ' ' * indent, lambda line: True)
# Per-failure-class block rendered in the resolution-failure report.
_overload_template = ("- Of which {nduplicates} did not match due to:\n"
                      "{kind} {inof} function '{function}': File: {file}: "
                      "Line {line}.\n With argument(s): '({args})':")
# Reason wrappers keyed by failure category.
_err_reasons = {'specific_error': "Rejected as the implementation raised a "
                "specific error:\n{}"}
def _bt_as_lines(bt):
"""
Converts a backtrace into a list of lines, squashes it a bit on the way.
"""
return [y for y in itertools.chain(*[x.split('\n') for x in bt]) if y]
def argsnkwargs_to_str(args, kwargs):
    """Render positional and keyword arguments as a comma-separated string."""
    parts = [str(arg) for arg in args]
    parts.extend("{}={}".format(name, value) for name, value in kwargs.items())
    return ', '.join(parts)
class _ResolutionFailures(object):
    """Collect and format function resolution failures.

    Failures are grouped by error message so duplicates are reported once
    with a count; `format()` renders the grouped report and `raise_error()`
    turns it into a TypingError.
    """
    def __init__(self, context, function_type, args, kwargs, depth=0):
        self._context = context
        self._function_type = function_type
        self._args = args
        self._kwargs = kwargs
        # message -> list of _FAILURE records sharing that message
        self._failures = defaultdict(list)
        self._depth = depth
        self._max_depth = 5
        # indentation step (spaces) per nesting level
        self._scale = 2
    def __len__(self):
        return len(self._failures)
    def add_error(self, calltemplate, matched, error, literal):
        """
        Args
        ----
        calltemplate : CallTemplate
        error : Exception or str
            Error message
        """
        isexc = isinstance(error, Exception)
        errclazz = '%s: ' % type(error).__name__ if isexc else ''
        key = "{}{}".format(errclazz, str(error))
        self._failures[key].append(_FAILURE(calltemplate, matched, error,
                                            literal))
    def format(self):
        """Return a formatted error message from all the gathered errors.
        """
        indent = ' ' * self._scale
        argstr = argsnkwargs_to_str(self._args, self._kwargs)
        ncandidates = sum([len(x) for x in self._failures.values()])
        # sort out a display name for the function
        tykey = self._function_type.typing_key
        # most things have __name__
        fname = getattr(tykey, '__name__', None)
        is_external_fn_ptr = isinstance(self._function_type,
                                        ExternalFunctionPointer)
        if fname is None:
            if is_external_fn_ptr:
                fname = "ExternalFunctionPointer"
            else:
                fname = "<unknown function>"
        msgbuf = [_header_template.format(the_function=self._function_type,
                                          fname=fname,
                                          signature=argstr,
                                          ncandidates=ncandidates)]
        # Non-literal renderings of the arguments, used for failures that
        # were recorded against the unliteral'd forms.
        nolitargs = tuple([unliteral(a) for a in self._args])
        nolitkwargs = {k: unliteral(v) for k, v in self._kwargs.items()}
        nolitargstr = argsnkwargs_to_str(nolitargs, nolitkwargs)
        # depth could potentially get massive, so limit it.
        ldepth = min(max(self._depth, 0), self._max_depth)
        def template_info(tp):
            # Extract display metadata (name/file/lines/kind) for a template.
            src_info = tp.get_template_info()
            unknown = "unknown"
            source_name = src_info.get('name', unknown)
            source_file = src_info.get('filename', unknown)
            source_lines = src_info.get('lines', unknown)
            source_kind = src_info.get('kind', 'Unknown template')
            return source_name, source_file, source_lines, source_kind
        for i, (k, err_list) in enumerate(self._failures.items()):
            err = err_list[0]
            nduplicates = len(err_list)
            template, error = err.template, err.error
            ifo = template_info(template)
            source_name, source_file, source_lines, source_kind = ifo
            largstr = argstr if err.literal else nolitargstr
            if err.error == "No match.":
                err_dict = defaultdict(set)
                for errs in err_list:
                    err_dict[errs.template].add(errs.literal)
                # if there's just one template, and it's erroring on
                # literal/nonliteral be specific
                if len(err_dict) == 1:
                    template = [_ for _ in err_dict.keys()][0]
                    source_name, source_file, source_lines, source_kind = \
                        template_info(template)
                    source_lines = source_lines[0]
                else:
                    source_file = "<numerous>"
                    source_lines = "N/A"
                msgbuf.append(_termcolor.errmsg(
                    _wrapper(_overload_template.format(nduplicates=nduplicates,
                                                       kind=source_kind.title(),
                                                       function=fname,
                                                       inof='of',
                                                       file=source_file,
                                                       line=source_lines,
                                                       args=largstr),
                             ldepth + 1)))
                msgbuf.append(_termcolor.highlight(_wrapper(err.error,
                                                            ldepth + 2)))
            else:
                # There was at least one match in this failure class, but it
                # failed for a specific reason try and report this.
                msgbuf.append(_termcolor.errmsg(
                    _wrapper(_overload_template.format(nduplicates=nduplicates,
                                                       kind=source_kind.title(),
                                                       function=source_name,
                                                       inof='in',
                                                       file=source_file,
                                                       line=source_lines[0],
                                                       args=largstr),
                             ldepth + 1)))
                if isinstance(error, BaseException):
                    reason = indent + self.format_error(error)
                    errstr = _err_reasons['specific_error'].format(reason)
                else:
                    errstr = error
                # if you are a developer, show the back traces
                if config.DEVELOPER_MODE:
                    if isinstance(error, BaseException):
                        # if the error is an actual exception instance, trace it
                        bt = traceback.format_exception(type(error), error,
                                                        error.__traceback__)
                    else:
                        bt = [""]
                    bt_as_lines = _bt_as_lines(bt)
                    nd2indent = '\n{}'.format(2 * indent)
                    errstr += _termcolor.reset(nd2indent +
                                               nd2indent.join(bt_as_lines))
                msgbuf.append(_termcolor.highlight(_wrapper(errstr,
                                                            ldepth + 2)))
                loc = self.get_loc(template, error)
                if loc:
                    msgbuf.append('{}raised from {}'.format(indent, loc))
        # the commented bit rewraps each block, may not be helpful?!
        return _wrapper('\n'.join(msgbuf) + '\n')  # , self._scale * ldepth)
    def format_error(self, error):
        """Format error message or exception
        """
        if isinstance(error, Exception):
            return '{}: {}'.format(type(error).__name__, error)
        else:
            return '{}'.format(error)
    def get_loc(self, classtemplate, error):
        """Get source location information from the error message.
        """
        if isinstance(error, Exception) and hasattr(error, '__traceback__'):
            # traceback is unavailable in py2
            frame = traceback.extract_tb(error.__traceback__)[-1]
            return "{}:{}".format(frame[0], frame[1])
    def raise_error(self):
        # ForceLiteralArg requests take priority: re-raise the first one so
        # the dispatcher can retry with literal arguments.
        for faillist in self._failures.values():
            for fail in faillist:
                if isinstance(fail.error, errors.ForceLiteralArg):
                    raise fail.error
        raise errors.TypingError(self.format())
def _unlit_non_poison(ty):
    """Apply unliteral(ty) and raise a TypingError if type is Poison.
    """
    stripped = unliteral(ty)
    if isinstance(stripped, types.Poison):
        raise errors.TypingError(
            f"Poison type used in arguments; got {stripped}")
    return stripped
class BaseFunction(Callable):
    """
    Base type class for some function types.
    """
    def __init__(self, template):
        # Accept a single template or a collection sharing one typing key.
        if isinstance(template, (list, tuple)):
            self.templates = tuple(template)
            keys = set(temp.key for temp in self.templates)
            if len(keys) != 1:
                raise ValueError("incompatible templates: keys = %s"
                                 % (keys,))
            self.typing_key, = keys
        else:
            self.templates = (template,)
            self.typing_key = template.key
        # signature args -> implementation key, filled by get_call_type()
        self._impl_keys = {}
        name = "%s(%s)" % (self.__class__.__name__, self.typing_key)
        # recursion depth of in-flight resolutions, for error indentation
        self._depth = 0
        super(BaseFunction, self).__init__(name)
    @property
    def key(self):
        return self.typing_key, self.templates
    def augment(self, other):
        """
        Augment this function type with the other function types' templates,
        so as to support more input types.
        """
        if type(other) is type(self) and other.typing_key == self.typing_key:
            return type(self)(self.templates + other.templates)
    def get_impl_key(self, sig):
        """
        Get the implementation key (used by the target context) for the
        given signature.
        """
        return self._impl_keys[sig.args]
    def get_call_type(self, context, args, kws):
        """Resolve a call signature for the given argument types.

        Tries each template (ordered by target specificity), with literal
        and non-literal argument forms; returns the first signature found,
        otherwise raises via the collected _ResolutionFailures.
        """
        prefer_lit = [True, False]    # old behavior preferring literal
        prefer_not = [False, True]    # new behavior preferring non-literal
        failures = _ResolutionFailures(context, self, args, kws,
                                       depth=self._depth)
        # get the order in which to try templates
        from numba.core.target_extension import get_local_target # circular
        target_hw = get_local_target(context)
        order = utils.order_by_target_specificity(target_hw, self.templates,
                                                  fnkey=self.key[0])
        self._depth += 1
        for temp_cls in order:
            temp = temp_cls(context)
            # The template can override the default and prefer literal args
            choice = prefer_lit if temp.prefer_literal else prefer_not
            for uselit in choice:
                try:
                    if uselit:
                        sig = temp.apply(args, kws)
                    else:
                        nolitargs = tuple([_unlit_non_poison(a) for a in args])
                        nolitkws = {k: _unlit_non_poison(v)
                                    for k, v in kws.items()}
                        sig = temp.apply(nolitargs, nolitkws)
                except Exception as e:
                    if (utils.use_new_style_errors() and not
                            isinstance(e, errors.NumbaError)):
                        raise e
                    else:
                        sig = None
                        failures.add_error(temp, False, e, uselit)
                else:
                    if sig is not None:
                        # Success: remember the impl key for this signature.
                        self._impl_keys[sig.args] = temp.get_impl_key(sig)
                        self._depth -= 1
                        return sig
                    else:
                        registered_sigs = getattr(temp, 'cases', None)
                        if registered_sigs is not None:
                            msg = "No match for registered cases:\n%s"
                            msg = msg % '\n'.join(" * {}".format(x) for x in
                                                  registered_sigs)
                        else:
                            msg = 'No match.'
                        failures.add_error(temp, True, msg, uselit)
        failures.raise_error()
    def get_call_signatures(self):
        # Collect all registered signatures; is_param reports whether any
        # template is generic (has a `generic` method).
        sigs = []
        is_param = False
        for temp in self.templates:
            sigs += getattr(temp, 'cases', [])
            is_param = is_param or hasattr(temp, 'generic')
        return sigs, is_param
class Function(BaseFunction, Opaque):
    """
    Type class for builtin functions implemented by Numba.

    Combines BaseFunction's template-based call resolution with an Opaque
    runtime representation.
    """
class BoundFunction(Callable, Opaque):
    """
    A function with an implicit first argument (denoted as *this* below).
    """
    def __init__(self, template, this):
        # Create a derived template with an attribute *this*
        newcls = type(template.__name__ + '.' + str(this), (template,),
                      dict(this=this))
        self.template = newcls
        self.typing_key = self.template.key
        self.this = this
        name = "%s(%s for %s)" % (self.__class__.__name__,
                                  self.typing_key, self.this)
        super(BoundFunction, self).__init__(name)
    def unify(self, typingctx, other):
        # Two bound functions unify when they share the same typing key and
        # their *this* types unify; returns None (implicitly) otherwise.
        if (isinstance(other, BoundFunction) and
                self.typing_key == other.typing_key):
            this = typingctx.unify_pairs(self.this, other.this)
            if this is not None:
                # XXX is it right that both template instances are distinct?
                return self.copy(this=this)
    def copy(self, this):
        # Rebind the same template to a different *this* type.
        return type(self)(self.template, this)
    @property
    def key(self):
        # FIXME: With target-overload, the MethodTemplate can change depending
        # on the target.
        unique_impl = getattr(self.template, "_overload_func", None)
        return self.typing_key, self.this, unique_impl
    def get_impl_key(self, sig):
        """
        Get the implementation key (used by the target context) for the
        given signature.
        """
        return self.typing_key
    def get_call_type(self, context, args, kws):
        """
        Resolve a call on this bound function, trying literal and
        non-literal argument forms in the order dictated by the template's
        ``prefer_literal`` flag.  Raises TypingError with both failure
        messages if neither form types.
        """
        template = self.template(context)
        literal_e = None
        nonliteral_e = None
        out = None
        choice = [True, False] if template.prefer_literal else [False, True]
        for uselit in choice:
            if uselit:
                # Try with Literal
                try:
                    out = template.apply(args, kws)
                except Exception as exc:
                    if (utils.use_new_style_errors() and not
                            isinstance(exc, errors.NumbaError)):
                        raise exc
                    if isinstance(exc, errors.ForceLiteralArg):
                        raise exc
                    literal_e = exc
                    out = None
                else:
                    break
            else:
                # if the unliteral_args and unliteral_kws are the same as the
                # literal ones, set up to not bother retrying
                unliteral_args = tuple([_unlit_non_poison(a) for a in args])
                unliteral_kws = {k: _unlit_non_poison(v)
                                 for k, v in kws.items()}
                skip = unliteral_args == args and kws == unliteral_kws
                # If the above template application failed and the non-literal
                # args are different to the literal ones, try again with
                # literals rewritten as non-literals
                if not skip and out is None:
                    try:
                        out = template.apply(unliteral_args, unliteral_kws)
                    except Exception as exc:
                        if isinstance(exc, errors.ForceLiteralArg):
                            if template.prefer_literal:
                                # For template that prefers literal types,
                                # reaching here means that the literal types
                                # have failed typing as well.
                                raise exc
                        nonliteral_e = exc
                    else:
                        break
        if out is None and (nonliteral_e is not None or literal_e is not None):
            # Neither attempt produced a signature: build a combined report.
            header = "- Resolution failure for {} arguments:\n{}\n"
            tmplt = _termcolor.highlight(header)
            if config.DEVELOPER_MODE:
                indent = ' ' * 4
                def add_bt(error):
                    if isinstance(error, BaseException):
                        # if the error is an actual exception instance, trace it
                        bt = traceback.format_exception(type(error), error,
                                                        error.__traceback__)
                    else:
                        bt = [""]
                    nd2indent = '\n{}'.format(2 * indent)
                    errstr = _termcolor.reset(nd2indent +
                                              nd2indent.join(_bt_as_lines(bt)))
                    return _termcolor.reset(errstr)
            else:
                add_bt = lambda X: ''
            def nested_msg(literalness, e):
                estr = str(e)
                estr = estr if estr else (str(repr(e)) + add_bt(e))
                new_e = errors.TypingError(textwrap.dedent(estr))
                return tmplt.format(literalness, str(new_e))
            raise errors.TypingError(nested_msg('literal', literal_e) +
                                     nested_msg('non-literal', nonliteral_e))
        return out
    def get_call_signatures(self):
        sigs = getattr(self.template, 'cases', [])
        is_param = hasattr(self.template, 'generic')
        return sigs, is_param
class MakeFunctionLiteral(Literal, Opaque):
    """Marker type combining Literal and Opaque; adds no behavior of its
    own."""
    pass
class _PickleableWeakRef(weakref.ref):
"""
Allow a weakref to be pickled.
Note that if the object referred to is not kept alive elsewhere in the
pickle, the weakref will immediately expire after being constructed.
"""
def __getnewargs__(self):
obj = self()
if obj is None:
raise ReferenceError("underlying object has vanished")
return (obj,)
class WeakType(Type):
    """
    Base class for types parameterized by a mortal object, of which only
    a weak reference is kept.
    """
    def _store_object(self, obj):
        # Keep a picklable weakref rather than a strong reference.
        self._wr = _PickleableWeakRef(obj)
    def _get_object(self):
        referent = self._wr()
        if referent is None:
            raise ReferenceError("underlying object has vanished")
        return referent
    @property
    def key(self):
        return self._wr
    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        mine = self._wr()
        # Two expired weakrefs never compare equal.
        return mine is not None and mine is other._wr()
    def __hash__(self):
        return Type.__hash__(self)
class Dispatcher(WeakType, Callable, Dummy):
    """
    Type class for @jit-compiled functions.
    """
    def __init__(self, dispatcher):
        # Only a weak reference to the dispatcher is kept (see WeakType).
        self._store_object(dispatcher)
        super(Dispatcher, self).__init__("type(%s)" % dispatcher)
    def dump(self, tab=''):
        # NOTE(review): nothing in this class assigns ``self._code``;
        # confirm it is set elsewhere, otherwise this raises
        # AttributeError when called.
        print((f'{tab}DUMP {type(self).__name__}[code={self._code}, '
               f'name={self.name}]'))
        self.dispatcher.dump(tab=tab + '  ')
        print(f'{tab}END DUMP')
    def get_call_type(self, context, args, kws):
        """
        Resolve a call to this dispatcher using the given argument types.
        A signature returned and it is ensured that a compiled specialization
        is available for it.
        """
        template, pysig, args, kws = \
            self.dispatcher.get_call_template(args, kws)
        sig = template(context).apply(args, kws)
        if sig:
            sig = sig.replace(pysig=pysig)
        return sig
    def get_call_signatures(self):
        # The True flag mirrors BaseFunction's "is parametric" convention.
        sigs = self.dispatcher.nopython_signatures
        return sigs, True
    @property
    def dispatcher(self):
        """
        A strong reference to the underlying numba.dispatcher.Dispatcher
        instance.
        """
        disp = self._get_object()
        # TODO: improve interface to avoid the dynamic check here
        if hasattr(disp, "_get_dispatcher_for_current_target"):
            return disp._get_dispatcher_for_current_target()
        else:
            return disp
    def get_overload(self, sig):
        """
        Get the compiled overload for the given signature.
        """
        return self.dispatcher.get_overload(sig.args)
    def get_impl_key(self, sig):
        """
        Get the implementation key for the given signature.
        """
        return self.get_overload(sig)
    def unify(self, context, other):
        return utils.unified_function_type((self, other), require_precise=False)
    def can_convert_to(self, typingctx, other):
        # Implicitly returns None (no conversion) when the target is not a
        # FunctionType or no compile result exists for its signature.
        if isinstance(other, types.FunctionType):
            if self.dispatcher.get_compile_result(other.signature):
                return Conversion.safe
class ObjModeDispatcher(Dispatcher):
    """Dispatcher subclass that enters objectmode function.

    All behavior is inherited from Dispatcher; the subclass only serves
    as a distinct type tag.
    """
    pass
class ExternalFunctionPointer(BaseFunction):
    """
    A pointer to a native function (e.g. exported via ctypes or cffi).
    *get_pointer* is a Python function taking an object
    and returning the raw pointer value as an int.
    """
    def __init__(self, sig, get_pointer, cconv=None):
        # Local imports avoid import cycles at module load time.
        from numba.core.typing.templates import (AbstractTemplate,
                                                 make_concrete_template,
                                                 signature)
        from numba.core.types import ffi_forced_object
        if sig.return_type == ffi_forced_object:
            raise TypeError("Cannot return a pyobject from a external function")
        self.sig = sig
        # Passing pyobjects (ffi_forced_object) requires holding the GIL.
        self.requires_gil = any(a == ffi_forced_object for a in self.sig.args)
        self.get_pointer = get_pointer
        self.cconv = cconv
        if self.requires_gil:
            class GilRequiringDefn(AbstractTemplate):
                key = self.sig
                def generic(self, args, kws):
                    if kws:
                        raise TypeError("does not support keyword arguments")
                    # Make ffi_forced_object a bottom type to allow any type to
                    # be casted to it. This is the only place that support
                    # ffi_forced_object.
                    coerced = [actual if formal == ffi_forced_object else formal
                               for actual, formal
                               in zip(args, self.key.args)]
                    return signature(self.key.return_type, *coerced)
            template = GilRequiringDefn
        else:
            template = make_concrete_template("CFuncPtr", sig, [sig])
        super(ExternalFunctionPointer, self).__init__(template)
    @property
    def key(self):
        return self.sig, self.cconv, self.get_pointer
class ExternalFunction(Function):
    """
    A native function known by name (resolvable by LLVM) with an
    explicitly given signature.  For internal use only.
    """
    def __init__(self, symbol, sig):
        # Lazy import, as done elsewhere in this module to avoid cycles.
        from numba.core import typing
        self.symbol = symbol
        self.sig = sig
        concrete = typing.make_concrete_template(symbol, symbol, [sig])
        super(ExternalFunction, self).__init__(concrete)
    @property
    def key(self):
        return self.symbol, self.sig
class NamedTupleClass(Callable, Opaque):
    """
    Type class for namedtuple classes.
    """
    def __init__(self, instance_class):
        self.instance_class = instance_class
        super(NamedTupleClass, self).__init__("class(%s)" % (instance_class,))
    def get_call_type(self, context, args, kws):
        # Constructor typing is resolved by the __call__ resolution in
        # typing.collections, never here.
        return None
    def get_call_signatures(self):
        return (), True
    def get_impl_key(self, sig):
        return type(self)
    @property
    def key(self):
        return self.instance_class
class NumberClass(Callable, DTypeSpec, Opaque):
    """
    Type class for number classes (e.g. "np.float64").
    """
    def __init__(self, instance_type):
        self.instance_type = instance_type
        super(NumberClass, self).__init__("class(%s)" % (instance_type,))
    def get_call_type(self, context, args, kws):
        # Constructor typing is resolved by the __call__ resolution in
        # typing.builtins, never here.
        return None
    def get_call_signatures(self):
        return (), True
    def get_impl_key(self, sig):
        return type(self)
    @property
    def key(self):
        return self.instance_type
    @property
    def dtype(self):
        # DTypeSpec protocol: the dtype is the wrapped instance type.
        return self.instance_type
class RecursiveCall(Opaque):
    """
    Recursive call to a Dispatcher.
    """
    # Class-level default; see the note in __init__.
    _overloads = None
    def __init__(self, dispatcher_type):
        assert isinstance(dispatcher_type, Dispatcher)
        self.dispatcher_type = dispatcher_type
        name = "recursive(%s)" % (dispatcher_type,)
        super(RecursiveCall, self).__init__(name)
        # Initializing for the first time
        # NOTE(review): the assignment below creates an *instance*
        # attribute, so the class-level value stays None and this branch
        # is taken for every instance — confirm per-instance dicts are
        # intended.
        if self._overloads is None:
            self._overloads = {}
    @property
    def overloads(self):
        return self._overloads
    @property
    def key(self):
        return self.dispatcher_type
| {
"content_hash": "d4b8a8d22676a942dcd5b393e0be7a25",
"timestamp": "",
"source": "github",
"line_count": 726,
"max_line_length": 80,
"avg_line_length": 36.64462809917355,
"alnum_prop": 0.5274770711171253,
"repo_name": "seibert/numba",
"id": "64b2bda24ddb3012245d013ad162daef5485ae52",
"size": "26604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/core/types/functions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6877"
},
{
"name": "C",
"bytes": "639446"
},
{
"name": "C++",
"bytes": "93702"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8688132"
},
{
"name": "Shell",
"bytes": "13404"
}
],
"symlink_target": ""
} |
"""
Commands helpers.
.. currentmodule:: curious.commands
.. autosummary::
:toctree: commands
manager
context
decorators
plugin
utils
ratelimit
help
conditions
exc
converters
"""
from curious.commands.context import Context
from curious.commands.decorators import command, condition
from curious.commands.manager import CommandsManager
from curious.commands.plugin import Plugin
| {
"content_hash": "f467c75f2329131f900816895f1b52a8",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 58,
"avg_line_length": 17.791666666666668,
"alnum_prop": 0.7353629976580797,
"repo_name": "SunDwarf/curious",
"id": "9b961afbdaa6370f395e3b3ea710451d0bf9bf25",
"size": "1102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "curious/commands/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "522950"
}
],
"symlink_target": ""
} |
from dex.tools.clang_opt_bisect.Tool import Tool
| {
"content_hash": "83418c6ac2dce2c17dbed4de48df2ae5",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 48,
"avg_line_length": 49,
"alnum_prop": 0.8163265306122449,
"repo_name": "endlessm/chromium-browser",
"id": "b933e690b236fde57540a4cfc2b4709991304999",
"size": "328",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "third_party/llvm/debuginfo-tests/dexter/dex/tools/clang_opt_bisect/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Tests for the VMDK image path specification implementation."""
import unittest
from dfvfs.path import vmdk_path_spec
from tests.path import test_lib
class VMDKPathSpecTest(test_lib.PathSpecTestCase):
  """Tests for the VMDK image path specification implementation."""

  def testInitialize(self):
    """Tests the path specification initialization."""
    path_spec = vmdk_path_spec.VMDKPathSpec(parent=self._path_spec)
    self.assertIsNotNone(path_spec)
    # A parent path specification is required.
    with self.assertRaises(ValueError):
      vmdk_path_spec.VMDKPathSpec(parent=None)
    # Unsupported keyword arguments are rejected.
    with self.assertRaises(ValueError):
      vmdk_path_spec.VMDKPathSpec(parent=self._path_spec, bogus='BOGUS')

  def testComparable(self):
    """Tests the path specification comparable property."""
    path_spec = vmdk_path_spec.VMDKPathSpec(parent=self._path_spec)
    self.assertIsNotNone(path_spec)
    # The comparable is one "type: ..." line per level, ending in a newline.
    expected_comparable = '\n'.join([
        'type: TEST',
        'type: VMDK',
        ''])
    self.assertEqual(path_spec.comparable, expected_comparable)
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "7327bb71c71fb7926944973ae6479f91",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 26.725,
"alnum_prop": 0.7043966323666978,
"repo_name": "log2timeline/dfvfs",
"id": "a1af0d491dd78a26bab472b1441b750539828baf",
"size": "1115",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/path/vmdk_path_spec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14212"
},
{
"name": "Makefile",
"bytes": "122"
},
{
"name": "PowerShell",
"bytes": "1021"
},
{
"name": "Python",
"bytes": "2176548"
},
{
"name": "Shell",
"bytes": "19355"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import six
import datetime
import calendar
import uuid
import math
import io
import struct
from collections import OrderedDict
import logging
from struct import pack, unpack
from aenum import Enum
from datetime import timedelta
from gremlin_python import statics
from gremlin_python.statics import FloatType, FunctionType, IntType, LongType, TypeType, DictType, ListType, SetType, \
SingleByte, ByteBufferType, GremlinType, SingleChar
from gremlin_python.process.traversal import Barrier, Binding, Bytecode, Cardinality, Column, Direction, Operator, \
Order, Pick, Pop, P, Scope, TextP, Traversal, Traverser, \
TraversalStrategy, T
from gremlin_python.process.graph_traversal import GraphTraversal
from gremlin_python.structure.graph import Graph, Edge, Property, Vertex, VertexProperty, Path
from gremlin_python.structure.io.util import HashableDict
# Module-level logger for this serializer module.
log = logging.getLogger(__name__)
# When we fall back to a superclass's serializer, we iterate over this map.
# We want that iteration order to be consistent, so we use an OrderedDict,
# not a dict.
_serializers = OrderedDict()
# Deserializers are looked up by exact DataType code, so a plain dict is fine.
_deserializers = {}
class DataType(Enum):
    """GraphBinary wire-format type codes (one byte each)."""
    null = 0xfe
    int = 0x01
    long = 0x02
    string = 0x03
    date = 0x04
    timestamp = 0x05
    clazz = 0x06
    double = 0x07
    float = 0x08
    list = 0x09
    map = 0x0a
    set = 0x0b
    uuid = 0x0c
    edge = 0x0d
    path = 0x0e
    property = 0x0f
    graph = 0x10                  # not supported - no graph object in python yet
    vertex = 0x11
    vertexproperty = 0x12
    barrier = 0x13
    binding = 0x14
    bytecode = 0x15
    cardinality = 0x16
    column = 0x17
    direction = 0x18
    operator = 0x19
    order = 0x1a
    pick = 0x1b
    pop = 0x1c
    lambda_ = 0x1d
    p = 0x1e
    scope = 0x1f
    t = 0x20
    traverser = 0x21
    bigdecimal = 0x22             # todo
    biginteger = 0x23             # todo
    byte = 0x24
    bytebuffer = 0x25
    short = 0x26                  # todo
    boolean = 0x27
    textp = 0x28
    traversalstrategy = 0x29
    bulkset = 0x2a
    tree = 0x2b                   # not supported - no tree object in Python yet
    metrics = 0x2c
    traversalmetrics = 0x2d
    char = 0x80
    duration = 0x81
    inetaddress = 0x82            # todo
    instant = 0x83                # todo
    localdate = 0x84              # todo
    localdatetime = 0x85          # todo
    localtime = 0x86              # todo
    monthday = 0x87               # todo
    offsetdatetime = 0x88         # todo
    offsettime = 0x89             # todo
    period = 0x8a                 # todo
    year = 0x8b                   # todo
    yearmonth = 0x8c              # todo
    zonedatetime = 0x8d           # todo
    zoneoffset = 0x8e             # todo
    custom = 0x00                 # todo
NULL_BYTES = [DataType.null.value, 0x01]
def _make_packer(format_string):
packer = struct.Struct(format_string)
pack = packer.pack
unpack = lambda s: packer.unpack(s)[0]
return pack, unpack
# Big-endian fixed-width (un)packers used throughout the wire protocol.
int64_pack, int64_unpack = _make_packer('>q')
int32_pack, int32_unpack = _make_packer('>i')
int8_pack, int8_unpack = _make_packer('>b')
uint64_pack, uint64_unpack = _make_packer('>Q')
uint8_pack, uint8_unpack = _make_packer('>B')
float_pack, float_unpack = _make_packer('>f')
double_pack, double_unpack = _make_packer('>d')
class GraphBinaryTypeType(type):
    """Metaclass that auto-registers IO classes.

    Any subclass whose name does not start with an underscore is added to
    the module-level serializer map (keyed by ``python_type``) and
    deserializer map (keyed by ``graphbinary_type``).
    """
    def __new__(mcs, name, bases, dct):
        cls = super(GraphBinaryTypeType, mcs).__new__(mcs, name, bases, dct)
        if not name.startswith('_'):
            if cls.python_type:
                _serializers[cls.python_type] = cls
            if cls.graphbinary_type:
                _deserializers[cls.graphbinary_type] = cls
        return cls
class GraphBinaryWriter(object):
    """Serializes Python objects into GraphBinary bytes via the registered
    serializer classes."""
    def __init__(self, serializer_map=None):
        # Start from the module-level registry; optional overrides on top.
        self.serializers = _serializers.copy()
        if serializer_map:
            self.serializers.update(serializer_map)
    def writeObject(self, objectData):
        return self.toDict(objectData)
    def toDict(self, obj, to_extend=None):
        # Appends the serialized form of *obj* to the *to_extend* bytearray.
        if to_extend is None:
            to_extend = bytearray()
        if obj is None:
            # Note: returns None here (the null bytes were still appended).
            to_extend.extend(NULL_BYTES)
            return
        try:
            t = type(obj)
            return self.serializers[t].dictify(obj, self, to_extend)
        except KeyError:
            # No exact-type serializer: fall back to isinstance checks, in
            # the registry's (ordered) iteration order.
            for key, serializer in self.serializers.items():
                if isinstance(obj, key):
                    return serializer.dictify(obj, self, to_extend)
        # Last resort: recurse into plain containers, else return as-is.
        if isinstance(obj, dict):
            return dict((self.toDict(k, to_extend), self.toDict(v, to_extend)) for k, v in obj.items())
        elif isinstance(obj, set):
            return set([self.toDict(o, to_extend) for o in obj])
        elif isinstance(obj, list):
            return [self.toDict(o, to_extend) for o in obj]
        else:
            return obj
class GraphBinaryReader(object):
    """Deserializes GraphBinary bytes back into Python objects via the
    registered deserializer classes."""
    def __init__(self, deserializer_map=None):
        self.deserializers = _deserializers.copy()
        if deserializer_map:
            self.deserializers.update(deserializer_map)
    def readObject(self, b):
        # Accepts either raw bytes (wrapped in BytesIO) or a buffered stream.
        if isinstance(b, bytearray):
            return self.toObject(io.BytesIO(b))
        elif isinstance(b, io.BufferedIOBase):
            return self.toObject(b)
    def toObject(self, buff, data_type=None, nullable=True):
        # With no explicit data_type, the type code is read from the stream.
        if data_type is None:
            bt = uint8_unpack(buff.read(1))
            if bt == DataType.null.value:
                if nullable:
                    # Consume the null flag byte.
                    buff.read(1)
                return None
            return self.deserializers[DataType(bt)].objectify(buff, self, nullable)
        else:
            return self.deserializers[data_type].objectify(buff, self, nullable)
@six.add_metaclass(GraphBinaryTypeType)
class _GraphBinaryTypeIO(object):
    """Common base for all GraphBinary (de)serializer classes.

    Subclasses set ``python_type``/``graphbinary_type`` and implement
    ``dictify`` (serialize) and ``objectify`` (deserialize); the metaclass
    registers each non-underscore subclass automatically.
    """
    python_type = None
    graphbinary_type = None
    # Maps Python-mangled Gremlin keywords back to their wire names.
    symbolMap = {"global_": "global", "as_": "as", "in_": "in", "and_": "and",
                 "or_": "or", "is_": "is", "not_": "not", "from_": "from",
                 "set_": "set", "list_": "list", "all_": "all", "with_": "with",
                 "filter_": "filter", "id_": "id", "max_": "max", "min_": "min", "sum_": "sum"}
    @classmethod
    def prefix_bytes(cls, graphbin_type, as_value=False, nullable=True, to_extend=None):
        # Writes the optional type code and the "not null" flag byte.
        if to_extend is None:
            to_extend = bytearray()
        if not as_value:
            to_extend += uint8_pack(graphbin_type.value)
        if nullable:
            to_extend += int8_pack(0)
        return to_extend
    @classmethod
    def read_int(cls, buff):
        # 4-byte big-endian signed int (the standard length prefix).
        return int32_unpack(buff.read(4))
    @classmethod
    def unmangle_keyword(cls, symbol):
        return cls.symbolMap.get(symbol, symbol)
    @classmethod
    def is_null(cls, buff, reader, else_opt, nullable=True):
        # Consumes the value flag; returns None on 0x01, else delegates.
        return None if nullable and buff.read(1)[0] == 0x01 else else_opt(buff, reader)
    def dictify(self, obj, writer, to_extend, as_value=False, nullable=True):
        raise NotImplementedError()
    def objectify(self, d, reader, nullable=True):
        raise NotImplementedError()
class LongIO(_GraphBinaryTypeIO):
    """Serializer for Python longs as 8-byte big-endian values."""
    python_type = LongType
    graphbinary_type = DataType.long
    byte_format_pack = int64_pack
    byte_format_unpack = int64_unpack
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        # Values outside the signed 64-bit range are not representable here.
        if obj < -9223372036854775808 or obj > 9223372036854775807:
            raise Exception("TODO: don't forget bigint")
        else:
            cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
            to_extend.extend(cls.byte_format_pack(obj))
            return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, lambda b, r: int64_unpack(buff.read(8)), nullable)
class IntIO(LongIO):
    """Serializer for ints as 4-byte big-endian values; writing reuses
    LongIO.dictify with the overridden pack function and bounds check."""
    python_type = IntType
    graphbinary_type = DataType.int
    byte_format_pack = int32_pack
    byte_format_unpack = int32_unpack
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, lambda b, r: cls.read_int(b), nullable)
class DateIO(_GraphBinaryTypeIO):
    """Serializer for datetimes as epoch milliseconds (8-byte integer)."""
    python_type = datetime.datetime
    graphbinary_type = DataType.date
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        try:
            # datetime path: seconds from the UTC time tuple plus the
            # sub-second microsecond component, expressed in milliseconds.
            timestamp_seconds = calendar.timegm(obj.utctimetuple())
            pts = timestamp_seconds * 1e3 + getattr(obj, 'microsecond', 0) / 1e3
        except AttributeError:
            # date path: no utctimetuple(); whole days only.
            pts = calendar.timegm(obj.timetuple()) * 1e3
        ts = int(round(pts))
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int64_pack(ts))
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        # Reads back a naive UTC datetime.
        return cls.is_null(buff, reader,
                           lambda b, r: datetime.datetime.utcfromtimestamp(int64_unpack(b.read(8)) / 1000.0),
                           nullable)
# Based on current implementation, this class must always be declared before FloatIO.
# Seems pretty fragile for future maintainers. Maybe look into this.
class TimestampIO(_GraphBinaryTypeIO):
    """Serializer for the timestamp wrapper type (a float of epoch
    seconds), stored on the wire as epoch milliseconds."""
    python_type = statics.timestamp
    graphbinary_type = DataType.timestamp
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        # Java timestamp expects milliseconds integer - Have to use int because of legacy Python
        ts = int(round(obj * 1000))
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int64_pack(ts))
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        # Python timestamp expects seconds
        return cls.is_null(buff, reader, lambda b, r: statics.timestamp(int64_unpack(b.read(8)) / 1000.0),
                           nullable)
def _long_bits_to_double(bits):
return unpack('d', pack('Q', bits))[0]
NAN = _long_bits_to_double(0x7ff8000000000000)
POSITIVE_INFINITY = _long_bits_to_double(0x7ff0000000000000)
NEGATIVE_INFINITY = _long_bits_to_double(0xFff0000000000000)
class FloatIO(LongIO):
    """Serializer for 32-bit floats; NaN and infinities are written with
    their canonical bit patterns."""
    python_type = FloatType
    graphbinary_type = DataType.float
    graphbinary_base_type = DataType.float
    byte_format_pack = float_pack
    byte_format_unpack = float_unpack
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        # Normalize the special values to their canonical constants.
        if math.isnan(obj):
            value = NAN
        elif math.isinf(obj):
            value = POSITIVE_INFINITY if obj > 0 else NEGATIVE_INFINITY
        else:
            value = obj
        to_extend.extend(cls.byte_format_pack(value))
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, lambda b, r: float_unpack(b.read(4)), nullable)
class DoubleIO(FloatIO):
    """
    Floats basically just fall through to double serialization.

    Inherits FloatIO.dictify (including NaN/infinity normalization) with
    the 8-byte pack function; only reading is overridden.
    """
    graphbinary_type = DataType.double
    graphbinary_base_type = DataType.double
    byte_format_pack = double_pack
    byte_format_unpack = double_unpack
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, lambda b, r: double_unpack(b.read(8)), nullable)
class CharIO(_GraphBinaryTypeIO):
    """Serializer for a single unicode character.

    The character is written as its raw UTF-8 bytes with no length
    prefix, so the reader decodes incrementally, one byte at a time,
    until the accumulated bytes form a valid UTF-8 sequence.
    """
    python_type = SingleChar
    graphbinary_type = DataType.char
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(obj.encode("utf-8"))
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_char, nullable)
    @classmethod
    def _read_char(cls, b, r):
        # A UTF-8 encoded code point is at most 4 bytes long.  The previous
        # implementation consumed a 5th byte from the stream and silently
        # returned None on undecodable input; now at most 4 bytes are read
        # and a final UnicodeDecodeError propagates to the caller.
        x = b.read(1)
        for _ in range(3):
            try:
                return x.decode("utf-8")
            except UnicodeDecodeError:
                x += b.read(1)
        return x.decode("utf-8")
class StringIO(_GraphBinaryTypeIO):
    """Serializer for unicode strings: a 4-byte big-endian byte count
    followed by the UTF-8 payload."""
    python_type = str
    graphbinary_type = DataType.string
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        encoded = obj.encode("utf-8")
        to_extend.extend(int32_pack(len(encoded)))
        to_extend.extend(encoded)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(
            buff, reader,
            lambda b, r: b.read(cls.read_int(b)).decode("utf-8"),
            nullable)
class ListIO(_GraphBinaryTypeIO):
    """Serializer for lists: a 4-byte element count followed by each
    element as a fully-qualified value."""
    python_type = list
    graphbinary_type = DataType.list
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int32_pack(len(obj)))
        for element in obj:
            writer.toDict(element, to_extend)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_list, nullable)
    @classmethod
    def _read_list(cls, b, r):
        count = cls.read_int(b)
        return [r.readObject(b) for _ in range(count)]
class SetDeserializer(ListIO):
    """Set support on top of ListIO's wire format (count + elements):
    this class only overrides reading to convert the decoded list into a
    set; writing is inherited and uses the set type code below."""
    python_type = SetType
    graphbinary_type = DataType.set
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return set(ListIO.objectify(buff, reader, nullable))
class MapIO(_GraphBinaryTypeIO):
    """Serializer for dicts: a 4-byte entry count followed by alternating
    fully-qualified key and value entries."""
    python_type = DictType
    graphbinary_type = DataType.map
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int32_pack(len(obj)))
        for key, value in obj.items():
            writer.toDict(key, to_extend)
            writer.toDict(value, to_extend)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_map, nullable)
    @classmethod
    def _read_map(cls, b, r):
        result = {}
        for _ in range(cls.read_int(b)):
            # Wrap keys via HashableDict so dict-valued keys stay hashable.
            key = HashableDict.of(r.readObject(b))
            result[key] = r.readObject(b)
        return result
class UuidIO(_GraphBinaryTypeIO):
    """Serializer for UUIDs as their raw 16-byte form."""
    python_type = uuid.UUID
    graphbinary_type = DataType.uuid
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(obj.bytes)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_uuid, nullable)
    @classmethod
    def _read_uuid(cls, b, r):
        return uuid.UUID(bytes=b.read(16))
class EdgeIO(_GraphBinaryTypeIO):
    """Serializer for Edge: id, label, in-vertex (id + label), out-vertex
    (id + label), followed by two null placeholders."""
    python_type = Edge
    graphbinary_type = DataType.edge
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        writer.toDict(obj.id, to_extend)
        # Labels are written as unqualified, non-nullable strings.
        StringIO.dictify(obj.label, writer, to_extend, True, False)
        writer.toDict(obj.inV.id, to_extend)
        StringIO.dictify(obj.inV.label, writer, to_extend, True, False)
        writer.toDict(obj.outV.id, to_extend)
        StringIO.dictify(obj.outV.label, writer, to_extend, True, False)
        # Two trailing null values (4 bytes total); presumably the edge's
        # parent and properties fields, which are never populated here.
        to_extend.extend(NULL_BYTES)
        to_extend.extend(NULL_BYTES)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_edge, nullable)
    @classmethod
    def _read_edge(cls, b, r):
        edgeid = r.readObject(b)
        edgelbl = r.toObject(b, DataType.string, False)
        inv = Vertex(r.readObject(b), r.toObject(b, DataType.string, False))
        outv = Vertex(r.readObject(b), r.toObject(b, DataType.string, False))
        edge = Edge(edgeid, outv, edgelbl, inv)
        # Skip the two trailing null values written by dictify.
        b.read(4)
        return edge
class PathIO(_GraphBinaryTypeIO):
    """Serializer for Path as its labels followed by its objects, each a
    fully-qualified value."""
    python_type = Path
    graphbinary_type = DataType.path
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        writer.toDict(obj.labels, to_extend)
        writer.toDict(obj.objects, to_extend)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_path, nullable)
    @classmethod
    def _read_path(cls, b, r):
        # Read in write order: labels first, then objects.
        labels = r.readObject(b)
        objects = r.readObject(b)
        return Path(labels, objects)
class PropertyIO(_GraphBinaryTypeIO):
    """Serializer for Property: key string, fully-qualified value, and a
    trailing null placeholder."""
    python_type = Property
    graphbinary_type = DataType.property
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        # Key is written as an unqualified, non-nullable string.
        StringIO.dictify(obj.key, writer, to_extend, True, False)
        writer.toDict(obj.value, to_extend)
        # Trailing null value (2 bytes); the element owning the property
        # is not serialized.
        to_extend.extend(NULL_BYTES)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_property, nullable)
    @classmethod
    def _read_property(cls, b, r):
        p = Property(r.toObject(b, DataType.string, False), r.readObject(b), None)
        # Skip the trailing null value written by dictify.
        b.read(2)
        return p
class TinkerGraphIO(_GraphBinaryTypeIO):
    """Placeholder for DataType.graph: both directions raise, as graph
    (de)serialization is not implemented in gremlin-python."""
    python_type = Graph
    graphbinary_type = DataType.graph
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        raise AttributeError("TinkerGraph serialization is not currently supported by gremlin-python")
    @classmethod
    # NOTE(review): the second parameter is named as_value here but
    # nullable on every other objectify; harmless since this always
    # raises, but inconsistent.
    def objectify(cls, b, reader, as_value=False):
        raise AttributeError("TinkerGraph deserialization is not currently supported by gremlin-python")
class VertexIO(_GraphBinaryTypeIO):
    """Serializer for Vertex: id, label, and a trailing null placeholder."""
    python_type = Vertex
    graphbinary_type = DataType.vertex
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        writer.toDict(obj.id, to_extend)
        # Label is written as an unqualified, non-nullable string.
        StringIO.dictify(obj.label, writer, to_extend, True, False)
        # Trailing null value (2 bytes); vertex properties are not written.
        to_extend.extend(NULL_BYTES)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_vertex, nullable)
    @classmethod
    def _read_vertex(cls, b, r):
        vertex = Vertex(r.readObject(b), r.toObject(b, DataType.string, False))
        # Skip the trailing null value written by dictify.
        b.read(2)
        return vertex
class VertexPropertyIO(_GraphBinaryTypeIO):
    """Serializer for VertexProperty: id, label, value, and two trailing
    null placeholders."""
    python_type = VertexProperty
    graphbinary_type = DataType.vertexproperty
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        writer.toDict(obj.id, to_extend)
        # Label is written as an unqualified, non-nullable string.
        StringIO.dictify(obj.label, writer, to_extend, True, False)
        writer.toDict(obj.value, to_extend)
        # Two trailing null values (4 bytes); the owning vertex and
        # meta-properties are not serialized.
        to_extend.extend(NULL_BYTES)
        to_extend.extend(NULL_BYTES)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_vertexproperty, nullable)
    @classmethod
    def _read_vertexproperty(cls, b, r):
        vp = VertexProperty(r.readObject(b), r.toObject(b, DataType.string, False), r.readObject(b), None)
        # Skip the two trailing null values written by dictify.
        b.read(4)
        return vp
class _EnumIO(_GraphBinaryTypeIO):
    """Shared (de)serializer for enum-like Gremlin tokens: the value is
    written as a fully-qualified string of the member name (unmangled
    from Python keyword-safe forms such as ``in_``)."""
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        StringIO.dictify(cls.unmangle_keyword(str(obj.name)), writer, to_extend)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_enumval, nullable)
    @classmethod
    def _read_enumval(cls, b, r):
        # Look the decoded name up on the subclass's python_type enum.
        enum_name = r.toObject(b)
        return cls.python_type[enum_name]
# Concrete _EnumIO bindings: each pairs one Gremlin enum type with its
# GraphBinary type code; all behavior lives in _EnumIO.
class BarrierIO(_EnumIO):
    graphbinary_type = DataType.barrier
    python_type = Barrier
class CardinalityIO(_EnumIO):
    graphbinary_type = DataType.cardinality
    python_type = Cardinality
class ColumnIO(_EnumIO):
    graphbinary_type = DataType.column
    python_type = Column
class DirectionIO(_EnumIO):
    graphbinary_type = DataType.direction
    python_type = Direction
class OperatorIO(_EnumIO):
    graphbinary_type = DataType.operator
    python_type = Operator
class OrderIO(_EnumIO):
    graphbinary_type = DataType.order
    python_type = Order
class PickIO(_EnumIO):
    graphbinary_type = DataType.pick
    python_type = Pick
class PopIO(_EnumIO):
    graphbinary_type = DataType.pop
    python_type = Pop
class BindingIO(_GraphBinaryTypeIO):
    """GraphBinary serializer for Binding (named value) objects."""
    python_type = Binding
    graphbinary_type = DataType.binding
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        # Wire order: key as a value-only string, then the bound value.
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        StringIO.dictify(obj.key, writer, to_extend, True, False)
        writer.toDict(obj.value, to_extend)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        # Consistency fix: the lambda previously read the value via the
        # enclosing `reader` while reading the key via its own `r` parameter.
        # They are the same object, so behavior is unchanged, but using `r`
        # throughout matches every other deserializer in this module.
        return cls.is_null(buff, reader, lambda b, r: Binding(r.toObject(b, DataType.string, False),
                                                              r.readObject(b)), nullable)
class BytecodeIO(_GraphBinaryTypeIO):
    """GraphBinary serializer for Bytecode (and Traversal via TraversalIO)."""
    python_type = Bytecode
    graphbinary_type = DataType.bytecode
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        """Write step instructions then source instructions, each encoded as
        a count-prefixed list of [name, arg-count, args...]."""
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        # Accept either a Traversal (unwrap its bytecode) or raw Bytecode.
        bc = obj.bytecode if isinstance(obj, Traversal) else obj
        to_extend.extend(int32_pack(len(bc.step_instructions)))
        for inst in bc.step_instructions:
            inst_name, inst_args = inst[0], inst[1:] if len(inst) > 1 else []
            StringIO.dictify(inst_name, writer, to_extend, True, False)
            to_extend.extend(int32_pack(len(inst_args)))
            for arg in inst_args:
                writer.toDict(arg, to_extend)
        to_extend.extend(int32_pack(len(bc.source_instructions)))
        for inst in bc.source_instructions:
            inst_name, inst_args = inst[0], inst[1:] if len(inst) > 1 else []
            StringIO.dictify(inst_name, writer, to_extend, True, False)
            to_extend.extend(int32_pack(len(inst_args)))
            for arg in inst_args:
                # NOTE: only source-instruction args map bare classes to
                # GremlinType; step instructions do not (behavior preserved
                # as-is from the original).
                if isinstance(arg, TypeType):
                    writer.toDict(GremlinType(arg().fqcn), to_extend)
                else:
                    writer.toDict(arg, to_extend)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_bytecode, nullable)
    @classmethod
    def _read_bytecode(cls, b, r):
        """Read the count-prefixed step and source instruction lists.

        Idiom fix: the former manual while/counter loops are replaced with
        for-range loops; the bytes consumed and the resulting Bytecode are
        identical.
        """
        bytecode = Bytecode()
        for _ in range(cls.read_int(b)):
            inst = [r.toObject(b, DataType.string, False)]
            for _ in range(cls.read_int(b)):
                inst.append(r.readObject(b))
            bytecode.step_instructions.append(inst)
        for _ in range(cls.read_int(b)):
            inst = [r.toObject(b, DataType.string, False)]
            for _ in range(cls.read_int(b)):
                inst.append(r.readObject(b))
            bytecode.source_instructions.append(inst)
        return bytecode
class TraversalIO(BytecodeIO):
    # A Traversal serializes exactly like its underlying Bytecode; dictify
    # in the base class unwraps it via isinstance(obj, Traversal).
    python_type = GraphTraversal
class LambdaSerializer(_GraphBinaryTypeIO):
    """GraphBinary serializer for Python lambdas sent as Gremlin lambdas."""
    python_type = FunctionType
    graphbinary_type = DataType.lambda_
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        # Calling the lambda yields either a script string or a
        # (script, language) pair; default the language when only a script
        # string is returned.
        lambda_result = obj()
        script = lambda_result if isinstance(lambda_result, str) else lambda_result[0]
        language = statics.default_lambda_language if isinstance(lambda_result, str) else lambda_result[1]
        StringIO.dictify(language, writer, to_extend, True, False)
        script_cleaned = script
        # -1 signals an "unknown" argument count to the server.
        script_args = -1
        if language == "gremlin-groovy" and "->" in script:
            # if the user has explicitly added parameters to the groovy closure then we can easily detect one or two
            # arg lambdas - if we can't detect 1 or 2 then we just go with "unknown"
            args = script[0:script.find("->")]
            script_args = 2 if "," in args else 1
        StringIO.dictify(script_cleaned, writer, to_extend, True, False)
        to_extend.extend(int32_pack(script_args))
        return to_extend
class PSerializer(_GraphBinaryTypeIO):
    """GraphBinary serializer for P predicates (e.g. P.gt, P.within)."""
    graphbinary_type = DataType.p
    python_type = P
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        # NOTE: this body mirrors TextPSerializer.dictify -- keep in sync.
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        StringIO.dictify(obj.operator, writer, to_extend, True, False)
        args = []
        if obj.other is None:
            # Single-operand predicate: a list value (e.g. within) already
            # is the argument list; otherwise wrap the single value.
            if isinstance(obj.value, ListType):
                args = obj.value
            else:
                args.append(obj.value)
        else:
            # Two-operand predicate (e.g. between/inside).
            args.append(obj.value)
            args.append(obj.other)
        to_extend.extend(int32_pack(len(args)))
        for a in args:
            writer.toDict(a, to_extend)
        return to_extend
# More enum serializers backed by _EnumIO.
class ScopeIO(_EnumIO):
    graphbinary_type = DataType.scope
    python_type = Scope
class TIO(_EnumIO):
    graphbinary_type = DataType.t
    python_type = T
class TraverserIO(_GraphBinaryTypeIO):
    """GraphBinary serializer for Traverser (a result object plus its bulk)."""
    graphbinary_type = DataType.traverser
    python_type = Traverser
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        # Wire order: 8-byte bulk count, then the wrapped object.
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int64_pack(obj.bulk))
        writer.toDict(obj.object, to_extend)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_traverser, nullable)
    @classmethod
    def _read_traverser(cls, b, r):
        # Must read in wire order: bulk first, then the traversed object.
        bulk_count = int64_unpack(b.read(8))
        traversed = r.readObject(b)
        return Traverser(traversed, bulk=bulk_count)
class ByteIO(_GraphBinaryTypeIO):
    """GraphBinary serializer for single signed bytes (SingleByte)."""
    python_type = SingleByte
    graphbinary_type = DataType.byte
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int8_pack(obj))
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        # int.__new__ builds the SingleByte subclass directly from the
        # unpacked int -- presumably to bypass constructor-side validation;
        # TODO confirm against SingleByte's definition.
        return cls.is_null(buff, reader,
                           lambda b, r: int.__new__(SingleByte, int8_unpack(b.read(1))),
                           nullable)
class ByteBufferIO(_GraphBinaryTypeIO):
    """GraphBinary serializer for raw byte buffers."""
    python_type = ByteBufferType
    graphbinary_type = DataType.bytebuffer
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        # Length-prefixed payload: 4-byte size followed by the raw bytes.
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int32_pack(len(obj)))
        to_extend.extend(obj)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_bytebuffer, nullable)
    @classmethod
    def _read_bytebuffer(cls, b, r):
        # Read the size prefix, then exactly that many raw bytes.
        payload_len = cls.read_int(b)
        return ByteBufferType(b.read(payload_len))
class BooleanIO(_GraphBinaryTypeIO):
    """GraphBinary serializer for booleans (a single 0x01/0x00 byte)."""
    python_type = bool
    graphbinary_type = DataType.boolean
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        to_extend.extend(int8_pack(0x01 if obj else 0x00))
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        # Idiom fix: the comparison already yields a bool, so the former
        # "True if ... else False" ternary was redundant.
        return cls.is_null(buff, reader,
                           lambda b, r: int8_unpack(b.read(1)) == 0x01,
                           nullable)
class TextPSerializer(_GraphBinaryTypeIO):
    """GraphBinary serializer for TextP predicates (e.g. TextP.containing)."""
    graphbinary_type = DataType.textp
    python_type = TextP
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        # NOTE: this body mirrors PSerializer.dictify -- keep in sync.
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        StringIO.dictify(obj.operator, writer, to_extend, True, False)
        args = []
        if obj.other is None:
            # Single-operand predicate: list values are the argument list.
            if isinstance(obj.value, ListType):
                args = obj.value
            else:
                args.append(obj.value)
        else:
            # Two-operand predicate.
            args.append(obj.value)
            args.append(obj.other)
        to_extend.extend(int32_pack(len(args)))
        for a in args:
            writer.toDict(a, to_extend)
        return to_extend
class BulkSetDeserializer(_GraphBinaryTypeIO):
    """Deserializer-only handler for BulkSet; expands bulks to a flat list."""
    graphbinary_type = DataType.bulkset
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_bulkset, nullable)
    @classmethod
    def _read_bulkset(cls, b, r):
        # Each wire entry is an object followed by an 8-byte bulk count;
        # the Python-side result repeats every object bulk-count times.
        entry_count = cls.read_int(b)
        expanded = []
        for _ in range(entry_count):
            item = r.readObject(b)
            bulk = int64_unpack(b.read(8))
            expanded.extend([item] * bulk)
        return expanded
class MetricsDeserializer(_GraphBinaryTypeIO):
    """Deserializer-only handler for one Metrics entry of a profile result."""
    graphbinary_type = DataType.metrics
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_metrics, nullable)
    @classmethod
    def _read_metrics(cls, b, r):
        # Wire order is fixed: id, name, duration, counts, annotations,
        # then nested (child) metrics.
        metricid = r.toObject(b, DataType.string, False)
        name = r.toObject(b, DataType.string, False)
        duration = r.toObject(b, DataType.long, nullable=False)
        counts = r.toObject(b, DataType.map, nullable=False)
        annotations = r.toObject(b, DataType.map, nullable=False)
        metrics = r.toObject(b, DataType.list, nullable=False)
        return {"id": metricid,
                "name": name,
                "dur": duration,
                "counts": counts,
                "annotations": annotations,
                "metrics": metrics}
class TraversalMetricsDeserializer(_GraphBinaryTypeIO):
    """Deserializer-only handler for TraversalMetrics."""
    graphbinary_type = DataType.traversalmetrics
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_traversalmetrics, nullable)
    @classmethod
    def _read_traversalmetrics(cls, b, r):
        # Dict values are evaluated in order, matching the wire layout:
        # total duration (long) first, then the per-step metrics list.
        return {
            "dur": r.toObject(b, DataType.long, nullable=False),
            "metrics": r.toObject(b, DataType.list, nullable=False),
        }
class ClassSerializer(_GraphBinaryTypeIO):
    """Serializer-only handler for GremlinType (a Java class reference)."""
    graphbinary_type = DataType.clazz
    python_type = GremlinType
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        # The fully-qualified class name travels as a value-only string.
        StringIO.dictify(obj.gremlin_type, writer, to_extend, True, False)
        return to_extend
class TraversalStrategySerializer(_GraphBinaryTypeIO):
    """Serializer-only handler for TraversalStrategy instances."""
    graphbinary_type = DataType.traversalstrategy
    python_type = TraversalStrategy
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        # The strategy's identity is its fully-qualified Java class name.
        ClassSerializer.dictify(GremlinType(obj.fqcn), writer, to_extend, True, False)
        # Configuration values that are traversals travel as bytecode.
        conf = {k: cls._convert(v) for k, v in obj.configuration.items()}
        MapIO.dictify(conf, writer, to_extend, True, False)
        return to_extend
    @classmethod
    def _convert(cls, v):
        # Unwrap a Traversal to its Bytecode; pass anything else through.
        return v.bytecode if isinstance(v, Traversal) else v
class DurationIO(_GraphBinaryTypeIO):
    """GraphBinary serializer for timedelta, encoded as seconds + nanoseconds."""
    python_type = timedelta
    graphbinary_type = DataType.duration
    @classmethod
    def dictify(cls, obj, writer, to_extend, as_value=False, nullable=True):
        cls.prefix_bytes(cls.graphbinary_type, as_value, nullable, to_extend)
        # Bug fix: timedelta.seconds only holds the in-day remainder
        # (0..86399) and silently drops the .days component, so any duration
        # of one day or more was serialized incorrectly.  Fold days into the
        # whole-second total; the sub-second part still travels as
        # nanoseconds (microseconds * 1000).
        whole_seconds = obj.days * 86400 + obj.seconds
        LongIO.dictify(whole_seconds, writer, to_extend, True, False)
        IntIO.dictify(obj.microseconds * 1000, writer, to_extend, True, False)
        return to_extend
    @classmethod
    def objectify(cls, buff, reader, nullable=True):
        return cls.is_null(buff, reader, cls._read_duration, nullable)
    @classmethod
    def _read_duration(cls, b, r):
        seconds = r.toObject(b, DataType.long, False)
        nanos = r.toObject(b, DataType.int, False)
        # timedelta normalizes oversized seconds and fractional microseconds
        # itself, so round-tripping multi-day durations now works.
        return timedelta(seconds=seconds, microseconds=nanos / 1000)
| {
"content_hash": "b80aea52105aa1e6388dbbb7f79720c7",
"timestamp": "",
"source": "github",
"line_count": 1080,
"max_line_length": 119,
"avg_line_length": 32.15648148148148,
"alnum_prop": 0.632439747761237,
"repo_name": "apache/incubator-tinkerpop",
"id": "a5a258a6429383e2c963265370b313bc205ce96a",
"size": "34729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gremlin-python/src/main/python/gremlin_python/structure/io/graphbinaryV1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4544"
},
{
"name": "Groovy",
"bytes": "369370"
},
{
"name": "Java",
"bytes": "6510259"
},
{
"name": "Python",
"bytes": "1481"
},
{
"name": "Shell",
"bytes": "24104"
}
],
"symlink_target": ""
} |
"""Switch between depending on pyglib.app or an OSS replacement."""
from __future__ import absolute_import
# pylint: disable=unused-import
# pylint: disable=g-import-not-at-top
# pylint: disable=wildcard-import
import tensorflow.python.platform
from . import control_imports
if control_imports.USE_OSS and control_imports.OSS_APP:
from tensorflow.python.platform.default._app import *
else:
from tensorflow.python.platform.google._app import *
# Import 'flags' into this module
from tensorflow.python.platform import flags
| {
"content_hash": "848d90bad230ae5bb20ec607070cef2d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 67,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.7859848484848485,
"repo_name": "aksaxena80/test",
"id": "7186d6e0b5515001d35884a8a93a1dcbf90ff60e",
"size": "528",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/platform/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127080"
},
{
"name": "C++",
"bytes": "4875335"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "631255"
},
{
"name": "Java",
"bytes": "44192"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "44898"
},
{
"name": "Python",
"bytes": "2425565"
},
{
"name": "Shell",
"bytes": "1036"
},
{
"name": "TypeScript",
"bytes": "236089"
}
],
"symlink_target": ""
} |
"""
conference.py -- Udacity conference server-side Python App Engine API;
uses Google Cloud Endpoints
$Id: conference.py,v 1.25 2014/05/24 23:42:19 wesc Exp wesc $
created by wesc on 2014 apr 21
"""
__author__ = 'wesc+api@google.com (Wesley Chun)'
from datetime import datetime
import endpoints
import logging
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.ext import ndb
from models import ConflictException
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import StringMessage
from models import BooleanMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForm
from models import ConferenceQueryForms
from models import TeeShirtSize
from models import Session
from models import SessionForm
from models import SessionForms
from settings import WEB_CLIENT_ID
from settings import ANDROID_CLIENT_ID
from settings import IOS_CLIENT_ID
from settings import ANDROID_AUDIENCE
from utils import getUserId
import operator
# OAuth scopes / client ids used by the endpoints API decorator below.
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
# Memcache keys and the announcement template used by _cacheAnnouncement().
MEMCACHE_ANNOUNCEMENTS_KEY = "RECENT_ANNOUNCEMENTS"
ANNOUNCEMENT_TPL = ('Last chance to attend! The following conferences '
                    'are nearly sold out: %s')
MEMCACHE_FEATURED_KEY = "FEATURED_SPEAKER"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Default field values applied when a create request omits them.
DEFAULTS = {
    "city": "Default City",
    "maxAttendees": 0,
    "seatsAvailable": 0,
    "topics": ["Default", "Topic"],
}
SESSION_DEFAULTS = {
    "highlights": "highlights about the session",
    "speaker": "Speaker name",
    "duration": "1:00",  # hour then minutes.
    "typeOfSession": ["no", "type"],
    "date": "2016-06-06",
    # hour then minutes in 24 hour formate (no am and pm).
    "startTime": "12:00",
}
# Maps query-form operator/field tokens to datastore filter syntax;
# used by _formatFilters().
OPERATORS = {
    'EQ': '=',
    'GT': '>',
    'GTEQ': '>=',
    'LT': '<',
    'LTEQ': '<=',
    'NE': '!='
}
FIELDS = {
    'CITY': 'city',
    'TOPIC': 'topics',
    'MONTH': 'month',
    'MAX_ATTENDEES': 'maxAttendees',
}
# ResourceContainers combine a request body message with URL path/query
# parameters for the endpoints methods below.
CONF_GET_REQUEST = endpoints.ResourceContainer(
    message_types.VoidMessage,
    websafeConferenceKey=messages.StringField(1),
)
CONF_POST_REQUEST = endpoints.ResourceContainer(
    ConferenceForm,
    websafeConferenceKey=messages.StringField(1),
)
SESS_POST_REQUEST = endpoints.ResourceContainer(
    SessionForm,
    websafeConferenceKey=messages.StringField(1),
)
SESS_GET_REQUEST = endpoints.ResourceContainer(
    websafeConferenceKey=messages.StringField(1),
)
SESS_GET_REQUEST_TYPE = endpoints.ResourceContainer(
    websafeConferenceKey=messages.StringField(1),
    typeOfSession=messages.StringField(2),
)
SESS_GET_REQUEST_SPEAKER = endpoints.ResourceContainer(
    speaker=messages.StringField(1),
)
WISHLIST_POST_REQUEST = endpoints.ResourceContainer(
    websafeSessionKey=messages.StringField(1),
)
SESS_GET_REQUEST_TYPE_TIME = endpoints.ResourceContainer(
    typeOfSession=messages.StringField(2),
    startTime=messages.StringField(3),
)
@endpoints.api(name='conference', version='v1', audiences=[ANDROID_AUDIENCE],
allowed_client_ids=[
WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID, ANDROID_CLIENT_ID, IOS_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Conference objects - - - - - - - - - - - - - - - - -
    def _copyConferenceToForm(self, conf, displayName):
        """Copy relevant fields from Conference to ConferenceForm."""
        cf = ConferenceForm()
        for field in cf.all_fields():
            if hasattr(conf, field.name):
                # convert Date to date string; just copy others
                if field.name.endswith('Date'):
                    setattr(cf, field.name, str(getattr(conf, field.name)))
                else:
                    setattr(cf, field.name, getattr(conf, field.name))
            elif field.name == "websafeKey":
                # the form carries the entity key in URL-safe string form
                setattr(cf, field.name, conf.key.urlsafe())
        if displayName:
            # organizer display name is supplied by the caller since it is
            # stored on the Profile entity, not the Conference
            setattr(cf, 'organizerDisplayName', displayName)
        cf.check_initialized()
        return cf
    def _createConferenceObject(self, request):
        """Create or update Conference object, returning ConferenceForm/request.

        Raises UnauthorizedException without a signed-in user and
        BadRequestException without a conference name.  Also enqueues a
        confirmation-email task.
        """
        # preload necessary data items
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        if not request.name:
            raise endpoints.BadRequestException(
                "Conference 'name' field required")
        # copy ConferenceForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
        # these two are derived, not stored directly from the request
        del data['websafeKey']
        del data['organizerDisplayName']
        # add default values for those missing (both data model & outbound
        # Message)
        for df in DEFAULTS:
            if data[df] in (None, []):
                data[df] = DEFAULTS[df]
                setattr(request, df, DEFAULTS[df])
        # convert dates from strings to Date objects; set month based on
        # start_date
        if data['startDate']:
            data['startDate'] = datetime.strptime(
                data['startDate'][:10], "%Y-%m-%d").date()
            data['month'] = data['startDate'].month
        else:
            data['month'] = 0
        if data['endDate']:
            data['endDate'] = datetime.strptime(
                data['endDate'][:10], "%Y-%m-%d").date()
        # set seatsAvailable to be same as maxAttendees on creation
        if data["maxAttendees"] > 0:
            data["seatsAvailable"] = data["maxAttendees"]
        # generate Profile Key based on user ID and Conference
        # ID based on Profile key get Conference key from ID
        p_key = ndb.Key(Profile, user_id)
        c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
        c_key = ndb.Key(Conference, c_id, parent=p_key)
        data['key'] = c_key
        data['organizerUserId'] = request.organizerUserId = user_id
        # create Conference, send email to organizer confirming
        # creation of Conference & return (modified) ConferenceForm
        Conference(**data).put()
        taskqueue.add(params={'email': user.email(),
                              'conferenceInfo': repr(request)},
                      url='/tasks/send_confirmation_email'
                      )
        return request
@ndb.transactional()
def _updateConferenceObject(self, request):
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name)
for field in request.all_fields()}
# update existing conference
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
# check that conference exists
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % request.websafeConferenceKey)
# check that user is owner
if user_id != conf.organizerUserId:
raise endpoints.ForbiddenException(
'Only the owner can update the conference.')
# Not getting all the fields, so don't create a new object; just
# copy relevant fields from ConferenceForm to Conference object
for field in request.all_fields():
data = getattr(request, field.name)
# only copy fields where we get data
if data not in (None, []):
# special handling for dates (convert string to Date)
if field.name in ('startDate', 'endDate'):
data = datetime.strptime(data, "%Y-%m-%d").date()
if field.name == 'startDate':
conf.month = data.month
# write to Conference object
setattr(conf, field.name, data)
conf.put()
prof = ndb.Key(Profile, user_id).get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(ConferenceForm, ConferenceForm, path='conference',
                      http_method='POST', name='createConference')
    def createConference(self, request):
        """Create new conference."""
        # thin endpoint wrapper; all logic lives in _createConferenceObject
        return self._createConferenceObject(request)
    @endpoints.method(CONF_POST_REQUEST, ConferenceForm,
                      path='conference/{websafeConferenceKey}',
                      http_method='PUT', name='updateConference')
    def updateConference(self, request):
        """Update conference w/provided fields & return w/updated info."""
        # thin endpoint wrapper; all logic lives in _updateConferenceObject
        return self._updateConferenceObject(request)
    @endpoints.method(CONF_GET_REQUEST, ConferenceForm,
                      path='conference/{websafeConferenceKey}',
                      http_method='GET', name='getConference')
    def getConference(self, request):
        """Return requested conference (by websafeConferenceKey)."""
        # get Conference object from request; bail if not found
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % request.websafeConferenceKey)
        # the conference's datastore parent is its organizer's Profile
        prof = conf.key.parent().get()
        # return ConferenceForm
        return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='getConferencesCreated',
                      http_method='POST', name='getConferencesCreated')
    def getConferencesCreated(self, request):
        """Return conferences created by user."""
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        user_id = getUserId(user)
        # create ancestor query for all key matches for this user
        # (conferences are stored as children of their organizer's Profile)
        confs = Conference.query(ancestor=ndb.Key(Profile, user_id))
        prof = ndb.Key(Profile, user_id).get()
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(
                conf, getattr(prof, 'displayName')) for conf in confs]
        )
    def _getQuery(self, request):
        """Return formatted query from the submitted filters."""
        q = Conference.query()
        inequality_filter, filters = self._formatFilters(request.filters)
        # If exists, sort on inequality filter first
        # (the datastore requires the first sort order to match the
        # property used in an inequality filter)
        if not inequality_filter:
            q = q.order(Conference.name)
        else:
            q = q.order(ndb.GenericProperty(inequality_filter))
            q = q.order(Conference.name)
        for filtr in filters:
            # these properties are stored as ints; coerce the form value
            if filtr["field"] in ["month", "maxAttendees"]:
                filtr["value"] = int(filtr["value"])
            formatted_query = ndb.query.FilterNode(
                filtr["field"], filtr["operator"], filtr["value"])
            q = q.filter(formatted_query)
        return q
    def _formatFilters(self, filters):
        """Parse, check validity and format user supplied filters.

        Returns (inequality_field, formatted_filters); raises
        BadRequestException on unknown field/operator tokens or when more
        than one field uses an inequality operator (datastore restriction).
        """
        formatted_filters = []
        inequality_field = None
        for f in filters:
            filtr = {field.name: getattr(f, field.name)
                     for field in f.all_fields()}
            try:
                # translate form tokens (e.g. 'CITY', 'GTEQ') to datastore
                # property names and operators
                filtr["field"] = FIELDS[filtr["field"]]
                filtr["operator"] = OPERATORS[filtr["operator"]]
            except KeyError:
                raise endpoints.BadRequestException(
                    "Filter contains invalid field or operator.")
            # Every operation except "=" is an inequality
            if filtr["operator"] != "=":
                # check if inequality operation has been used in previous filters
                # disallow the filter if inequality was performed on a different field before
                # track the field on which the inequality operation is
                # performed
                if inequality_field and inequality_field != filtr["field"]:
                    raise endpoints.BadRequestException(
                        "Inequality filter is allowed on only one field.")
                else:
                    inequality_field = filtr["field"]
            formatted_filters.append(filtr)
        return (inequality_field, formatted_filters)
    @endpoints.method(ConferenceQueryForms, ConferenceForms,
                      path='queryConferences',
                      http_method='POST',
                      name='queryConferences')
    def queryConferences(self, request):
        """Query for conferences."""
        conferences = self._getQuery(request)
        # need to fetch organiser displayName from profiles
        # get all keys and use get_multi for speed
        organisers = [(ndb.Key(Profile, conf.organizerUserId))
                      for conf in conferences]
        profiles = ndb.get_multi(organisers)
        # put display names in a dict for easier fetching
        # NOTE(review): assumes every organizer Profile exists; a missing
        # profile would come back as None and fail here -- confirm.
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName
        # return individual ConferenceForm object per Conference
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, names[conf.organizerUserId]) for conf in
                   conferences]
        )
# - - - Profile objects - - - - - - - - - - - - - - - - - - -
    def _copyProfileToForm(self, prof):
        """Copy relevant fields from Profile to ProfileForm."""
        # copy relevant fields from Profile to ProfileForm
        pf = ProfileForm()
        for field in pf.all_fields():
            if hasattr(prof, field.name):
                # convert t-shirt string to Enum; just copy others
                # (Profile stores the size as a string; the form uses the
                # TeeShirtSize enum)
                if field.name == 'teeShirtSize':
                    setattr(
                        pf, field.name, getattr(TeeShirtSize, getattr(prof, field.name)))
                else:
                    setattr(pf, field.name, getattr(prof, field.name))
        pf.check_initialized()
        return pf
    def _getProfileFromUser(self):
        """Return user Profile from datastore, creating new one if non-existent."""
        # make sure user is authed
        user = endpoints.get_current_user()
        if not user:
            raise endpoints.UnauthorizedException('Authorization required')
        # get Profile from datastore (keyed directly by the user id)
        user_id = getUserId(user)
        p_key = ndb.Key(Profile, user_id)
        profile = p_key.get()
        # create new Profile if not there, seeded from the auth account
        if not profile:
            profile = Profile(
                key=p_key,
                displayName=user.nickname(),
                mainEmail=user.email(),
                teeShirtSize=str(TeeShirtSize.NOT_SPECIFIED),
            )
            profile.put()
        return profile  # return Profile
    def _doProfile(self, save_request=None):
        """Get user Profile and return to user, possibly updating it first.

        With save_request, copies the user-modifiable fields onto the
        Profile and persists it before returning the form.
        """
        # get user Profile
        prof = self._getProfileFromUser()
        # if saveProfile(), process user-modifyable fields
        if save_request:
            for field in ('displayName', 'teeShirtSize'):
                if hasattr(save_request, field):
                    val = getattr(save_request, field)
                    if val:
                        # str() normalizes the enum value for storage
                        setattr(prof, field, str(val))
                        # if field == 'teeShirtSize':
                        #    setattr(prof, field, str(val).upper())
                        # else:
                        #    setattr(prof, field, val)
                        prof.put()
        # return ProfileForm
        return self._copyProfileToForm(prof)
    @endpoints.method(message_types.VoidMessage, ProfileForm,
                      path='profile', http_method='GET', name='getProfile')
    def getProfile(self, request):
        """Return user profile."""
        # read-only path: no save_request, so _doProfile only fetches
        return self._doProfile()
    @endpoints.method(ProfileMiniForm, ProfileForm,
                      path='profile', http_method='POST', name='saveProfile')
    def saveProfile(self, request):
        """Update & return user profile."""
        return self._doProfile(request)
# - - - Announcements - - - - - - - - - - - - - - - - - - - -
    @staticmethod
    def _cacheAnnouncement():
        """Create Announcement & assign to memcache; used by
        memcache cron job & putAnnouncement().
        """
        # "nearly sold out" means 1-5 seats remaining
        confs = Conference.query(ndb.AND(
            Conference.seatsAvailable <= 5,
            Conference.seatsAvailable > 0)
        ).fetch(projection=[Conference.name])
        if confs:
            # If there are almost sold out conferences,
            # format announcement and set it in memcache
            announcement = ANNOUNCEMENT_TPL % (
                ', '.join(conf.name for conf in confs))
            memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
        else:
            # If there are no sold out conferences,
            # delete the memcache announcements entry
            announcement = ""
            memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
        return announcement
    @endpoints.method(message_types.VoidMessage, StringMessage,
                      path='conference/announcement/get',
                      http_method='GET', name='getAnnouncement')
    def getAnnouncement(self, request):
        """Return Announcement from memcache."""
        # empty string when no announcement is cached
        return StringMessage(data=memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY) or "")
# - - - Registration - - - - - - - - - - - - - - - - - - - -
    @ndb.transactional(xg=True)
    def _conferenceRegistration(self, request, reg=True):
        """Register or unregister user for selected conference.

        Cross-group transaction: the Profile and Conference entities are
        updated atomically so the seat count stays consistent.
        """
        retval = None
        prof = self._getProfileFromUser()  # get user Profile
        # check if conf exists given websafeConfKey
        # get conference; check that it exists
        wsck = request.websafeConferenceKey
        conf = ndb.Key(urlsafe=wsck).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' % wsck)
        # register
        if reg:
            # check if user already registered otherwise add
            if wsck in prof.conferenceKeysToAttend:
                raise ConflictException(
                    "You have already registered for this conference")
            # check if seats avail
            if conf.seatsAvailable <= 0:
                raise ConflictException(
                    "There are no seats available.")
            # register user, take away one seat
            prof.conferenceKeysToAttend.append(wsck)
            conf.seatsAvailable -= 1
            retval = True
        # unregister
        else:
            # check if user already registered
            if wsck in prof.conferenceKeysToAttend:
                # unregister user, add back one seat
                prof.conferenceKeysToAttend.remove(wsck)
                conf.seatsAvailable += 1
                retval = True
            else:
                # unregistering when not registered is a no-op, not an error
                retval = False
        # write things back to the datastore & return
        prof.put()
        conf.put()
        return BooleanMessage(data=retval)
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='conferences/attending',
                      http_method='GET', name='getConferencesToAttend')
    def getConferencesToAttend(self, request):
        """Get list of conferences that user has registered for."""
        prof = self._getProfileFromUser()  # get user Profile
        # batch-fetch all registered conferences in one round trip
        conf_keys = [ndb.Key(urlsafe=wsck)
                     for wsck in prof.conferenceKeysToAttend]
        conferences = ndb.get_multi(conf_keys)
        # get organizers
        organisers = [ndb.Key(Profile, conf.organizerUserId)
                      for conf in conferences]
        profiles = ndb.get_multi(organisers)
        # put display names in a dict for easier fetching
        names = {}
        for profile in profiles:
            names[profile.key.id()] = profile.displayName
        # return set of ConferenceForm objects per Conference
        return ConferenceForms(items=[self._copyConferenceToForm(conf, names[conf.organizerUserId])
                                      for conf in conferences]
                               )
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                      path='conference/{websafeConferenceKey}',
                      http_method='POST', name='registerForConference')
    def registerForConference(self, request):
        """Register user for selected conference."""
        # POST registers, DELETE (below) unregisters; same path
        return self._conferenceRegistration(request)
    @endpoints.method(CONF_GET_REQUEST, BooleanMessage,
                      path='conference/{websafeConferenceKey}',
                      http_method='DELETE', name='unregisterFromConference')
    def unregisterFromConference(self, request):
        """Unregister user for selected conference."""
        return self._conferenceRegistration(request, reg=False)
    @endpoints.method(message_types.VoidMessage, ConferenceForms,
                      path='filterPlayground',
                      http_method='GET', name='filterPlayground')
    def filterPlayground(self, request):
        """Filter Playground"""
        # scratch endpoint for experimenting with hard-coded query filters
        q = Conference.query()
        # field = "city"
        # operator = "="
        # value = "London"
        # f = ndb.query.FilterNode(field, operator, value)
        # q = q.filter(f)
        q = q.filter(Conference.city == "London")
        q = q.filter(Conference.topics == "Medical Innovations")
        q = q.filter(Conference.month == 6)
        return ConferenceForms(
            items=[self._copyConferenceToForm(conf, "") for conf in q]
        )
# - - - Sessions - - - - - - - - -
    @endpoints.method(SESS_GET_REQUEST_SPEAKER, SessionForms,
                      path='sessions/speaker',
                      http_method='GET', name='getSessionsBySpeaker')
    def getSessionsBySpeaker(self, request):
        """get sessions using the speaker"""
        # _getSessions inspects which fields the request carries
        return self._getSessions(request)
    @endpoints.method(SESS_GET_REQUEST_TYPE, SessionForms,
                      path='conference/sessions/type/{websafeConferenceKey}',
                      http_method='GET', name='getConferenceSessionsByType')
    def getConferenceSessionsByType(self, request):
        """get sessions in a conference using the type of session"""
        return self._getSessions(request)
def _copySessionToForm(self, session):
return SessionForm(sessionName=session.sessionName,
highlights=session.highlights,
speaker=session.speaker,
duration=str(session.duration),
typeOfSession=session.typeOfSession,
date=str(session.date),
startTime=str(session.startTime),
conferenceId=session.conferenceId,
websafeKey=session.key.urlsafe())
    def _getSessions(self, request):
        """Shared session query builder.

        Applies filters based on which fields the request's
        ResourceContainer defines: conference ancestor, session type,
        speaker, and/or a latest start time.
        """
        if hasattr(request, 'websafeConferenceKey'):
            # scope to one conference via an ancestor query
            conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
            q = Session.query(ancestor=conf_key)
        else:
            q = Session.query()
        if hasattr(request, 'typeOfSession'):
            q = q.filter(Session.typeOfSession == request.typeOfSession)
        if hasattr(request, 'speaker'):
            q = q.filter(Session.speaker == request.speaker)
        if hasattr(request, 'startTime'):
            # "%H:%M" is 24-hour format, matching SESSION_DEFAULTS
            startTime = datetime.strptime(
                request.startTime, "%H:%M").time()
            q = q.filter(Session.startTime <= startTime)
        return SessionForms(
            items=[self._copySessionToForm(session) for session in q]
        )
def _getSessionsByTime(self, request):
q = Session.query()
if hasattr(request, 'typeOfSession'):
q = q.filter(Session.typeOfSession != request.typeOfSession)
sessions = []
startTime = datetime.strptime(request.startTime, "%H:%M").time()
for session in q:
if session.startTime <= startTime:
sessions.append(session)
return SessionForms(
items=[self._copySessionToForm(session)
for session in sessions]
)
    @endpoints.method(SESS_GET_REQUEST, SessionForms,
                      path='conference/sessions/{websafeConferenceKey}',
                      http_method='GET', name='getConferenceSessions')
    def getConferenceSessions(self, request):
        """Get all sessions belonging to a conference."""
        return self._getSessions(request)
    @endpoints.method(SESS_POST_REQUEST, BooleanMessage,
                      path='conference/create_session/{websafeConferenceKey}',
                      http_method='POST', name='createSession')
    def createSession(self, request):
        """Create a new session in a conference (organizer only)."""
        return self._createSession(request)
    def _createSession(self, request):
        """Create a Session entity as a child of the given conference.

        Only the conference organizer may add sessions.  After the entity
        is stored, a task is queued to (possibly) update the featured
        speaker for the conference.

        Raises:
            BadRequestException: if sessionName is missing.
            UnauthorizedException: if the caller is not the organizer.
        """
        # to use later as a parent for the session
        conf_key = ndb.Key(urlsafe=request.websafeConferenceKey)
        conf = conf_key.get()
        if not request.sessionName:
            raise endpoints.BadRequestException(
                "Session 'name' field required")
        user = endpoints.get_current_user()
        user_id = getUserId(user)
        if conf.organizerUserId != user_id:
            raise endpoints.UnauthorizedException('Authorization required')
        # # copy SessionForm/ProtoRPC Message into dict
        data = {field.name: getattr(request, field.name)
                for field in request.all_fields()}
        # Fill any unset fields with the configured defaults.
        for df in SESSION_DEFAULTS:
            if data[df] in (None, []):
                data[df] = SESSION_DEFAULTS[df]
            # setattr(request, df, DEFAULTS[df])
        del data['websafeConferenceKey']
        del data['websafeKey']
        # Convert string fields into datastore date/time values.
        if data['date']:
            data['date'] = datetime.strptime(
                data['date'][:10], "%Y-%m-%d").date()
        if data['startTime']:
            data['startTime'] = datetime.strptime(
                data['startTime'], "%H:%M").time()
        # NOTE(review): duration is parsed as a clock time ("HH:MM"), so it
        # is stored as a time-of-day rather than a timedelta — confirm.
        if data['duration']:
            data['duration'] = datetime.strptime(
                data['duration'], "%H:%M").time()
        # Allocate an id under the conference so the session is its child.
        s_id = Session.allocate_ids(size=1, parent=conf_key)[0]
        session_key = ndb.Key(Session, s_id, parent=conf_key)
        data['key'] = session_key
        data['conferenceId'] = conf_key.id()
        Session(**data).put()
        # Queue the featured-speaker recomputation for this conference.
        taskqueue.add(url='/tasks/featured_speaker',
                      params={'conference_key': request.websafeConferenceKey,
                              'speaker_name': request.speaker})
        return BooleanMessage(data=True)
    @endpoints.method(WISHLIST_POST_REQUEST, BooleanMessage,
                      path='conference/sessions/add_to_user/{websafeSessionKey}',
                      http_method='POST', name='addSessionToUserWishlist')
    def addSessionToUserWishlist(self, request):
        """Add the given session to the current user's wishlist."""
        return self._sessionWishList(request, True)
    @endpoints.method(WISHLIST_POST_REQUEST, BooleanMessage,
                      path='conference/sessions/delete_from_user/{websafeSessionKey}',
                      http_method='DELETE',
                      name='deleteSessionFromUserWishlist')
    def deleteSessionFromUserWishlist(self, request):
        """Remove the given session from the current user's wishlist."""
        return self._sessionWishList(request, False)
    def _sessionWishList(self, request, add=True):
        """Add or remove a session from the current user's wishlist.

        Args:
            request: message carrying websafeSessionKey.
            add: True to add the session, False to remove it.

        Returns:
            BooleanMessage(True) on success, BooleanMessage(False) when
            asked to remove a session that is not in the wishlist.

        Raises:
            NotFoundException: if no session exists for the key.
            ConflictException: if adding an already-wishlisted session.
        """
        retval = None
        prof = self._getProfileFromUser() # get user Profile
        # check if conf exists given websafeConfKey
        # get conference; check that it exists
        wssk = request.websafeSessionKey
        session = ndb.Key(urlsafe=wssk).get()
        if not session:
            raise endpoints.NotFoundException(
                'No session found with key: %s' % wssk)
        # add
        if add:
            # check if user already has the session
            if wssk in prof.sessionKeysWishlist:
                raise ConflictException(
                    "You have already this session in your wishlist")
            prof.sessionKeysWishlist.append(wssk)
            retval = True
        # delete
        else:
            # check if user has the session in wishlist
            if wssk in prof.sessionKeysWishlist:
                # delete the session
                prof.sessionKeysWishlist.remove(wssk)
                retval = True
            else:
                retval = False
        # write things back to the datastore & return
        prof.put()
        return BooleanMessage(data=retval)
    @endpoints.method(message_types.VoidMessage, SessionForms,
                      path='conference/sessions/user_wishlist',
                      http_method='GET', name='getUserWishList')
    def getUserWishList(self, request):
        """Return all sessions in the current user's wishlist."""
        prof = self._getProfileFromUser() # get user Profile
        # Batch-fetch every wishlisted session in a single datastore call.
        session_keys = [ndb.Key(urlsafe=wssk)
                        for wssk in prof.sessionKeysWishlist]
        sessions = ndb.get_multi(session_keys)
        return SessionForms(items=[self._copySessionToForm(session)
                                   for session in sessions]
                            )
    @endpoints.method(SESS_GET_REQUEST_TYPE_TIME, SessionForms,
                      path='sessions/type_time',
                      http_method='GET', name='getSessionsByTypeAndTime')
    def getSessionsByTypeAndTime(self, request):
        """Return sessions of a given type starting at/before a time."""
        return self._getSessions(request)
    @endpoints.method(SESS_GET_REQUEST_TYPE_TIME, SessionForms,
                      path='sessions/not_type_time',
                      http_method='GET', name='getSessionsByNotTypeAndTime')
    def getSessionsByNotTypeAndTime(self, request):
        """Return sessions NOT of the given type starting at/before a time."""
        return self._getSessionsByTime(request)
    @endpoints.method(message_types.VoidMessage, SessionForms,
                      path='sessions/all',
                      http_method='GET', name='getAllSessions')
    def getAllSessions(self, request):
        """Return every session that has been created."""
        return self._getSessions(request)
    @endpoints.method(message_types.VoidMessage, StringMessage,
                      path='speakers/featured',
                      http_method='GET', name='getFeaturedSpeakers')
    def getFeaturedSpeaker(self, request):
        """Return the cached featured-speaker announcement ("" if none)."""
        return StringMessage(data=memcache.get(MEMCACHE_FEATURED_KEY) or "")
@staticmethod
def _cacheFeaturedSpeaker(conference_key, speaker_name):
"""Determine featured speaker and save it in memcache
"""
conf_key = ndb.Key(urlsafe=conference_key)
q = Session.query(ancestor=conf_key)
q = q.filter(Session.speaker == speaker_name)
featured_speaker = ""
sessions = ""
count = 0
for session in q:
sessions = sessions + ", " + session.sessionName
count = count+1
if count > 1:
featured_speaker = speaker_name
if featured_speaker:
featured = "Featued Speaker is " + featured_speaker + \
" and his/her sessions " + sessions
logging.info(featured)
memcache.set(MEMCACHE_FEATURED_KEY, featured)
else:
featured = ""
memcache.delete(MEMCACHE_FEATURED_KEY)
return featured
api = endpoints.api_server([ConferenceApi]) # register API
| {
"content_hash": "a14de6842d8da30f19b2a9cce736ae8a",
"timestamp": "",
"source": "github",
"line_count": 830,
"max_line_length": 99,
"avg_line_length": 39.126506024096386,
"alnum_prop": 0.5936566589684372,
"repo_name": "osmdawy/Conference",
"id": "17ca969e791c5e105d7a983e33669f415511e68c",
"size": "32498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conference.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "32778"
},
{
"name": "Python",
"bytes": "41223"
}
],
"symlink_target": ""
} |
"""
Module to hold the encoder model.
"""
from tqdm import tqdm
import tensorflow as tf
import numpy as np
import os
import matplotlib
try:
if os.environ["SSH_CONNECTION"]:
matplotlib.use("Pdf")
except KeyError:
pass
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
# Network Parameters
n_hidden_1 = 512#256 # 1st layer num features
n_hidden_2 = 256#128 # 2nd layer num features
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# Encoder/decoder weight matrices, random-normal initialized.
weights = {
    'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
    'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
# Per-layer bias vectors, random-normal initialized.
biases = {
    'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
    """Map an input batch through the two sigmoid encoding layers."""
    hidden = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights["encoder_h1"]), biases["encoder_b1"]))
    code = tf.nn.sigmoid(
        tf.add(tf.matmul(hidden, weights["encoder_h2"]), biases["encoder_b2"]))
    return code
# Building the decoder
def decoder(x):
    """Map a code batch back to input space via two sigmoid layers."""
    hidden = tf.nn.sigmoid(
        tf.add(tf.matmul(x, weights["decoder_h1"]), biases["decoder_b1"]))
    reconstruction = tf.nn.sigmoid(
        tf.add(tf.matmul(hidden, weights["decoder_h2"]), biases["decoder_b2"]))
    return reconstruction
# Construct model
print("Constructing the model...")
print(" |-> Constructing the encoder model...")
encoder_op = encoder(X)
print(" |-> Constructing the decoder model...")
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
learning_rate = 0.01
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
# Initializing the variables
init = tf.initialize_all_variables()
# Launch the graph
training_epochs = 20
batch_size = 256
print("Launch the tensorflow session...")
with tf.Session() as sess:
    print(" |-> Run the session...")
    sess.run(init)
    total_batch = int(mnist.train.num_examples/batch_size)
    # NOTE: a debugging print/exit(0) used to sit here and aborted the
    # script before any training happened; removed so training runs.
    # Training cycle
    print(" |-> Run the training...")
    for epoch in tqdm(range(training_epochs)):
        for i in range(total_batch):
            # Labels are unused: an autoencoder trains on inputs only.
            batch_xs, _ = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
    print(" |-> Optimization finished.")
    # Applying encode and decode over test set
    examples_to_show = 10
    encode_decode = sess.run(y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
    # Compare original images with their reconstructions
    f, a = plt.subplots(2, 10, figsize=(10, 2))
    for i in range(examples_to_show):
        a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28)))
        a[1][i].imshow(np.reshape(encode_decode[i], (28, 28)))
    f.show()
    plt.draw()
    plt.waitforbuttonpress()
| {
"content_hash": "8c6a563746c81fb0ffcc33f89b0a6dc0",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 89,
"avg_line_length": 32.08,
"alnum_prop": 0.6638403990024938,
"repo_name": "MaxStrange/swedish_chef",
"id": "fdcf3a6475aa4bc3ca12f37503f5dfebb5a5cbb3",
"size": "4010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statistics/mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "178063"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for report_builder.

    Relaxes DisplayField.display_format from a unique OneToOneField to a
    plain ForeignKey so several display fields can share one Format.
    """
    def forwards(self, orm):
        """Apply: drop the unique constraint and widen the relation."""
        # Removing unique constraint on 'DisplayField', fields ['display_format']
        db.delete_unique('report_builder_displayfield', ['display_format_id'])
        # Changing field 'DisplayField.display_format'
        db.alter_column('report_builder_displayfield', 'display_format_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['report_builder.Format'], null=True))
    def backwards(self, orm):
        """Revert: restore the unique one-to-one relation."""
        # Changing field 'DisplayField.display_format'
        db.alter_column('report_builder_displayfield', 'display_format_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['report_builder.Format'], unique=True, null=True))
        # Adding unique constraint on 'DisplayField', fields ['display_format']
        db.create_unique('report_builder_displayfield', ['display_format_id'])
    # Frozen ORM snapshot used by South while running this migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'report_builder.displayfield': {
            'Meta': {'ordering': "['position']", 'object_name': 'DisplayField'},
            'aggregate': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
            'display_format': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['report_builder.Format']", 'null': 'True', 'blank': 'True'}),
            'field': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'field_verbose': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'path_verbose': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'report': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['report_builder.Report']"}),
            'sort': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'sort_reverse': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'total': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'width': ('django.db.models.fields.IntegerField', [], {'default': '15'})
        },
        'report_builder.filterfield': {
            'Meta': {'ordering': "['position']", 'object_name': 'FilterField'},
            'exclude': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'field': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'field_verbose': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'filter_type': ('django.db.models.fields.CharField', [], {'default': "'icontains'", 'max_length': '20', 'blank': 'True'}),
            'filter_value': ('django.db.models.fields.CharField', [], {'max_length': '2000'}),
            'filter_value2': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'path_verbose': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'report': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['report_builder.Report']"})
        },
        'report_builder.format': {
            'Meta': {'object_name': 'Format'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
            'string': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'})
        },
        'report_builder.report': {
            'Meta': {'object_name': 'Report'},
            'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'distinct': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'root_model': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'starred': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'report_starred_set'", 'blank': 'True', 'to': "orm['auth.User']"}),
            'user_created': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'user_modified': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'report_modified_set'", 'null': 'True', 'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['report_builder']
| {
"content_hash": "6abddb9a89e62f2283b5c8d9ed856f8b",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 190,
"avg_line_length": 77.32173913043478,
"alnum_prop": 0.5653396311291048,
"repo_name": "amaudy/django-report-builder",
"id": "62979104db78feea7d335815eca555ee045ce70c",
"size": "8916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "report_builder/migrations/0008_auto__add_field_report_description__chg_field_displayfield_display_for.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import feedparser
import reader
from kaikai import Vocab
def pp(*args):
    """Pretty-print args to stdout, forcing UTF-8 encoding."""
    import pprint
    # fd 1 is stdout; closefd=False so the real stdout stays open.
    out = open(1, 'w', encoding="utf-8", closefd=False)
    pprint.pprint(*args, stream=out)
def pr(*args):
    """print() to stdout, forcing UTF-8 encoding."""
    out = open(1, 'w', encoding="utf-8", closefd=False)
    print(*args, file=out)
# RSS feed sources: local test fixtures plus the live news endpoints.
# A missing comma previously fused the second fixture path with the
# first live URL via implicit string-literal concatenation.
FEEDLIST = [
    "../newnews/data/tert.am.xml",
    "../newnews/data/news.am.xml",
    "http://www.news.am/arm/rss/",
    "http://www.tert.am/rss/?language=am"
]
def readarticles(feedlist):
    """Yield "<title> <description>" for each unique article in the feeds.

    Articles whose title has already been seen (across all feeds) are
    skipped, so every yielded article is unique by title.
    """
    seen_titles = set()
    for feed_url in feedlist:
        parsed = feedparser.parse(feed_url)
        for entry in parsed.entries:
            if entry.title not in seen_titles:
                seen_titles.add(entry.title)
                yield entry.title + " " + entry.description
if __name__ == "__main__":
    # Build a vocabulary from the words of every unique article, then
    # dump the collected words, suffixes and stems.
    vocab = Vocab()
    for content in readarticles(FEEDLIST):
        words = reader.getWords(reader.cleanHTML(content))
        vocab.update(words)
    pp(vocab.words)
    pp(vocab.suffixes)
    pp(vocab.stems)
| {
"content_hash": "4f26c3e87cc0ac154d383d18b399d35d",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 58,
"avg_line_length": 24.794871794871796,
"alnum_prop": 0.6132368148914168,
"repo_name": "khachik/kaikai",
"id": "28fb35598e87a0f461d001fd2b8cdb02c6ef3427",
"size": "967",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "6635"
}
],
"symlink_target": ""
} |
"""
fs.s3fs
=======
**Currently only avaiable on Python2 due to boto not being available for Python3**
FS subclass accessing files in Amazon S3
This module provides the class 'S3FS', which implements the FS filesystem
interface for objects stored in Amazon Simple Storage Service (S3).
"""
import os
import datetime
import tempfile
from fnmatch import fnmatch
import stat as statinfo
import boto.s3.connection
from boto.s3.prefix import Prefix
from boto.exception import S3ResponseError
from fs.base import *
from fs.path import *
from fs.errors import *
from fs.remote import *
from fs.filelike import LimitBytesFile
from fs import iotools
import six
# Boto is not thread-safe, so we need to use a per-thread S3 connection.
if hasattr(threading,"local"):
    thread_local = threading.local
else:
    # Fallback for very old Pythons lacking threading.local: emulate
    # thread-local attributes with a (thread, name) -> value map.
    # NOTE(review): `self._map = {}` goes through the overridden
    # __setattr__, which reads self._map before it exists and recurses
    # via __getattr__ — this fallback looks broken; confirm it is only
    # reachable on interpreters without threading.local.
    class thread_local(object):
        def __init__(self):
            self._map = {}
        def __getattr__(self,attr):
            try:
                return self._map[(threading.currentThread(),attr)]
            except KeyError:
                raise AttributeError, attr
        def __setattr__(self,attr,value):
            self._map[(threading.currentThread(),attr)] = value
class S3FS(FS):
"""A filesystem stored in Amazon S3.
This class provides the FS interface for files stored in Amazon's Simple
Storage Service (S3). It should be instantiated with the name of the
S3 bucket to use, and optionally a prefix under which the files should
be stored.
Local temporary files are used when opening files from this filesystem,
and any changes are only pushed back into S3 when the files are closed
or flushed.
"""
_meta = {'thread_safe': True,
'virtual': False,
'read_only': False,
'unicode_paths': True,
'case_insensitive_paths': False,
'network': True,
'atomic.move': True,
'atomic.copy': True,
'atomic.makedir': True,
'atomic.rename': False,
'atomic.setcontent': True
}
class meta:
PATH_MAX = None
NAME_MAX = None
    def __init__(self, bucket, prefix="", aws_access_key=None, aws_secret_key=None, separator="/", thread_synchronize=True, key_sync_timeout=1):
        """Constructor for S3FS objects.
        S3FS objects require the name of the S3 bucket in which to store
        files, and can optionally be given a prefix under which the files
        should be stored. The AWS public and private keys may be specified
        as additional arguments; if they are not specified they will be
        read from the two environment variables AWS_ACCESS_KEY_ID and
        AWS_SECRET_ACCESS_KEY.
        The keyword argument 'key_sync_timeout' specifies the maximum
        time in seconds that the filesystem will spend trying to confirm
        that a newly-uploaded S3 key is available for reading. For no
        timeout set it to zero. To disable these checks entirely (and
        thus reduce the filesystem's consistency guarantees to those of
        S3's "eventual consistency" model) set it to None.
        By default the path separator is "/", but this can be overridden
        by specifying the keyword 'separator' in the constructor.
        """
        self._bucket_name = bucket
        self._access_keys = (aws_access_key,aws_secret_key)
        self._separator = separator
        self._key_sync_timeout = key_sync_timeout
        # Normalise prefix to this form: path/to/files/
        prefix = normpath(prefix)
        while prefix.startswith(separator):
            prefix = prefix[1:]
        if not prefix.endswith(separator) and prefix != "":
            prefix = prefix + separator
        # Boto expects byte strings for key names.
        if isinstance(prefix,unicode):
            prefix = prefix.encode("utf8")
        # Fail early when credentials are neither passed in nor present
        # in the environment.
        if aws_access_key is None:
            if "AWS_ACCESS_KEY_ID" not in os.environ:
                raise CreateFailedError("AWS_ACCESS_KEY_ID not set")
        if aws_secret_key is None:
            if "AWS_SECRET_ACCESS_KEY" not in os.environ:
                raise CreateFailedError("AWS_SECRET_ACCESS_KEY not set")
        self._prefix = prefix
        self._tlocal = thread_local()
        super(S3FS, self).__init__(thread_synchronize=thread_synchronize)
    # Make _s3conn and _s3bukt properties that are created on demand,
    # since they cannot be stored during pickling.
    def _s3conn(self):
        """Per-thread S3 connection, recreated when older than 60 seconds."""
        try:
            (c,ctime) = self._tlocal.s3conn
            if time.time() - ctime > 60:
                # Connection considered stale; rebuild it below.
                raise AttributeError
            return c
        except AttributeError:
            c = boto.s3.connection.S3Connection(*self._access_keys)
            self._tlocal.s3conn = (c,time.time())
            return c
    _s3conn = property(_s3conn)
    def _s3bukt(self):
        """Per-thread bucket handle, recreated when older than 60 seconds.

        The bucket is created on first use if it does not exist yet.
        """
        try:
            (b,ctime) = self._tlocal.s3bukt
            if time.time() - ctime > 60:
                # Bucket handle considered stale; rebuild it below.
                raise AttributeError
            return b
        except AttributeError:
            try:
                # Validate by listing the bucket if there is no prefix.
                # If there is a prefix, validate by listing only the prefix
                # itself, to avoid errors when an IAM policy has been applied.
                if self._prefix:
                    b = self._s3conn.get_bucket(self._bucket_name, validate=0)
                    b.get_key(self._prefix)
                else:
                    b = self._s3conn.get_bucket(self._bucket_name, validate=1)
            except S3ResponseError, e:
                if "404 Not Found" not in str(e):
                    raise
                b = self._s3conn.create_bucket(self._bucket_name)
            self._tlocal.s3bukt = (b,time.time())
            return b
    _s3bukt = property(_s3bukt)
    def __getstate__(self):
        # The thread-local slot holds live boto objects that cannot be
        # pickled; drop it and rebuild on demand after unpickling.
        state = super(S3FS,self).__getstate__()
        del state['_tlocal']
        return state
    def __setstate__(self,state):
        # Restore base state and recreate the unpicklable thread-locals.
        super(S3FS,self).__setstate__(state)
        self._tlocal = thread_local()
    def __repr__(self):
        # e.g. <S3FS: mybucket:some/prefix/>
        args = (self.__class__.__name__,self._bucket_name,self._prefix)
        return '<%s: %s:%s>' % args
    __str__ = __repr__
    def _s3path(self,path):
        """Get the absolute path to a file stored in S3."""
        path = relpath(normpath(path))
        path = self._separator.join(iteratepath(path))
        s3path = self._prefix + path
        # Strip any trailing separator so files and dirs share one form.
        if s3path and s3path[-1] == self._separator:
            s3path = s3path[:-1]
        # Boto expects byte strings for key names.
        if isinstance(s3path,unicode):
            s3path = s3path.encode("utf8")
        return s3path
def _uns3path(self,s3path,roots3path=None):
"""Get the local path for a file stored in S3.
This is essentially the opposite of self._s3path().
"""
if roots3path is None:
roots3path = self._s3path("")
i = len(roots3path)
return s3path[i:]
    def _sync_key(self,k):
        """Synchronise on contents of the given key.

        Since S3 only offers "eventual consistency" of data, it is possible
        to create a key but be unable to read it back straight away. This
        method works around that limitation by polling the key until it reads
        back the value expected by the given key.

        Note that this could easily fail if the key is modified by another
        program, meaning the content will never be as specified in the given
        key. This is the reason for the timeout argument to the constructor.
        """
        timeout = self._key_sync_timeout
        if timeout is None:
            # Checks disabled entirely: accept "eventual consistency".
            return k
        k2 = self._s3bukt.get_key(k.name)
        t = time.time()
        # Poll until the freshly-read key's etag matches, or we time out
        # (timeout == 0 means poll forever).
        while k2 is None or k2.etag != k.etag:
            if timeout > 0:
                if t + timeout < time.time():
                    break
            time.sleep(0.1)
            k2 = self._s3bukt.get_key(k.name)
        return k2
    def _sync_set_contents(self,key,contents):
        """Synchronously set the contents of a key.

        `key` may be a key object or a key name; `contents` may be a
        string or a file-like object.  Returns the synchronised key
        (see _sync_key).
        """
        if isinstance(key,basestring):
            key = self._s3bukt.new_key(key)
        if isinstance(contents,basestring):
            key.set_contents_from_string(contents)
        elif hasattr(contents,"md5"):
            # A pre-computed md5 lets S3 verify the upload integrity.
            hexmd5 = contents.md5
            b64md5 = hexmd5.decode("hex").encode("base64").strip()
            key.set_contents_from_file(contents,md5=(hexmd5,b64md5))
        else:
            try:
                contents.seek(0)
            except (AttributeError,EnvironmentError):
                # Unseekable stream: spool it to a temp file in 512KB
                # chunks before uploading.
                tf = tempfile.TemporaryFile()
                data = contents.read(524288)
                while data:
                    tf.write(data)
                    data = contents.read(524288)
                tf.seek(0)
                key.set_contents_from_file(tf)
            else:
                key.set_contents_from_file(contents)
        return self._sync_key(key)
def makepublic(self, path):
"""Mark given path as publicly accessible using HTTP(S)"""
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
k.make_public()
def getpathurl(self, path, allow_none=False, expires=3600):
"""Returns a url that corresponds to the given path."""
s3path = self._s3path(path)
k = self._s3bukt.get_key(s3path)
# Is there AllUsers group with READ permissions?
is_public = True in [grant.permission == 'READ' and
grant.uri == 'http://acs.amazonaws.com/groups/global/AllUsers'
for grant in k.get_acl().acl.grants]
url = k.generate_url(expires, force_http=is_public)
if url == None:
if not allow_none:
raise NoPathURLError(path=path)
return None
if is_public:
# Strip time token; it has no sense for public resource
url = url.split('?')[0]
return url
    def setcontents(self, path, data=b'', encoding=None, errors=None, chunk_size=64*1024):
        """Set the contents of *path*, encoding text data when given."""
        s3path = self._s3path(path)
        if isinstance(data, six.text_type):
            data = data.encode(encoding=encoding, errors=errors)
        self._sync_set_contents(s3path, data)
    @iotools.filelike_to_stream
    def open(self, path, mode='r', buffering=-1, encoding=None, errors=None, newline=None, line_buffering=False, **kwargs):
        """Open the named file in the given mode.

        This method downloads the file contents into a local temporary file
        so that it can be worked on efficiently. Any changes made to the
        file are only sent back to S3 when the file is flushed or closed.
        """
        if self.isdir(path):
            raise ResourceInvalidError(path)
        s3path = self._s3path(path)
        # Truncate the file if requested
        if "w" in mode:
            k = self._sync_set_contents(s3path,"")
        else:
            k = self._s3bukt.get_key(s3path)
        if k is None:
            # Create the file if it's missing
            if "w" not in mode and "a" not in mode:
                raise ResourceNotFoundError(path)
            if not self.isdir(dirname(path)):
                raise ParentDirectoryMissingError(path)
            k = self._sync_set_contents(s3path,"")
        # Make sure nothing tries to read past end of socket data
        f = LimitBytesFile(k.size,k,"r")
        # For streaming reads, return the key object directly
        if mode == "r-":
            return f
        # For everything else, use a RemoteFileBuffer.
        # This will take care of closing the socket when it's done.
        return RemoteFileBuffer(self,path,mode,f)
    def exists(self,path):
        """Check whether a path exists (as either a file or a directory)."""
        s3path = self._s3path(path)
        s3pathD = s3path + self._separator
        # The root directory always exists
        if self._prefix.startswith(s3path):
            return True
        # One delimited list request covers both the file and dir forms.
        ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
        for k in ks:
            # A regular file
            if _eq_utf8(k.name,s3path):
                return True
            # A directory
            if _eq_utf8(k.name,s3pathD):
                return True
        return False
    def isdir(self,path):
        """Check whether a path exists and is a directory."""
        s3path = self._s3path(path) + self._separator
        # Root is always a directory
        if s3path == "/" or s3path == self._prefix:
            return True
        # Use a list request so that we return true if there are any files
        # in that directory. This avoids requiring a special file for the
        # directory itself, which other tools may not create.
        ks = self._s3bukt.list(prefix=s3path,delimiter=self._separator)
        try:
            iter(ks).next()
        except StopIteration:
            return False
        else:
            return True
def isfile(self,path):
"""Check whether a path exists and is a regular file."""
s3path = self._s3path(path)
# Root is never a file
if self._prefix.startswith(s3path):
return False
k = self._s3bukt.get_key(s3path)
if k is not None:
return True
return False
    def listdir(self,path="./",wildcard=None,full=False,absolute=False,
                dirs_only=False,files_only=False):
        """List contents of a directory (materialized from ilistdir)."""
        return list(self.ilistdir(path,wildcard,full,absolute,
                                  dirs_only,files_only))
    def listdirinfo(self,path="./",wildcard=None,full=False,absolute=False,
                    dirs_only=False,files_only=False):
        """List (name, info) pairs for a directory (from ilistdirinfo)."""
        return list(self.ilistdirinfo(path,wildcard,full,absolute,
                                      dirs_only,files_only))
    def ilistdir(self,path="./",wildcard=None,full=False,absolute=False,
                 dirs_only=False,files_only=False):
        """Iterate over the names of entries in a directory."""
        keys = self._iter_keys(path)
        entries = self._filter_keys(path,keys,wildcard,full,absolute,
                                    dirs_only,files_only)
        # Drop the key objects; callers only want the names here.
        return (nm for (nm,k) in entries)
    def ilistdirinfo(self,path="./",wildcard=None,full=False,absolute=False,
                     dirs_only=False,files_only=False):
        """Iterate (name, info) pairs for entries in a directory."""
        keys = self._iter_keys(path)
        entries = self._filter_keys(path,keys,wildcard,full,absolute,
                                    dirs_only,files_only)
        return ((nm,self._get_key_info(k,nm)) for (nm,k) in entries)
def _iter_keys(self, path):
    """Iterator over keys contained in the given directory.

    This generator yields (name,key) pairs for each entry in the given
    directory.  If the path is not a directory, it raises the appropriate
    error.
    """
    s3path = self._s3path(path) + self._separator
    if s3path == "/":
        s3path = ""
    # Tracks whether the listing yielded anything at all; an empty listing
    # means the directory does not exist (or the path is a file).
    isDir = False
    for k in self._s3bukt.list(prefix=s3path, delimiter=self._separator):
        if not isDir:
            isDir = True
        # Skip over the entry for the directory itself, if it exists
        name = self._uns3path(k.name, s3path)
        if name != "":
            # Normalise raw byte names to unicode (Python 2 semantics).
            if not isinstance(name, unicode):
                name = name.decode("utf8")
            # Strip the trailing separator from directory-marker entries.
            if name.endswith(self._separator):
                name = name[:-1]
            yield (name, k)
    if not isDir:
        # The bucket prefix itself always exists, even when empty.
        if s3path != self._prefix:
            if self.isfile(path):
                msg = "that's not a directory: %(path)s"
                raise ResourceInvalidError(path, msg=msg)
            raise ResourceNotFoundError(path)
def _key_is_dir(self, k):
    """Return True if the given key (or Prefix) represents a directory."""
    # Delimited listings report sub-directories as boto Prefix objects;
    # an explicit directory marker is a key ending in the separator.
    return isinstance(k, Prefix) or k.name.endswith(self._separator)
def _filter_keys(self,path,keys,wildcard,full,absolute,
dirs_only,files_only):
"""Filter out keys not matching the given criteria.
Given a (name,key) iterator as returned by _iter_keys, this method
applies the given filtering criteria and returns a filtered iterator.
"""
sep = self._separator
if dirs_only and files_only:
raise ValueError("dirs_only and files_only can not both be True")
if dirs_only:
keys = ((nm,k) for (nm,k) in keys if self._key_is_dir(k))
elif files_only:
keys = ((nm,k) for (nm,k) in keys if not self._key_is_dir(k))
if wildcard is not None:
if callable(wildcard):
keys = ((nm,k) for (nm,k) in keys if wildcard(nm))
else:
keys = ((nm,k) for (nm,k) in keys if fnmatch(nm,wildcard))
if full:
return ((relpath(pathjoin(path, nm)),k) for (nm,k) in keys)
elif absolute:
return ((abspath(pathjoin(path, nm)),k) for (nm,k) in keys)
return keys
def makedir(self, path, recursive=False, allow_recreate=False):
    """Create a directory at the given path.

    The 'mode' argument is accepted for compatibility with the standard
    FS interface, but is currently ignored.

    NOTE(review): the docstring mentions a 'mode' argument that is not in
    the signature -- confirm against the FS interface.
    """
    s3path = self._s3path(path)
    s3pathD = s3path + self._separator
    # The bucket prefix itself always exists.
    if s3pathD == self._prefix:
        if allow_recreate:
            return
        msg = "Can not create a directory that already exists"\
              " (try allow_recreate=True): %(path)s"
        raise DestinationExistsError(path, msg=msg)
    s3pathP = self._s3path(dirname(path))
    if s3pathP:
        s3pathP = s3pathP + self._separator
    # Check various preconditions using list of parent dir
    ks = self._s3bukt.list(prefix=s3pathP, delimiter=self._separator)
    if s3pathP == self._prefix:
        parentExists = True
    else:
        parentExists = False
    for k in ks:
        # Any key listed under the parent prefix proves the parent exists.
        if not parentExists:
            parentExists = True
        if _eq_utf8(k.name, s3path):
            # It's already a file
            msg = "Destination exists as a regular file: %(path)s"
            raise ResourceInvalidError(path, msg=msg)
        if _eq_utf8(k.name, s3pathD):
            # It's already a directory
            if allow_recreate:
                return
            msg = "Can not create a directory that already exists"\
                  " (try allow_recreate=True): %(path)s"
            raise DestinationExistsError(path, msg=msg)
    # Create parent if required
    if not parentExists:
        if recursive:
            self.makedir(dirname(path), recursive, allow_recreate)
        else:
            msg = "Parent directory does not exist: %(path)s"
            raise ParentDirectoryMissingError(path, msg=msg)
    # Create an empty file representing the directory
    self._sync_set_contents(s3pathD, "")
def remove(self, path):
    """Remove the file at the given path."""
    s3path = self._s3path(path)
    ks = self._s3bukt.list(prefix=s3path, delimiter=self._separator)
    for k in ks:
        if _eq_utf8(k.name, s3path):
            # Exact key match: it's a regular file; go ahead and delete.
            break
        if _startswith_utf8(k.name, s3path + "/"):
            # Only keys *below* this path exist, so it's a directory.
            msg = "that's not a file: %(path)s"
            raise ResourceInvalidError(path, msg=msg)
    else:
        # The listing produced no matching key at all.
        raise ResourceNotFoundError(path)
    self._s3bukt.delete_key(s3path)
    # S3 deletes are eventually consistent: poll until the key is gone.
    # NOTE(review): this polls in a tight loop with no delay -- confirm
    # that busy-waiting here is intended.
    k = self._s3bukt.get_key(s3path)
    while k:
        k = self._s3bukt.get_key(s3path)
def removedir(self, path, recursive=False, force=False):
    """Remove the directory at the given path.

    force -- also delete any contents instead of failing on non-empty.
    recursive -- after removal, prune now-empty ancestor directories.
    """
    if normpath(path) in ('', '/'):
        raise RemoveRootError(path)
    s3path = self._s3path(path)
    if s3path != self._prefix:
        s3path = s3path + self._separator
    if force:
        # If we will be forcibly removing any directory contents, we
        # might as well get the un-delimited list straight away.
        ks = self._s3bukt.list(prefix=s3path)
    else:
        ks = self._s3bukt.list(prefix=s3path, delimiter=self._separator)
    # Fail if the directory is not empty, or remove them if forced
    found = False
    for k in ks:
        found = True
        if not _eq_utf8(k.name, s3path):
            if not force:
                raise DirectoryNotEmptyError(path)
            self._s3bukt.delete_key(k.name)
    if not found:
        # Nothing listed under the prefix: it is either a file or missing.
        if self.isfile(path):
            msg = "removedir() called on a regular file: %(path)s"
            raise ResourceInvalidError(path, msg=msg)
        if path not in ("", "/"):
            raise ResourceNotFoundError(path)
    # Remove the directory marker key itself.
    self._s3bukt.delete_key(s3path)
    if recursive and path not in ("", "/"):
        # Prune empty ancestors, stopping at the first non-empty one.
        pdir = dirname(path)
        try:
            self.removedir(pdir, recursive=True, force=False)
        except DirectoryNotEmptyError:
            pass
def rename(self, src, dst):
    """Rename the file at 'src' to 'dst'."""
    # Actually, in S3 'rename' is exactly the same as 'move'
    if not self.isfile(src):
        self.movedir(src, dst)
    else:
        self.move(src, dst)
def getinfo(self, path):
    """Return an info dict describing the given path."""
    s3path = self._s3path(path)
    if path in ("", "/"):
        # The root has no real key; synthesize a directory prefix for it.
        k = Prefix(bucket=self._s3bukt, name="/")
    else:
        k = self._s3bukt.get_key(s3path)
        if k is None:
            # No exact key; check whether it exists as a directory prefix.
            # NOTE(review): this breaks on the *first* Prefix in the
            # listing, which could be a sibling such as "foo2/" when
            # asked about "foo" -- confirm intended.
            ks = self._s3bukt.list(prefix=s3path, delimiter=self._separator)
            for k in ks:
                if isinstance(k, Prefix):
                    break
            else:
                raise ResourceNotFoundError(path)
    return self._get_key_info(k, path)
def _get_key_info(self, key, name=None):
    """Build an FS info dict for the given boto key (or Prefix).

    name -- optional path whose basename is reported as the entry name;
    if omitted, the name is derived from the key itself.
    """
    info = {}
    if name is not None:
        info["name"] = basename(name)
    else:
        # BUG FIX: this branch referenced an undefined variable `k`;
        # the parameter is named `key`.
        info["name"] = basename(self._uns3key(key.name))
    # 0o700 spelling is valid on Python 2.6+ and Python 3 alike.
    if self._key_is_dir(key):
        info["st_mode"] = 0o700 | statinfo.S_IFDIR
    else:
        info["st_mode"] = 0o700 | statinfo.S_IFREG
    if hasattr(key, "size"):
        info['size'] = int(key.size)
    etag = getattr(key, "etag", None)
    if etag is not None:
        if isinstance(etag, unicode):
            etag = etag.encode("utf8")
        # S3 wraps the etag in quotes; strip either quoting style.
        info['etag'] = etag.strip('"').strip("'")
    if hasattr(key, "last_modified"):
        # TODO: does S3 use any other formats?
        fmt = "%a, %d %b %Y %H:%M:%S %Z"
        try:
            mtime = datetime.datetime.strptime(key.last_modified, fmt)
            info['modified_time'] = mtime
        except ValueError:
            # Unparseable timestamp: omit modified_time rather than fail.
            pass
    return info
def desc(self, path):
    """Return a human-readable description of the given path."""
    # S3 keys carry no useful description metadata.
    return "No description available"
def copy(self, src, dst, overwrite=False, chunk_size=16384):
    """Copy a file from 'src' to 'dst'.

    src -- The source path
    dst -- The destination path
    overwrite -- If True, then the destination may be overwritten
    (if a file exists at that location). If False then an exception will be
    thrown if the destination exists
    chunk_size -- Size of chunks to use in copy (ignored by S3)
    """
    s3path_dst = self._s3path(dst)
    s3path_dstD = s3path_dst + self._separator
    #  Check for various preconditions.
    ks = self._s3bukt.list(prefix=s3path_dst, delimiter=self._separator)
    dstOK = False
    for k in ks:
        #  It exists as a regular file
        if _eq_utf8(k.name, s3path_dst):
            if not overwrite:
                raise DestinationExistsError(dst)
            dstOK = True
            break
        #  Check if it refers to a directory.  If so, we copy *into* it.
        #  Since S3 lists in lexicographic order, subsequent iterations
        #  of the loop will check for the existence of the new filename.
        if _eq_utf8(k.name, s3path_dstD):
            nm = basename(src)
            dst = pathjoin(dirname(dst), nm)
            s3path_dst = s3path_dstD + nm
            dstOK = True
    if not dstOK and not self.isdir(dirname(dst)):
        msg = "Destination directory does not exist: %(path)s"
        raise ParentDirectoryMissingError(dst, msg=msg)
    # OK, now we can copy the file (server-side, no data transfer here).
    s3path_src = self._s3path(src)
    try:
        self._s3bukt.copy_key(s3path_dst, self._bucket_name, s3path_src)
    except S3ResponseError, e:
        # A 404 from copy_key means the source key does not exist.
        if "404 Not Found" in str(e):
            msg = "Source is not a file: %(path)s"
            raise ResourceInvalidError(src, msg=msg)
        raise e
    else:
        # Wait for the new key to become visible (S3 is eventually
        # consistent), then sync our cached metadata with it.
        k = self._s3bukt.get_key(s3path_dst)
        while k is None:
            k = self._s3bukt.get_key(s3path_dst)
        self._sync_key(k)
def move(self, src, dst, overwrite=False, chunk_size=16384):
    """Move a file from one location to another."""
    # S3 has no native move: copy to the destination, then delete the
    # original key.
    self.copy(src, dst, overwrite=overwrite)
    src_key = self._s3path(src)
    self._s3bukt.delete_key(src_key)
def walkfiles(self,
              path="/",
              wildcard=None,
              dir_wildcard=None,
              search="breadth",
              ignore_errors=False):
    """Walk the filesystem, yielding the path of every file found.

    Uses a single flat S3 listing as a fast path when the requested
    semantics (breadth-first, no dir_wildcard) allow it.
    """
    if search != "breadth" or dir_wildcard is not None:
        # Options the flat listing can't honour; fall back to the
        # generic directory-by-directory implementation.
        args = (wildcard, dir_wildcard, search, ignore_errors)
        for item in super(S3FS, self).walkfiles(path, *args):
            yield item
    else:
        # Fast path: one un-delimited listing returns every key below
        # the prefix in a single sweep.
        prefix = self._s3path(path)
        for k in self._s3bukt.list(prefix=prefix):
            name = relpath(self._uns3path(k.name, prefix))
            if name != "":
                if not isinstance(name, unicode):
                    name = name.decode("utf8")
                # Keys ending in the separator are directory markers.
                if not k.name.endswith(self._separator):
                    if wildcard is not None:
                        if callable(wildcard):
                            if not wildcard(basename(name)):
                                continue
                        else:
                            if not fnmatch(basename(name), wildcard):
                                continue
                    yield pathjoin(path, name)
def walkinfo(self,
             path="/",
             wildcard=None,
             dir_wildcard=None,
             search="breadth",
             ignore_errors=False):
    """Walk the filesystem, yielding (path, info) pairs.

    NOTE(review): the fallback branch delegates to walkfiles() and so
    yields files only, while the fast path below also yields directory
    marker keys -- confirm which behaviour is intended.
    """
    if search != "breadth" or dir_wildcard is not None:
        args = (wildcard, dir_wildcard, search, ignore_errors)
        for item in super(S3FS, self).walkfiles(path, *args):
            yield (item, self.getinfo(item))
    else:
        # Fast path: single un-delimited listing of every key below path.
        prefix = self._s3path(path)
        for k in self._s3bukt.list(prefix=prefix):
            name = relpath(self._uns3path(k.name, prefix))
            if name != "":
                if not isinstance(name, unicode):
                    name = name.decode("utf8")
                if wildcard is not None:
                    if callable(wildcard):
                        if not wildcard(basename(name)):
                            continue
                    else:
                        if not fnmatch(basename(name), wildcard):
                            continue
                yield (pathjoin(path, name), self._get_key_info(k, name))
def walkfilesinfo(self,
                  path="/",
                  wildcard=None,
                  dir_wildcard=None,
                  search="breadth",
                  ignore_errors=False):
    """Walk the filesystem, yielding (path, info) pairs for files only.

    Like walkfiles(), but each yielded path is accompanied by its info
    dict, computed directly from the listing key on the fast path.
    """
    if search != "breadth" or dir_wildcard is not None:
        # Fall back to the generic walk, fetching info per yielded path.
        args = (wildcard, dir_wildcard, search, ignore_errors)
        for item in super(S3FS, self).walkfiles(path, *args):
            yield (item, self.getinfo(item))
    else:
        # Fast path: single un-delimited listing of every key below path.
        prefix = self._s3path(path)
        for k in self._s3bukt.list(prefix=prefix):
            name = relpath(self._uns3path(k.name, prefix))
            if name != "":
                if not isinstance(name, unicode):
                    name = name.decode("utf8")
                # Skip directory marker keys.
                if not k.name.endswith(self._separator):
                    if wildcard is not None:
                        if callable(wildcard):
                            if not wildcard(basename(name)):
                                continue
                        else:
                            if not fnmatch(basename(name), wildcard):
                                continue
                    yield (pathjoin(path, name), self._get_key_info(k, name))
def _eq_utf8(name1, name2):
    """Compare two key names for equality, normalising both to UTF-8 bytes."""
    def _as_bytes(name):
        # Encode unicode values so str/unicode mixtures compare correctly.
        if isinstance(name, unicode):
            return name.encode("utf8")
        return name
    return _as_bytes(name1) == _as_bytes(name2)
def _startswith_utf8(name1, name2):
    """Return True if name1 starts with name2, comparing as UTF-8 bytes."""
    pair = []
    for name in (name1, name2):
        # Encode unicode values so str/unicode mixtures compare correctly.
        if isinstance(name, unicode):
            name = name.encode("utf8")
        pair.append(name)
    return pair[0].startswith(pair[1])
| {
"content_hash": "9ec724a52e61d490a5fb76596ad2e2df",
"timestamp": "",
"source": "github",
"line_count": 747,
"max_line_length": 144,
"avg_line_length": 39.13253012048193,
"alnum_prop": 0.5523399014778325,
"repo_name": "pscottdevos/pyfilesystem",
"id": "b72036bd9c30be21f4d95ea8342cf5d5b47097a6",
"size": "29232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fs/s3fs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1057381"
},
{
"name": "Shell",
"bytes": "3083"
}
],
"symlink_target": ""
} |
import sys
import os
# Locate the IncludeOS source tree: honour $INCLUDEOS_SRC if set, else
# derive it from this file's location (everything before '/test').
includeos_src = os.environ.get('INCLUDEOS_SRC',
                               os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))).split('/test')[0])
# Make the in-tree vmrunner package importable.
sys.path.insert(0, includeos_src)
from vmrunner import vmrunner
import socket
def transmit_test(grgr):
    """UDP handshake/throughput check against the booted VM.

    Invoked by vmrunner when the guest prints "Ready"; `grgr` is the
    matched output line and is unused here.
    """
    print "<Test.py> Performing transmit tests"
    HOST, PORT = "10.0.0.45", 4242
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    data = "Someone there?"
    sock.sendto(data, (HOST, PORT))
    # The VM first replies with the total payload size it will send.
    total_bytes = int(sock.recv(1024))
    print "<Test.py> Sent: {}".format(data)
    print "<Test.py> Incoming: {} bytes".format(total_bytes)
    received = 0
    # Drain the announced number of bytes in 1 KiB chunks.
    while (received < total_bytes):
        received += len(sock.recv(1024))
    print "<Test.py> Received: {}".format(received)
    # Acknowledge success back to the guest.
    data = "SUCCESS"
    sock.sendto(data, (HOST, PORT))
    print "<Test.py> Sent: {}".format(data)
# Get an auto-created VM from the vmrunner
vm = vmrunner.vms[0]
# Add custom event-handler: run the transmit test once the guest
# reports that it is ready.
vm.on_output("Ready", transmit_test)
# Boot the VM with a 20-second timeout, then clean up build artifacts.
vm.cmake().boot(20).clean()
| {
"content_hash": "93ba561330f146ba5d64ac1707421b79",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 120,
"avg_line_length": 25.761904761904763,
"alnum_prop": 0.6571164510166358,
"repo_name": "ingve/IncludeOS",
"id": "adffc9e8ca4c62fcbbfc87b00f1c59c7a5985723",
"size": "1105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/net/integration/transmit/test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "50627"
},
{
"name": "C",
"bytes": "201785"
},
{
"name": "C++",
"bytes": "2871717"
},
{
"name": "CMake",
"bytes": "124597"
},
{
"name": "Dockerfile",
"bytes": "665"
},
{
"name": "GDB",
"bytes": "189"
},
{
"name": "JavaScript",
"bytes": "813"
},
{
"name": "Makefile",
"bytes": "1719"
},
{
"name": "Python",
"bytes": "151122"
},
{
"name": "Shell",
"bytes": "77957"
}
],
"symlink_target": ""
} |
def check_for_anagram(string_a, string_b):
    """Report and return whether the two strings are anagrams of each other."""
    # Two strings are anagrams iff their sorted character sequences match.
    if sorted(string_a) == sorted(string_b):
        print ("Anagram exists")
        return True
    print ("The words don't form an Anagram")
    return False
check_for_anagram("caoalba", "bacalao") | {
"content_hash": "d602d3e817766097a0d933467b74d10a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 49,
"avg_line_length": 24.4,
"alnum_prop": 0.6010928961748634,
"repo_name": "anthonynsimon/python-data-structures-algorithms",
"id": "0e22cd9a09cba6eb6fc2f8395112592072edbd50",
"size": "366",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/playground/anagram_checker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "91710"
}
],
"symlink_target": ""
} |
import time
import unittest
from phaxio import PhaxioApi
from phaxio.exceptions import AuthenticationError, APIError
# Python 2/3 compatibility shim: Python 3 renamed raw_input() to input().
# On Python 2 the probe succeeds and this is a no-op.
try:
    raw_input
except NameError:
    raw_input = input
class ErrorHandlingTestCase(unittest.TestCase):
    """Exercises PhaxioApi error handling against the live API."""

    @classmethod
    def setUpClass(cls):
        super(ErrorHandlingTestCase, cls).setUpClass()
        # Credentials are entered interactively so they never live in the repo.
        cls.key = raw_input('Enter test API key: ')
        cls.secret = raw_input('Enter secret: ')

    def setUp(self):
        super(ErrorHandlingTestCase, self).setUp()
        # Due to Phaxio's API rate limiting,
        # we will wait 1 second between each test
        time.sleep(1)

    def test_valid_request(self):
        client = PhaxioApi(self.key, self.secret, raise_errors=True)
        result = client.send(to='8138014253', string_data='test')
        self.assertTrue(result['success'])

    def test_authentication_error(self):
        client = PhaxioApi('invalid_key', 'invalid_secret', raise_errors=True)
        self.assertRaises(AuthenticationError, client.send,
                          to='8138014253', string_data='test')

    def test_api_error(self):
        client = PhaxioApi(self.key, self.secret, raise_errors=True)
        self.assertRaises(APIError, client.send,
                          to='invalid_number', string_data='test')

    def test_raise_errors_option(self):
        client = PhaxioApi(self.key, self.secret, raise_errors=False)
        result = client.send(to='invalid_number', string_data='test')
        self.assertFalse(result['success'])
# Allow running the tests directly with `python test_errors.py`.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "9b660134de0b61cfaca14015badaf0ac",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 93,
"avg_line_length": 30.571428571428573,
"alnum_prop": 0.6668891855807744,
"repo_name": "jfialkoff/pyphaxio",
"id": "a960e45a07a63af522f0fd2637df692237bc68d7",
"size": "1498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8758"
}
],
"symlink_target": ""
} |
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class DatasetConstructorTest(test.TestCase):
def testTensorDataset(self):
    """A dataset built from a single tuple of tensors yields it once."""
    values = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
    dataset = dataset_ops.Dataset.from_tensors(values)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # The static shapes of the iterator outputs match the inputs.
    self.assertEqual([v.shape for v in values],
                     [t.shape for t in get_next])
    with self.test_session() as sess:
        sess.run(init_op)
        fetched = sess.run(get_next)
        for expected, actual in zip(values, fetched):
            self.assertAllEqual(expected, actual)
        # A second fetch must signal end-of-sequence.
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testTensorSliceDataset(self):
    """A slices dataset yields one row of each component per step."""
    components = (
        np.tile(np.array([[1], [2], [3], [4]]), 20),
        np.tile(np.array([[12], [13], [14], [15]]), 22),
        np.array([37.0, 38.0, 39.0, 40.0]),
    )
    dataset = dataset_ops.Dataset.from_tensor_slices(components)
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # Slicing strips the leading (batch) dimension from each component.
    self.assertEqual([c.shape[1:] for c in components],
                     [t.shape for t in get_next])
    with self.test_session() as sess:
        sess.run(init_op)
        for row in range(4):
            fetched = sess.run(get_next)
            for component, actual in zip(components, fetched):
                self.assertAllEqual(component[row], actual)
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testTensorSliceDatasetWithDict(self):
    """Dict-valued components are sliced per key and keep their dtypes."""
    components = {"foo": [1, 2, 3], "bar": [[4.0], [5.0], [6.0]]}
    iterator = (dataset_ops.Dataset.from_tensor_slices(components)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()
    # Types and shapes are tracked per dictionary key.
    self.assertEqual(dtypes.int32, iterator.output_types["foo"])
    self.assertEqual(dtypes.float32, iterator.output_types["bar"])
    self.assertEqual((), iterator.output_shapes["foo"])
    self.assertEqual((1,), iterator.output_shapes["bar"])
    with self.test_session() as sess:
        sess.run(init_op)
        for idx in range(3):
            fetched = sess.run(get_next)
            self.assertEqual(components["foo"][idx], fetched["foo"])
            self.assertEqual(components["bar"][idx], fetched["bar"])
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testSparseTensorSliceDataset(self):
    """Test a dataset based on slices of a `tf.SparseTensor`."""
    st = array_ops.sparse_placeholder(dtypes.float64)
    iterator = (dataset_ops.Dataset.from_sparse_tensor_slices(st)
                .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = sparse_tensor.SparseTensor(*iterator.get_next())
    with self.test_session() as sess:
        slices = [[1., 2., 3.], [1.], [1.], [1., 2.], [], [1., 2.], [], [], []]

        # Test with sparse tensor in the appropriate order.
        indices = np.array(
            [[i, j] for i in range(len(slices)) for j in range(len(slices[i]))])
        values = np.array([val for s in slices for val in s])
        dense_shape = np.array([len(slices), max(len(s) for s in slices) + 1])
        sparse_feed = sparse_tensor.SparseTensorValue(indices, values,
                                                      dense_shape)
        sess.run(init_op, feed_dict={st: sparse_feed})
        for i, s in enumerate(slices):
            results = sess.run(get_next)
            self.assertAllEqual(s, results.values)
            expected_indices = np.array(
                [[j] for j in range(len(slices[i]))]).reshape([-1, 1])
            self.assertAllEqual(expected_indices, results.indices)
            # Each yielded slice keeps the trailing dims of the full shape.
            self.assertAllEqual(dense_shape[1:], results.dense_shape)
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)

        # Test with sparse tensor in the reverse order, which is not
        # currently supported.
        reverse_order_indices = indices[::-1, :]
        reverse_order_values = values[::-1]
        sparse_feed = sparse_tensor.SparseTensorValue(
            reverse_order_indices, reverse_order_values, dense_shape)
        with self.assertRaises(errors.UnimplementedError):
            sess.run(init_op, feed_dict={st: sparse_feed})

        # Test with an empty sparse tensor.
        empty_indices = np.empty((0, 4), dtype=np.int64)
        empty_values = np.empty((0,), dtype=np.float64)
        empty_dense_shape = [0, 4, 37, 9]
        sparse_feed = sparse_tensor.SparseTensorValue(empty_indices, empty_values,
                                                      empty_dense_shape)
        sess.run(init_op, feed_dict={st: sparse_feed})
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
# pylint: disable=g-long-lambda,unnecessary-lambda
def testNestedStructure(self):
    """Nested (tuple-of-tuple) structure survives every transformation."""
    components = (np.array([1, 2, 3]), (np.array([4., 5.]), np.array([6., 7.])),
                  np.array([8, 9, 10]))

    dataset = dataset_ops.Dataset.from_tensors(components)
    self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
                       dtypes.int64), dataset.output_types)
    self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)

    # Structure-preserving transformations keep types/shapes unchanged.
    dataset = dataset.shuffle(10, 10)
    self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
                       dtypes.int64), dataset.output_types)
    self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)

    dataset = dataset.repeat(-1)
    self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
                       dtypes.int64), dataset.output_types)
    self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)

    dataset = dataset.filter(lambda x, y, z: True)
    self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
                       dtypes.int64), dataset.output_types)
    self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)

    dataset = dataset.take(5)
    self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
                       dtypes.int64), dataset.output_types)
    self.assertEquals(([3], ([2], [2]), [3]), dataset.output_shapes)

    # map() rearranges the nesting of the structure.
    dataset = dataset.map(lambda x, y, z: ((x, z), (y[0], y[1])))
    self.assertEquals(((dtypes.int64, dtypes.int64),
                       (dtypes.float64, dtypes.float64)), dataset.output_types)
    self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)

    dataset = dataset.flat_map(
        lambda x, y: dataset_ops.Dataset.from_tensors(((x[0], x[1]),
                                                       (y[0], y[1])))
    )
    self.assertEquals(((dtypes.int64, dtypes.int64),
                       (dtypes.float64, dtypes.float64)), dataset.output_types)
    self.assertEquals((([3], [3]), ([2], [2])), dataset.output_shapes)

    # batch() prepends an unknown (None) batch dimension to every leaf.
    dataset = dataset.batch(32)
    self.assertEquals(((dtypes.int64, dtypes.int64),
                       (dtypes.float64, dtypes.float64)), dataset.output_types)
    self.assertEquals((([None, 3], [None, 3]), ([None, 2], [None, 2])),
                      nest.pack_sequence_as(dataset.output_shapes, [
                          s.as_list()
                          for s in nest.flatten(dataset.output_shapes)
                      ]))

    # Both iterator flavours expose the same dtypes and shapes.
    iterator = dataset.make_one_shot_iterator()
    (w, x), (y, z) = iterator.get_next()
    self.assertEquals(dtypes.int64, w.dtype)
    self.assertEquals(dtypes.int64, x.dtype)
    self.assertEquals(dtypes.float64, y.dtype)
    self.assertEquals(dtypes.float64, z.dtype)
    self.assertEquals([None, 3], w.shape.as_list())
    self.assertEquals([None, 3], x.shape.as_list())
    self.assertEquals([None, 2], y.shape.as_list())
    self.assertEquals([None, 2], z.shape.as_list())

    iterator = dataset.make_initializable_iterator()
    (w, x), (y, z) = iterator.get_next()
    self.assertEquals(dtypes.int64, w.dtype)
    self.assertEquals(dtypes.int64, x.dtype)
    self.assertEquals(dtypes.float64, y.dtype)
    self.assertEquals(dtypes.float64, z.dtype)
    self.assertEquals([None, 3], w.shape.as_list())
    self.assertEquals([None, 3], x.shape.as_list())
    self.assertEquals([None, 2], y.shape.as_list())
    self.assertEquals([None, 2], z.shape.as_list())

    # Define a separate set of components with matching leading
    # dimension for the from-slices constructor.
    components_for_slices = (np.array([1, 2, 3]), (np.array(
        [4., 5., 6.]), np.array([7., 8., 9.])), np.array([10, 11, 12]))

    dataset = dataset_ops.Dataset.from_tensor_slices(components_for_slices)
    self.assertEquals((dtypes.int64, (dtypes.float64, dtypes.float64),
                       dtypes.int64), dataset.output_types)
    self.assertEquals(([], ([], []), []), dataset.output_shapes)
def testNestedDict(self):
    """Nested dictionaries keep per-leaf dtypes and shapes."""
    components = {"a": {"aa": 1, "ab": [2.0, 2.0]}, "b": [3, 3, 3]}
    dataset = dataset_ops.Dataset.from_tensors(components)
    out_types = dataset.output_types
    out_shapes = dataset.output_shapes
    self.assertEquals(dtypes.int32, out_types["a"]["aa"])
    self.assertEquals(dtypes.float32, out_types["a"]["ab"])
    self.assertEquals(dtypes.int32, out_types["b"])
    self.assertEquals([], out_shapes["a"]["aa"])
    self.assertEquals([2], out_shapes["a"]["ab"])
    self.assertEquals([3], out_shapes["b"])
def testNonSequenceNestedStructure(self):
    """A bare ndarray component flows through transformations unchanged."""
    components = np.array([1, 2, 3])
    ds = dataset_ops.Dataset.from_tensors(components)
    self.assertEquals(dtypes.int64, ds.output_types)
    self.assertEquals([3], ds.output_shapes)
    # filter() preserves the (non-sequence) structure.
    ds = ds.filter(
        lambda x: math_ops.reduce_all(math_ops.equal(x, components)))
    self.assertEquals(dtypes.int64, ds.output_types)
    self.assertEquals([3], ds.output_shapes)
    # map() may change the shape but not the nesting.
    ds = ds.map(lambda x: array_ops.stack([x, x]))
    self.assertEquals(dtypes.int64, ds.output_types)
    self.assertEquals([2, 3], ds.output_shapes)
    # flat_map() unrolls the stacked dimension again.
    ds = ds.flat_map(
        lambda x: dataset_ops.Dataset.from_tensor_slices(x))
    self.assertEquals(dtypes.int64, ds.output_types)
    self.assertEquals([3], ds.output_shapes)
    get_next = ds.make_one_shot_iterator().get_next()
    self.assertEquals(dtypes.int64, get_next.dtype)
    self.assertEquals([3], get_next.shape)
def _testFromGenerator(self, generator, elem_sequence, num_repeats):
    """Check a generator-backed dataset via a reinitializable iterator."""
    dataset = (dataset_ops.Dataset
               .from_generator(generator, output_types=dtypes.int64)
               .repeat(num_repeats)
               .prefetch(5))
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
        for _ in range(2):  # Run twice to test reinitialization.
            sess.run(init_op)
            for _ in range(num_repeats):
                for elem in elem_sequence:
                    self.assertAllEqual(elem, sess.run(get_next))
            with self.assertRaises(errors.OutOfRangeError):
                sess.run(get_next)
def _testFromGeneratorOneShot(self, generator, elem_sequence, num_repeats):
    """Check a generator-backed dataset via a one-shot iterator."""
    dataset = (dataset_ops.Dataset
               .from_generator(generator, output_types=dtypes.int64)
               .repeat(num_repeats)
               .prefetch(5))
    get_next = dataset.make_one_shot_iterator().get_next()
    with self.test_session() as sess:
        for _ in range(num_repeats):
            for elem in elem_sequence:
                self.assertAllEqual(elem, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testFromGeneratorUsingFunction(self):
    """from_generator() accepts a generator function."""
    def generator():
        for i in range(1, 100):
            yield [i] * i
    elem_sequence = list(generator())
    # Exercise both iterator flavours, at 1 and 5 repeats each,
    # preserving the original execution order.
    for repeats in (1, 5):
        self._testFromGenerator(generator, elem_sequence, repeats)
    for repeats in (1, 5):
        self._testFromGeneratorOneShot(generator, elem_sequence, repeats)
def testFromGeneratorUsingList(self):
    """from_generator() accepts a callable returning a list."""
    generator = lambda: [[i] * i for i in range(1, 100)]
    elem_sequence = list(generator())
    for repeats in (1, 5):
        self._testFromGenerator(generator, elem_sequence, repeats)
def testFromGeneratorUsingNdarray(self):
    """from_generator() accepts a callable returning an ndarray."""
    generator = lambda: np.arange(100, dtype=np.int64)
    elem_sequence = list(generator())
    for repeats in (1, 5):
        self._testFromGenerator(generator, elem_sequence, repeats)
def testFromGeneratorUsingGeneratorExpression(self):
    """from_generator() accepts a lambda wrapping a generator expression."""
    # NOTE(mrry): Generator *expressions* are not repeatable (or in
    # general reusable), because they eagerly evaluate the `for`
    # expression as `iter(range(1, 100))` and discard the means of
    # reconstructing `range(1, 100)`. Wrapping the generator
    # expression in a `lambda` makes it repeatable.
    generator = lambda: ([i] * i for i in range(1, 100))
    elem_sequence = list(generator())
    self._testFromGenerator(generator, elem_sequence, 1)
    self._testFromGenerator(generator, elem_sequence, 5)
def testFromMultipleConcurrentGenerators(self):
    """Multiple iterators over generator-backed datasets can coexist."""
    num_inner_repeats = 5
    num_outer_repeats = 100

    def generator():
        for i in range(1, 10):
            yield ([i] * i, [i, i ** 2, i ** 3])
    input_list = list(generator())

    # The interleave transformation is essentially a flat map that
    # draws from multiple input datasets concurrently (in a cyclic
    # fashion). By placing `Dataset.from_generator()` inside an
    # interleave, we test its behavior when multiple iterators are
    # active at the same time; by additionally prefetching inside the
    # interleave, we create the possibility of parallel (modulo GIL)
    # invocations to several iterators created by the same dataset.
    def interleave_fn(_):
        return (dataset_ops.Dataset.from_generator(
            generator, output_types=(dtypes.int64, dtypes.int64),
            output_shapes=([None], [3]))
            .repeat(num_inner_repeats).prefetch(5))

    iterator = (
        dataset_ops.Dataset.range(num_outer_repeats)
        .interleave(interleave_fn, cycle_length=10,
                    block_length=len(input_list))
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
        sess.run(init_op)
        # block_length == len(input_list), so elements arrive in whole
        # generator-sized runs and the expected order is deterministic.
        for _ in range(num_inner_repeats * num_outer_repeats):
            for elem in input_list:
                val0, val1 = sess.run(get_next)
                self.assertAllEqual(elem[0], val0)
                self.assertAllEqual(elem[1], val1)
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testFromGeneratorsRunningInParallel(self):
    """Iterators from the same generator dataset may run concurrently."""
    num_parallel_iterators = 3

    # Define shared state that multiple iterator instances will access to
    # demonstrate their concurrent activity.
    lock = threading.Lock()
    condition = threading.Condition(lock)
    next_ticket = [0]  # GUARDED_BY(lock)

    def generator():
        # NOTE(mrry): We yield one element before the barrier, because
        # the current implementation of `Dataset.interleave()` must
        # fetch one element from each incoming dataset to start the
        # prefetching.
        yield 0

        # Define a barrier that `num_parallel_iterators` iterators must enter
        # before any can proceed. Demonstrates that multiple iterators may be
        # active at the same time.
        condition.acquire()
        ticket = next_ticket[0]
        next_ticket[0] += 1
        if ticket == num_parallel_iterators - 1:
            # The last iterator to join the barrier notifies the others.
            condition.notify_all()
        else:
            # Wait until the last iterator enters the barrier.
            while next_ticket[0] < num_parallel_iterators:
                condition.wait()
        condition.release()

        yield 1

    # As in `testFromMultipleConcurrentGenerators()`, we use a combination of
    # `Dataset.interleave()` and `Dataset.prefetch()` to cause multiple
    # iterators to be active concurrently.
    def interleave_fn(_):
        return dataset_ops.Dataset.from_generator(
            generator, output_types=dtypes.int64, output_shapes=[]).prefetch(2)

    iterator = (
        dataset_ops.Dataset.range(num_parallel_iterators)
        .interleave(
            interleave_fn, cycle_length=num_parallel_iterators, block_length=1)
        .make_initializable_iterator())
    init_op = iterator.initializer
    get_next = iterator.get_next()

    with self.test_session() as sess:
        sess.run(init_op)
        # With block_length=1 the cycle yields each iterator's 0 first,
        # then each iterator's 1.
        for elem in [0, 1]:
            for _ in range(num_parallel_iterators):
                self.assertAllEqual(elem, sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testFromGeneratorTypeError(self):
    """Yielding a wrongly-typed value raises, then iteration resumes."""
    def generator():
        yield np.array([1, 2, 3], dtype=np.int64)
        yield np.array([4, 5, 6], dtype=np.int64)
        yield "ERROR"
        yield np.array([7, 8, 9], dtype=np.int64)
    dataset = dataset_ops.Dataset.from_generator(
        generator, output_types=dtypes.int64, output_shapes=[3])
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
        sess.run(init_op)
        self.assertAllEqual([1, 2, 3], sess.run(get_next))
        self.assertAllEqual([4, 5, 6], sess.run(get_next))
        # The mistyped element surfaces as an op error...
        with self.assertRaisesOpError(r"element of type .*int64.* was expected"):
            sess.run(get_next)
        # ...but the generator keeps going afterwards.
        self.assertAllEqual([7, 8, 9], sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testFromGeneratorShapeError(self):
    """Yielding a wrongly-shaped value raises, then iteration resumes."""
    def generator():
        yield np.array([1, 2, 3], dtype=np.int64)
        yield np.array([4, 5, 6], dtype=np.int64)
        yield np.array([7, 8, 9, 10], dtype=np.int64)
        yield np.array([11, 12, 13], dtype=np.int64)
    dataset = dataset_ops.Dataset.from_generator(
        generator, output_types=dtypes.int64, output_shapes=[3])
    iterator = dataset.make_initializable_iterator()
    init_op = iterator.initializer
    get_next = iterator.get_next()
    with self.test_session() as sess:
        sess.run(init_op)
        self.assertAllEqual([1, 2, 3], sess.run(get_next))
        self.assertAllEqual([4, 5, 6], sess.run(get_next))
        # The mis-shaped element surfaces as an op error...
        with self.assertRaisesOpError(r"element of shape \(3,\) was expected"):
            sess.run(get_next)
        # ...but the generator keeps going afterwards.
        self.assertAllEqual([11, 12, 13], sess.run(get_next))
        with self.assertRaises(errors.OutOfRangeError):
            sess.run(get_next)
def testSplitPipelineFailsWithPlacementError(self):
  """A dataset pipeline touching resources on two devices fails to init."""
  with session.Session(
      target="",
      config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:

    dataset = dataset_ops.Dataset.from_tensors(0)

    # Define a pipeline that attempts to use variables on two
    # different devices.
    #
    # Initialize the variables before creating the iterator, to avoid the
    # placement algorithm overriding the DT_RESOURCE colocation constraints.
    with ops.device("/cpu:0"):
      var_0 = resource_variable_ops.ResourceVariable(initial_value=0)
      dataset = dataset.map(lambda x: x + var_0.read_value())
      sess.run(var_0.initializer)

    with ops.device("/cpu:1"):
      var_1 = resource_variable_ops.ResourceVariable(initial_value=0)
      dataset = dataset.map(lambda x: x + var_1.read_value())
      sess.run(var_1.initializer)

    iterator = dataset.make_initializable_iterator()

    # Initialization must fail: the iterator cannot access both resources.
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Trying to access resource located in device"):
      sess.run(iterator.initializer)
def testRestructureDataset(self):
  """_RestructuredDataset accepts compatible specs and rejects the rest."""
  components = (array_ops.placeholder(dtypes.int32),
                (array_ops.placeholder(dtypes.int32, shape=[None]),
                 array_ops.placeholder(dtypes.int32, shape=[20, 30])))
  dataset = dataset_ops.Dataset.from_tensors(components)

  i32 = dtypes.int32

  # (new_types, new_shape_lists) pairs that are compatible with the dataset.
  test_cases = [((i32, i32, i32), None),
                (((i32, i32), i32), None),
                ((i32, i32, i32), (None, None, None)),
                ((i32, i32, i32), ([17], [17], [20, 30]))]

  for new_types, new_shape_lists in test_cases:
    # pylint: disable=protected-access
    new = dataset_ops._RestructuredDataset(
        dataset, new_types, new_shape_lists)
    # pylint: enable=protected-access
    self.assertEqual(new_types, new.output_types)
    if new_shape_lists is not None:
      for expected_shape_list, shape in zip(
          nest.flatten(new_shape_lists), nest.flatten(new.output_shapes)):
        if expected_shape_list is None:
          # None requests a shape of unknown rank.
          self.assertIs(None, shape.ndims)
        else:
          self.assertEqual(expected_shape_list, shape.as_list())

  # Incompatible dtype, arity, structure or shape must raise ValueError.
  fail_cases = [((i32, dtypes.int64, i32), None),
                ((i32, i32, i32, i32), None),
                ((i32, i32, i32), ((None, None), None)),
                ((i32, i32, i32), (None, None, None, None)),
                ((i32, i32, i32), (None, [None], [21, 30]))]

  for new_types, new_shape_lists in fail_cases:
    with self.assertRaises(ValueError):
      # pylint: disable=protected-access
      new = dataset_ops._RestructuredDataset(
          dataset, new_types, new_shape_lists)
      # pylint: enable=protected-access
if __name__ == "__main__":
  # Delegate to the TensorFlow test runner when executed as a script.
  test.main()
| {
"content_hash": "78a666280cddfbf2932579bd0a20e936",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 80,
"avg_line_length": 41.34879406307978,
"alnum_prop": 0.6410463498900705,
"repo_name": "tornadozou/tensorflow",
"id": "acbd117a3312ee0374dc6fff215fe2c22db2e366",
"size": "22976",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/data/python/kernel_tests/dataset_constructor_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29734773"
},
{
"name": "CMake",
"bytes": "647266"
},
{
"name": "Go",
"bytes": "976912"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "276756"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26531000"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373122"
}
],
"symlink_target": ""
} |
import requests
import io
import json
class finboxRequest:
    """Pre-built request payload for the finbox.io beta batch-data API.

    Bundles the base URL, the auth/content headers, and a JSON body that
    requests AAPL profile, historical and market metrics in one batch call.
    """

    def __init__(self):
        self.url = "https://api.finbox.io/beta"
        # SECURITY NOTE(review): bearer token is hard-coded in source;
        # it should be loaded from the environment or a secrets store.
        self.headers = {
            'authorization': "Bearer 509e960d5d374b8958f00bd5e977d13f001925fe",
            'accept': "application/json",
            'content-type': "application/json"
        }
        # Batch query body: every leaf value is a finbox metric expression.
        self.data = json.dumps({
            "data": {
                "profile": {
                    "symbol": "AAPL.ticker",
                    "name": "AAPL.company_name",
                    "description": "AAPL.description",
                    "historicals": {
                        "revenue": "AAPL.total_revenue[FY-9:FY]",
                        "ebit": "AAPL.adjusted_ebit[FY-9:FY]",
                        "assets": "AAPL.total_assets[FY-9:FY]"
                    },
                    "market": {
                        "year_high": "AAPL.year_range_high",
                        "year_low": "AAPL.year_range_low",
                        "beta": "AAPL.beta",
                        "price": "AAPL.stock_price"
                    }
                }}
        })

    def getUrl(self, extension=None):
        """Return the full endpoint URL.

        Args:
            extension: optional path suffix, e.g. '/data/batch'.

        Returns:
            The base URL with ``extension`` appended, or the bare base URL
            when ``extension`` is None.
        """
        # Bug fix: the default ``extension=None`` previously raised
        # TypeError ("str + None"); treat None as an empty suffix.
        return self.url + (extension or "")
# Module-level side effect: issue the batch request immediately on import
# and print the raw JSON response body.
fRequest = finboxRequest()
response = requests.post(fRequest.getUrl('/data/batch'),
                         data=fRequest.data, headers=fRequest.headers)
print(response.text)
| {
"content_hash": "33c9339d4b3292b335ad5c5ab3ec9ad3",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 79,
"avg_line_length": 29.555555555555557,
"alnum_prop": 0.4781954887218045,
"repo_name": "kevin-f-liu/WSBAuto",
"id": "50532815b211070735086ab877df6b0b2d77ee4f",
"size": "1330",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/finbox_master/finbox_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43329"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop the already-state-removed ``service_id`` column at the DB level.

    Migration 0011 removed the field from Django's model state only; this
    follow-up issues the matching ALTER TABLE, so ``state_operations`` is
    intentionally empty.
    """

    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Adding indexes to large tables. These indexes should be created concurrently,
    #   unfortunately we can't run migrations outside of a transaction until Django
    #   1.10. So until then these should be run manually.
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False

    dependencies = [
        ('sentry', '0011_remove_pagerdutyservice_service_id_from_state'),
    ]

    operations = [
        migrations.SeparateDatabaseAndState(
            database_operations=[
                migrations.RunSQL(
                    """
                    ALTER TABLE "sentry_pagerdutyservice" DROP COLUMN "service_id";
                    """,
                    # Reverse re-adds the column as NULLable so the
                    # migration can be rolled back.
                    reverse_sql="""
                    ALTER TABLE "sentry_pagerdutyservice" ADD COLUMN "service_id" varchar(255) NULL;
                    """,
                )
            ],
            # No state changes: Django's model state was updated in 0011.
            state_operations=[],
        )
    ]
| {
"content_hash": "ec6a1bb3a0e1fe84e83df826075cd677",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 104,
"avg_line_length": 42.02564102564103,
"alnum_prop": 0.6259914582062233,
"repo_name": "beeftornado/sentry",
"id": "a9cba66ba57750c4745f8ac5f40ddf82593df280",
"size": "1663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/migrations/0012_remove_pagerdutyservice_service_id.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from autorski.models import Joke
class JokeAdmin(admin.ModelAdmin):
    """Admin options for Joke: list columns, full-text search, date filter."""

    # Change-list columns; ``effective_votes`` is the computed column
    # provided by the method below.
    list_display = ['author', 'effective_votes', 'date']
    search_fields = ['body', 'author']
    list_filter = ['date']

    def effective_votes(self, joke):
        """Expose the stored vote count as a list-display column."""
        return joke.votes
admin.site.register(Joke, JokeAdmin)
| {
"content_hash": "a0cdbd24da7c93ffe6bedc964fb0002c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 56,
"avg_line_length": 21.866666666666667,
"alnum_prop": 0.6829268292682927,
"repo_name": "jchmura/suchary-django",
"id": "62c6ee6fc2a71d5b7c1dc798cd52747d1cf3c696",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autorski/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1111"
},
{
"name": "HTML",
"bytes": "31788"
},
{
"name": "JavaScript",
"bytes": "39560"
},
{
"name": "Python",
"bytes": "46402"
}
],
"symlink_target": ""
} |
from pygments.formatters import HtmlFormatter
from django.shortcuts import render, get_object_or_404, redirect
from .models import Media
def share_view(request, url_hash):
    """Resolve a share link, bump its view count, and serve the media item.

    Args:
        request: the incoming HttpRequest.
        url_hash: short hash identifying the Media row.

    Returns:
        A redirect for URL-type items; otherwise the preview template for
        images/text, or the generic download template for other types.

    Raises:
        Http404: when no Media row matches ``url_hash``.
    """
    media_item = get_object_or_404(Media, url_hash=url_hash)

    # NOTE(review): read-modify-write is racy under concurrent requests;
    # an F('view_count') + 1 update would make the increment atomic.
    media_item.view_count += 1
    media_item.save()

    # Shared URLs are plain redirects -- no preview page.
    if media_item.media_type == "URL":
        return redirect(media_item.target_url)

    templates = {
        "IMG": "nimbus/media/share_img_preview.html",
        "TXT": "nimbus/media/share_txt_preview.html"
    }
    # Idiom fix: the original used dict.setdefault(), which mutated the
    # mapping just to read a fallback value; dict.get() expresses the intent.
    template = templates.get(media_item.media_type,
                             "nimbus/media/share_download.html")

    context = {
        "media_item": media_item
    }
    if media_item.media_type == "TXT":
        # Pygments CSS rules for the ``.highlight`` wrapper in the template.
        context["syntax_highlighting_style_defs"] = \
            HtmlFormatter().get_style_defs('.highlight')
    return render(request, template, context)
| {
"content_hash": "407213bfb32f671d0c185cec3aee599f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 96,
"avg_line_length": 31.185185185185187,
"alnum_prop": 0.6745843230403801,
"repo_name": "ethanal/Nimbus",
"id": "8a1463cd0eb6f5598747838dce12812b67e3e74c",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nimbus/apps/media/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3275"
},
{
"name": "HTML",
"bytes": "16755"
},
{
"name": "JavaScript",
"bytes": "4601"
},
{
"name": "Objective-C",
"bytes": "5971"
},
{
"name": "Python",
"bytes": "29675"
},
{
"name": "Swift",
"bytes": "61587"
}
],
"symlink_target": ""
} |
import os
from module_base import ModuleBase
from module_mixins import IntrospectModuleMixin,\
FileOpenDialogModuleMixin
import module_utils
from external import transfer_function_widget
import vtk
import wx
# Built-in presets for the editor: maps preset name to a list of transfer
# function control points (scalar_value, (r, g, b), opacity).
TF_LIBRARY = {
    'CT Hip (bones+vasculature)' : [
        (-1024.0, (0, 0, 0), 0),
        (184.65573770491801, (255, 128, 128), 0.0),
        (225.20534629404619, (255, 128, 128), 0.73857868020304562),
        (304.8359659781288, (255, 128, 128), 0.0),
        (377.70491803278696, (233, 231, 148), 0.0),
        (379.48967193195631, (233, 231, 148), 1.0),
        (3072.0, (255, 255, 255), 1)],

    'CT Hip (test)' : [
        (-1024.0, (0, 0, 0), 0),
        (117.50819672131138, (255, 128, 128), 0.0),
        (595.93442622950829, (255, 255, 255), 1.0),
        (3072.0, (255, 255, 255), 1)],

    'Panoramix (prototype)' : [
        (-1024.0, (0, 0, 0), 0),
        (136.33994334277622, (214, 115, 115), 0.0),
        (159.5467422096317, (230, 99, 99), 0.24788732394366197),
        (200.1586402266289, (255, 128, 128), 0.0),
        (252.37393767705385, (206, 206, 61), 0.40000000000000002),
        (287.18413597733706, (255, 128, 128), 0.0),
        (403.21813031161469, (206, 61, 67), 0.13521126760563384),
        (525.05382436260629, (255, 255, 255), 0.0),
        (612.07932011331445, (255, 255, 255), 0.92957746478873238),
        (3072.0, (255, 255, 255), 1)]
    }
class TransferFunctionEditor(IntrospectModuleMixin, FileOpenDialogModuleMixin, ModuleBase):
    """DeVIDE module: interactive editor for volume rendering transfer
    functions.

    Maintains a VTK opacity transfer function, colour transfer function
    and lookup table, all rebuilt from the control points edited in the
    wx GUI.
    """

    def __init__(self, module_manager):
        ModuleBase.__init__(self, module_manager)

        # Optional input volume; only used to auto-determine scalar range.
        self._volume_input = None

        self._opacity_tf = vtk.vtkPiecewiseFunction()
        self._colour_tf = vtk.vtkColorTransferFunction()
        self._lut = vtk.vtkLookupTable()

        # list of tuples, where each tuple (scalar_value, (r,g,b,a))
        self._config.transfer_function = [
            (0, (0,0,0), 0),
            (255, (255,255,255), 1)
        ]

        self._view_frame = None
        self._create_view_frame()
        self._bind_events()

        self.view()

        # all modules should toggle this once they have shown their
        # stuff.
        self.view_initialised = True

        self.config_to_logic()
        self.logic_to_config()
        self.config_to_view()

    def _bind_events(self):
        """Wire all GUI events of the editor frame to their handlers."""

        def handler_blaat(event):
            # Reflect the currently selected control point in the scalar /
            # colour / opacity readouts.
            tf_widget = event.GetEventObject() # the tf_widget
            ret = tf_widget.get_current_point_info()
            if not ret is None:
                val, col, opacity = ret
                vf = self._view_frame
                vf.colour_button.SetBackgroundColour(col)
                vf.cur_scalar_text.SetValue('%.2f' % (val,))
                vf.cur_col_text.SetValue(str(col))
                vf.cur_opacity_text.SetValue('%.2f' % (opacity,))

        vf = self._view_frame
        tfw = vf.tf_widget
        tfw.Bind(transfer_function_widget.EVT_CUR_PT_CHANGED,
                 handler_blaat)

        def handler_colour_button(event):
            # Let the user pick a colour for the selected control point.
            coldialog = wx.ColourDialog(vf, tfw.colour_data)
            if coldialog.ShowModal() == wx.ID_OK:
                colour = coldialog.GetColourData().GetColour().Get()
                tfw.colour_data = coldialog.GetColourData()
                tfw.set_current_point_colour(colour)

        vf.colour_button.Bind(wx.EVT_BUTTON, handler_colour_button)

        def handler_delete_button(event):
            tfw.delete_current_point()

        vf.delete_button.Bind(wx.EVT_BUTTON, handler_delete_button)

        def handler_auto_range_button(event):
            # Fill the min/max fields from the connected input volume.
            try:
                range = self._volume_input.GetScalarRange()
            except AttributeError:
                # No input connected (or it has no GetScalarRange yet).
                self._module_manager.log_error(
                    'Could not determine range from input. ' +
                    'Have you connected some input data and ' +
                    'has the network executed at least once?')
            else:
                vf = self._view_frame
                vf.scalar_min_text.SetValue(str(range[0]))
                vf.scalar_max_text.SetValue(str(range[1]))

        vf.auto_range_button.Bind(wx.EVT_BUTTON,
                                  handler_auto_range_button)

        def handler_apply_range_button(event):
            # Push the user-entered scalar range into the widget.
            try:
                min = float(vf.scalar_min_text.GetValue())
                max = float(vf.scalar_max_text.GetValue())
            except ValueError:
                self._module_manager.log_error(
                    'Invalid scalar MIN / MAX.')
            else:
                tfw.set_min_max(min, max)

        vf.apply_range_button.Bind(wx.EVT_BUTTON,
                                   handler_apply_range_button)

        def handler_load_preset_button(event):
            # Replace the current points with the selected library preset.
            key = vf.preset_choice.GetStringSelection()
            preset_tf = TF_LIBRARY[key]
            tfw.set_transfer_function(preset_tf)

        vf.load_preset_button.Bind(wx.EVT_BUTTON,
                                   handler_load_preset_button)

        def handler_file_save_button(event):
            filename = self.filename_browse(self._view_frame,
                'Select DVTF filename to save to',
                'DeVIDE Transfer Function (*.dvtf)|*.dvtf|All files (*)|*',
                style=wx.SAVE)
            if filename:
                # if the user has NOT specified any fileextension, we
                # add .dvtf. (on Win this gets added by the
                # FileSelector automatically, on Linux it doesn't)
                if os.path.splitext(filename)[1] == '':
                    filename = '%s.dvtf' % (filename,)
                self._save_tf_to_file(filename)

        vf.file_save_button.Bind(wx.EVT_BUTTON,
                                 handler_file_save_button)

        def handler_file_load_button(event):
            filename = self.filename_browse(self._view_frame,
                'Select DVTF filename to load',
                'DeVIDE Transfer Function (*.dvtf)|*.dvtf|All files (*)|*',
                style=wx.OPEN)
            if filename:
                self._load_tf_from_file(filename)

        vf.file_load_button.Bind(wx.EVT_BUTTON,
                                 handler_file_load_button)

        # auto_range_button

    def _create_view_frame(self):
        """Instantiate the wx view frame and populate the preset choice."""
        import resources.python.tfeditorframe
        reload(resources.python.tfeditorframe)

        self._view_frame = module_utils.instantiate_module_view_frame(
            self, self._module_manager,
            resources.python.tfeditorframe.TFEditorFrame)

        module_utils.create_standard_object_introspection(
            self, self._view_frame, self._view_frame.view_frame_panel,
            {'Module (self)' : self})

        # add the ECASH buttons
        module_utils.create_eoca_buttons(self, self._view_frame,
                                         self._view_frame.view_frame_panel)

        # and customize the presets choice
        vf = self._view_frame
        keys = TF_LIBRARY.keys()
        keys.sort()
        vf.preset_choice.Clear()
        for key in keys:
            vf.preset_choice.Append(key)
        vf.preset_choice.Select(0)

    def close(self):
        """Disconnect all inputs and destroy the view frame."""
        for i in range(len(self.get_input_descriptions())):
            self.set_input(i, None)

        self._view_frame.Destroy()
        del self._view_frame

        ModuleBase.close(self)

    def get_input_descriptions(self):
        return ('Optional input volume',)

    def get_output_descriptions(self):
        return ('VTK Opacity Transfer Function',
                'VTK Colour Transfer Function',
                'VTK Lookup Table')

    def set_input(self, idx, input_stream):
        # Only one input slot, so idx is effectively ignored.
        self._volume_input = input_stream

    def get_output(self, idx):
        # Output index matches get_output_descriptions() ordering.
        if idx == 0:
            return self._opacity_tf
        elif idx == 1:
            return self._colour_tf
        else:
            return self._lut

    def logic_to_config(self):
        pass

    def config_to_logic(self):
        """Rebuild the VTK transfer functions and LUT from config points."""
        self._opacity_tf.RemoveAllPoints()
        self._colour_tf.RemoveAllPoints()

        for p in self._config.transfer_function:
            self._opacity_tf.AddPoint(p[0], p[2])
            # Colours are stored 0-255; VTK wants 0.0-1.0.
            r,g,b = [i / 255.0 for i in p[1]]
            self._colour_tf.AddRGBPoint(
                p[0],r,g,b)

        # Sample the continuous transfer functions into a discrete LUT.
        lut_res = 1024
        minmax = self._view_frame.tf_widget.get_min_max()
        self._lut.SetTableRange(minmax)
        self._lut.SetNumberOfTableValues(lut_res)
        # lut_res - 1: lut_res points == lut_res-1 intervals
        incr = (minmax[1] - minmax[0]) / float(lut_res - 1)
        for i in range(lut_res):
            v = minmax[0] + i * incr
            rgb = self._colour_tf.GetColor(v)
            o = self._opacity_tf.GetValue(v)
            self._lut.SetTableValue(i, rgb + (o,))

    def view_to_config(self):
        self._config.transfer_function = \
            self._view_frame.tf_widget.get_transfer_function()

    def config_to_view(self):
        vf = self._view_frame
        tfw = vf.tf_widget
        tfw.set_transfer_function(
            self._config.transfer_function)

        min,max = tfw.get_min_max()
        vf.scalar_min_text.SetValue('%.1f' % (min,))
        vf.scalar_max_text.SetValue('%.1f' % (max,))

    def view(self):
        self._view_frame.Show()
        self._view_frame.Raise()

    def execute_module(self):
        # Purely interactive module; nothing to compute on execute.
        pass

    def _load_tf_from_file(self, filename):
        """Load control points from a DVTF file into the widget."""
        try:
            loadf = file(filename, 'r')
            # SECURITY NOTE(review): eval() of the file contents -- DVTF
            # files from untrusted sources can run arbitrary expressions
            # (stripping builtins is not a real sandbox).
            tf = eval(loadf.read(), {"__builtins__": {}})
            loadf.close()
        except Exception, e:
            self._module_manager.log_error_with_exception(
                'Could not load transfer function: %s.' %
                (str(e),))
        else:
            self._view_frame.tf_widget.set_transfer_function(
                tf)

    def _save_tf_to_file(self, filename):
        """Write the widget's control points to a DVTF file."""
        tf = self._view_frame.tf_widget.get_transfer_function()

        try:
            savef = file(filename, 'w')
            savef.write("# DeVIDE Transfer Function DVTF v1.0\n%s" % \
                        (str(tf),))
            savef.close()
        except Exception, e:
            self._module_manager.log_error(
                'Error saving transfer function: %s.' % (str(e),))
        else:
            self._module_manager.log_message(
                'Saved %s.' % (filename,))
| {
"content_hash": "9afef4b163b4d1404c4605bed47b59cb",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 91,
"avg_line_length": 33.45192307692308,
"alnum_prop": 0.5398102903133084,
"repo_name": "chrisidefix/devide",
"id": "9cb16f576ecc27f00f07da74de6bca3c1962920b",
"size": "10762",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "modules/viewers/TransferFunctionEditor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Diff",
"bytes": "1373"
},
{
"name": "NSIS",
"bytes": "2786"
},
{
"name": "Python",
"bytes": "3104368"
},
{
"name": "Shell",
"bytes": "7369"
}
],
"symlink_target": ""
} |
import tensorflow as tf
def feed_forward_nn(input_tensor,
                    num_hidden_layers,
                    output_dim,
                    keep_prob=None,
                    hidden_dim=-1,
                    activation="tanh",
                    normalizer="none"):
  """Creates a fully connected feed forward neural network.

  Args:
    input_tensor: shape [batch_size*num_nodes, input_dim], assumed to be
      the final node states after the propagation step concat with the
      initial nodes.
    num_hidden_layers (int32): number of hidden layers in the network
      set to 0 for a linear network.
    output_dim (int32): dimension of the output of the network.
    keep_prob (scalar tensor or float): Dropout keep prob.
    hidden_dim (int32): size of the hidden layers
    activation (string): tanh or relu
    normalizer (string): layer or none

  Returns:
    tensor of shape [batch_size * num_nodes, output_dim]
    note there is no non-linearity applied to the output.

  Raises:
    ValueError: If given activation or normalizer not supported.
  """
  # (Doc fix: the code raises ValueError, not a bare Exception.)
  if activation == "tanh":
    act = tf.tanh
  elif activation == "relu":
    act = tf.nn.relu
  else:
    raise ValueError("Invalid activation: {}".format(activation))

  if normalizer == "layer":
    norm = tf.contrib.layers.layer_norm
  elif normalizer == "none":
    norm = None
  else:
    raise ValueError("Invalid normalizer: {}".format(normalizer))

  h_nn = input_tensor  # first set of "hidden" units is the input
  # range (not py2-only xrange) for py2/py3 compatibility.
  for i in range(num_hidden_layers):
    with tf.name_scope("fully_connected/layer{}".format(i + 1)):
      # NOTE(review): tf.name_scope does not affect tf.get_variable names;
      # variable uniqueness relies on the explicit W{i}/b{i} suffixes.
      layer_dim = h_nn.get_shape()[1].value
      w = tf.get_variable("W{}".format(i), shape=[layer_dim, hidden_dim])
      b = tf.get_variable("b{}".format(i), shape=[hidden_dim])
      h_nn = act(tf.matmul(h_nn, w) + b)
      if norm is not None:
        h_nn = norm(h_nn)
      if keep_prob is not None:
        h_nn = tf.nn.dropout(h_nn, keep_prob)
      tf.summary.histogram("h_nn{}".format(i), h_nn)

  layer_dim = h_nn.get_shape()[1].value
  output_w = tf.get_variable("output_W", shape=[layer_dim, output_dim])
  output_b = tf.get_variable("output_b", shape=[output_dim])

  # final output has no non-linearity, this is applied outside this function
  nn_output = tf.matmul(h_nn, output_w) + output_b
  return nn_output
| {
"content_hash": "d0705d9c45687929eae8a55eac26ca76",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 34.4264705882353,
"alnum_prop": 0.6292182827851346,
"repo_name": "brain-research/mpnn",
"id": "3d74ba7cf67f174ed993f2eb71a759925dcbc839",
"size": "3003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graph_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55519"
}
],
"symlink_target": ""
} |
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.functions.pooling import pooling_2d
from chainer.utils import conv
from chainer.utils import conv_nd
from chainer.utils import type_check
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
_cudnn_version = libcudnn.getVersion()
_check_cudnn_acceptable_type = pooling_2d._check_cudnn_acceptable_type
class _PoolingND(function.Function):

    """Base class of pooling function over a set of N-dimensional planes."""

    def __init__(self, ndim, ksize, stride=None, pad=0, cover_all=True):
        # Default stride to the window size (non-overlapping pooling).
        if stride is None:
            stride = ksize

        self.ndim = ndim
        # Normalize scalar-or-tuple arguments to ndim-length tuples.
        self.ksize = conv_nd.as_tuple(ksize, ndim)
        self.stride = conv_nd.as_tuple(stride, ndim)
        self.pad = conv_nd.as_tuple(pad, ndim)

        self.cover_all = cover_all
        self._used_cudnn = False

    def check_type_forward(self, in_types):
        # Exactly one floating-point input with layout
        # (batch, channels, d_1, ..., d_ndim).
        type_check.expect(
            in_types.size() == 1,
            in_types[0].dtype.kind == 'f',
            in_types[0].ndim == 2 + self.ndim
        )

    def forward_gpu(self, x):
        """GPU forward pass via cuDNN pooling."""
        self._used_cudnn = True

        # Implementation using cuDNN.
        x = cuda.cupy.ascontiguousarray(x[0])
        n, c = x.shape[:2]
        dims = x.shape[2:]
        # Output extent for every spatial dimension.
        ys = tuple(conv.get_conv_outsize(d, k, s, p, self.cover_all)
                   for d, k, s, p in six.moves.zip(
                       dims, self.ksize, self.stride, self.pad))
        y_shape = (n, c) + ys
        y = cuda.cupy.empty(y_shape, dtype=x.dtype)

        handle = cudnn.get_handle()
        pool_desc = self.create_pool_desc()
        x_desc = cudnn.create_tensor_descriptor(x)
        y_desc = cudnn.create_tensor_descriptor(y)
        # alpha/beta scaling factors must match the data precision.
        oz_dtype = 'd' if x.dtype == 'd' else 'f'
        one = numpy.array(1, dtype=oz_dtype).ctypes
        zero = numpy.array(0, dtype=oz_dtype).ctypes
        libcudnn.poolingForward(
            handle, pool_desc.value, one.data, x_desc.value,
            x.data.ptr, zero.data, y_desc.value, y.data.ptr)
        # Keep the pooled output: cuDNN's backward needs it.
        self.retain_outputs((0,))
        return y,

    def backward_gpu(self, x, gy):
        """GPU backward pass via cuDNN pooling."""
        # Implementation using cudnn
        x = cuda.cupy.ascontiguousarray(x[0])
        y = self.output_data[0]
        handle = cudnn.get_handle()
        pool_desc = self.create_pool_desc()

        gy = cuda.cupy.ascontiguousarray(gy[0])
        x_desc = cudnn.create_tensor_descriptor(x)
        y_desc = cudnn.create_tensor_descriptor(gy)

        oz_dtype = 'd' if x.dtype == 'd' else 'f'
        one = numpy.array(1, dtype=oz_dtype).ctypes
        zero = numpy.array(0, dtype=oz_dtype).ctypes
        gx = cuda.cupy.empty_like(x)
        libcudnn.poolingBackward(
            handle, pool_desc.value, one.data, y_desc.value,
            y.data.ptr, y_desc.value, gy.data.ptr, x_desc.value,
            x.data.ptr, zero.data, x_desc.value, gx.data.ptr)
        return gx,

    def create_pool_desc(self):
        # Subclasses (max/average pooling) supply the cuDNN pooling
        # descriptor.
        raise NotImplementedError()
| {
"content_hash": "b7aefaa0d80318b46e46fec73e55c5bd",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 76,
"avg_line_length": 32.12765957446808,
"alnum_prop": 0.5960264900662252,
"repo_name": "delta2323/chainer",
"id": "232004753bf25c3f13bae29d9c2daf3226576b27",
"size": "3020",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chainer/functions/pooling/pooling_nd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "2596439"
}
],
"symlink_target": ""
} |
"""
PointStream -- The main galvo multiple object drawing algorithm.
This code is responsible for drawing multiple objects.
It will need to be improved for efficiency.
FIXME/NOTE: The documentation / variable names are a bit out of date.
"Ball", where it occurs, means "entity object".
"""
import math
import random
import itertools
import sys
import thread
import time
#import pygame
# GLOBALS
from globalvals import *
from entities import *
class PointStream(object):
    """Infinite laser-point generator multiplexing all drawable objects.

    NOTE(review): entity objects are assumed to expose cacheFirstPt /
    produce / firstPt / lastPt / drawn / skipDraw / destroy / doBlanking --
    confirm against entities.py.
    """

    def __init__(self):
        self.called = False
        self.stream = self.produce()

        # A list of all the objects to draw
        # XXX: For now, add and remove manually.
        self.objects = []

    def produce(self):
        """
        This infinite loop functions as an infinite point generator.
        It generates points for both balls as well as the "blanking"
        that must occur between them.
        """
        while True:
            #print "POINT STREAM LOOP BEGIN"
            curObj = None # XXX SCOPE HERE FOR DEBUG ONLY
            nextObj = None # XXX SCOPE HERE FOR DEBUG ONLY

            try:
                # Generate and cache the first points of the objects.
                # Necessary in order to slow down galvo tracking as we
                # move to the next object.
                for b in self.objects:
                    b.cacheFirstPt()

                # Objects to destroy at end of loop
                destroy = []

                """
                # TOPOLOGICAL SORT OF OBJECTS TO MAKE DRAWING W/
                # GALVOS EFFICIENT!
                sortedObjects = []
                presort = self.objects[:]
                sortedObjects.append(presort.pop(0))
                while len(presort):
                    #lowx = presort[0].x
                    lastObj = sortedObjects[-1]
                    lowdist = 10000000
                    li = 0
                    for i in range(len(presort)):
                        obj = presort[i]
                        a = obj.x - lastObj.x
                        b = obj.y - lastObj.y
                        c = math.sqrt(a**2 + b**2)
                        if c < lowdist:
                            lowdist = c
                            li = i
                    sortedObjects.append(presort.pop(li))
                #sortedObjects = self.objects[:]
                self.objects = sortedObjects # XXX XXX XXX XXX TURN OFF HERE
                """

                # Draw all the objects...
                for i in range(len(self.objects)):
                    curObj = self.objects[i]
                    # Wraps around so the last object tracks back to the
                    # first one.
                    nextObj = self.objects[(i+1)%len(self.objects)]

                    # Skip draw?
                    if curObj.skipDraw:
                        continue

                    # Prepare to cull object if it is marked destroy
                    if curObj.destroy:
                        destroy.append(i)

                    # Blanking (on the way in), if set: hold the laser off
                    # at the object's first point.
                    if curObj.doBlanking:
                        p = curObj.firstPt
                        p = (p[0], p[1], 0, 0, 0)
                        for x in range(BLANK_SAMPLE_PTS):
                            yield p

                    # Draw the object
                    if not curObj.drawn:
                        yield curObj.firstPt # This was cached upfront
                        for x in curObj.produce():
                            yield x

                    """
                    # XXX: BULLET SPECIFIC -- Remove?
                    if type(curObj) == Bullet:
                        # Paint last pt for smoothness
                        # XXX: Remove?
                        for x in xrange(BLANK_SAMPLE_PTS):
                            yield curObj.firstPt
                        # Paint empty for smoothness
                        # XXX: Remove?
                        for x in xrange(BLANK_SAMPLE_PTS):
                            yield (curObj.lastPt[0], curObj.lastPt[1],
                                   0, 0, 0)
                    """

                    # Blanking (on the way out), if set
                    if curObj.doBlanking:
                        p = curObj.lastPt
                        p = (p[0], p[1], 0, 0, 0)
                        for x in range(BLANK_SAMPLE_PTS):
                            yield p

                    # Now, track to the next object.
                    lastX = curObj.lastPt[0]
                    lastY = curObj.lastPt[1]
                    xDiff = curObj.lastPt[0] - nextObj.firstPt[0]
                    yDiff = curObj.lastPt[1] - nextObj.firstPt[1]

                    mv = TRACKING_SAMPLE_PTS
                    # NOTE(review): this loop variable shadows the outer
                    # object index ``i``; harmless in Python (the outer
                    # ``for`` re-pulls from its iterator) but confusing.
                    for i in xrange(mv):
                        # Linear interpolation from lastPt to the next
                        # object's firstPt.
                        percent = i/float(mv)
                        xb = int(lastX - xDiff*percent)
                        yb = int(lastY - yDiff*percent)

                        # If we want to 'see' the tracking path (debug)
                        if SHOW_TRACKING_PATH:
                            yield (xb, yb, 0, CMAX, 0)
                        else:
                            yield (xb, yb, 0, 0, 0)

                # Reset object state (nasty hack for point caching)
                for b in self.objects:
                    b.drawn = False

                # Items to destroy: pop from highest index down so earlier
                # removals don't shift later indices.
                #print destroy
                destroy.sort()
                destroy.reverse()
                for i in destroy:
                    self.objects.pop(i)

            except Exception as e:
                import sys, traceback
                # NOTE(review): this handler loops forever printing the
                # traceback, so point production halts on the first error.
                # Appears intentional for debugging -- confirm.
                while True:
                    print '\n---------------------'
                    print 'PointStream Exception: %s' % e
                    traceback.print_tb(sys.exc_info()[2])
                    print "---------------------\n"

    def read(self, n):
        """Pull the next ``n`` points from the generator as a list."""
        d = [self.stream.next() for i in xrange(n)]
        return d
| {
"content_hash": "a076be446b040246bada559ed2df1f70",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 76,
"avg_line_length": 35.58083832335329,
"alnum_prop": 0.42998990238976775,
"repo_name": "topher515/laser-fingers",
"id": "3f348b171e4f1e955e5483904f78480c7eba6972",
"size": "5942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pointstream.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58020"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
} |
import morepath
class App(morepath.App):
    """Morepath application for this fixture package."""
    pass
class Foo:
    """Plain model object exposed on the ``foo`` path."""
    pass
@App.path(path="foo", model=Foo)
def get_foo():
return Foo()
| {
"content_hash": "82c164e2bd50a17e124f67eb5bc0d075",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 32,
"avg_line_length": 10.071428571428571,
"alnum_prop": 0.6312056737588653,
"repo_name": "morepath/morepath",
"id": "1e284d1a9749e4ec9f0daecd10b6a634da5d1659",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture_packages/base/base/m.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "436524"
}
],
"symlink_target": ""
} |
"""Unit test for treadmill.scheduler.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
import unittest
import sys
# Disable W0611: Unused import
import tests.treadmill_test_skip_windows # pylint: disable=W0611
import mock
import numpy as np
import six
from treadmill import scheduler
_TRAITS = dict()
# Helper functions to convert user readable traits to bit mask.
def _trait2int(trait):
if trait not in _TRAITS:
_TRAITS[trait] = len(_TRAITS) + 1
return 2 ** _TRAITS[trait]
def _traits2int(traits):
return six.moves.reduce(
lambda acc, t: acc | _trait2int(t),
traits,
0
)
def app_list(count, name, *args, **kwargs):
    """Return list of apps."""
    # Each app is named '<name>-<index>' and shares ``name`` as affinity.
    apps = []
    for idx in range(count):
        apps.append(
            scheduler.Application('%s-%d' % (name, idx),
                                  *args, affinity=name, **kwargs))
    return apps
class OpsTest(unittest.TestCase):
    """Test comparison operators."""

    # Disable warning accessing protected members.
    #
    # pylint: disable=W0212

    def test_ops(self):
        """Test comparison operators."""
        # _all_gt/_any_gt/_all_lt compare resource vectors element-wise.
        self.assertTrue(scheduler._all_gt([3, 3], [2, 2]))
        self.assertTrue(scheduler._any_gt([3, 2], [2, 2]))
        self.assertFalse(scheduler._all_gt([3, 2], [2, 2]))
        self.assertTrue(scheduler._all_lt([2, 2], [3, 3]))
class AllocationTest(unittest.TestCase):
"""treadmill.scheduler.Allocation tests."""
def setUp(self):
    """Run all allocation tests with 2-dimensional resource vectors."""
    scheduler.DIMENSION_COUNT = 2
    super(AllocationTest, self).setUp()
def test_utilization(self):
    """Test utilization calculation."""
    alloc = scheduler.Allocation([10, 10])
    alloc.add(scheduler.Application('app1', 100, [1, 1], 'app1'))
    alloc.add(scheduler.Application('app2', 100, [2, 2], 'app1'))
    alloc.add(scheduler.Application('app3', 100, [3, 3], 'app1'))

    # First element is rank.
    util_q = list(alloc.utilization_queue([20, 20]))
    self.assertEqual(100, util_q[0][0])
    self.assertEqual(100, util_q[1][0])
    self.assertEqual(100, util_q[2][0])

    # Second and third elements are before / after utilization
    # (cumulative demand minus reserved, over reserved + free).
    self.assertEqual(-10 / (10. + 20), util_q[0][1])
    self.assertEqual(-9 / (10. + 20), util_q[0][2])
    self.assertEqual(-7 / (10. + 20), util_q[1][2])
    self.assertEqual(-9 / (10. + 20), util_q[1][1])
    self.assertEqual(-4 / (10. + 20), util_q[2][2])
    self.assertEqual(-7 / (10. + 20), util_q[2][1])

    # Applications are sorted by priority.
    alloc = scheduler.Allocation([10, 10])
    alloc.add(scheduler.Application('app1', 10, [1, 1], 'app1'))
    alloc.add(scheduler.Application('app2', 50, [2, 2], 'app1'))
    alloc.add(scheduler.Application('app3', 100, [3, 3], 'app1'))

    # Highest priority (app3) comes first in the queue.
    util_q = list(alloc.utilization_queue([20., 20.]))
    self.assertEqual(-10 / (10. + 20), util_q[0][1])
    self.assertEqual(-7 / (10. + 20), util_q[0][2])
    self.assertEqual(-7 / (10. + 20), util_q[1][1])
    self.assertEqual(-5 / (10. + 20), util_q[1][2])
    self.assertEqual(-5 / (10. + 20), util_q[2][1])
    self.assertEqual(-4 / (10. + 20), util_q[2][2])
def test_running_order(self):
    """Test apps are ordered by status (running first) for same prio."""
    alloc = scheduler.Allocation([10, 10])
    alloc.add(scheduler.Application('app1', 5, [1, 1], 'app1'))
    alloc.add(scheduler.Application('app2', 5, [2, 2], 'app1'))
    alloc.add(scheduler.Application('app3', 5, [3, 3], 'app1'))

    # All same priority: app1 (added first) heads the queue.
    queue = list(alloc.utilization_queue([20., 20.]))
    self.assertEqual(alloc.apps['app1'], queue[0][-1])

    # Marking app2 as placed on a server promotes it ahead of app1.
    alloc.apps['app2'].server = 'abc'
    queue = list(alloc.utilization_queue([20., 20.]))
    self.assertEqual(alloc.apps['app2'], queue[0][-1])
    def test_utilization_max(self):
        """Tests max utilization cap on the allocation."""
        alloc = scheduler.Allocation([3, 3])
        alloc.add(scheduler.Application('app1', 1, [1, 1], 'app1'))
        alloc.add(scheduler.Application('app2', 1, [2, 2], 'app1'))
        alloc.add(scheduler.Application('app3', 1, [3, 3], 'app1'))
        # No cap: all three apps appear in the queue.
        self.assertEqual(3, len(list(alloc.utilization_queue([20., 20.]))))
        # Now set max_utilization to 1
        alloc.max_utilization = 1
        # XXX: Broken test. Needs upgrade to V3
        # XXX:
        # XXX: self.assertEqual(
        # XXX:     2,
        # XXX:     len(list(alloc.utilization_queue([20., 20.])))
        # XXX: )
        # Clearing the cap restores the full queue.
        alloc.set_max_utilization(None)
        self.assertEqual(3, len(list(alloc.utilization_queue([20., 20.]))))
def test_priority_zero(self):
"""Tests priority zero apps."""
alloc = scheduler.Allocation([3, 3])
alloc.add(scheduler.Application('app1', 1, [1, 1], 'app1'))
alloc.add(scheduler.Application('app2', 0, [2, 2], 'app1'))
# default max_utilization still lets prio 0 apps through
queue = alloc.utilization_queue([20., 20.])
self.assertEqual([100, 100], [item[0] for item in queue])
alloc.set_max_utilization(100)
# setting max_utilization will cut off prio 0 apps
queue = alloc.utilization_queue([20., 20.])
self.assertEqual(
[100, sys.maxsize],
[item[0] for item in queue]
)
def test_rank_adjustment(self):
"""Test rank adjustment"""
alloc = scheduler.Allocation()
alloc.update([3, 3], 100, 10)
alloc.add(scheduler.Application('app1', 1, [1, 1], 'app1'))
alloc.add(scheduler.Application('app2', 1, [2, 2], 'app1'))
alloc.add(scheduler.Application('app3', 1, [3, 3], 'app1'))
queue = list(alloc.utilization_queue([20., 20.]))
self.assertEqual(90, queue[0][0])
self.assertEqual(90, queue[1][0])
self.assertEqual(100, queue[2][0])
def test_zerovector(self):
"""Test updating allocation with allocation vector containing 0's"""
alloc = scheduler.Allocation(None)
alloc.update([1, 0], None, None)
self.assertEqual(1.0, alloc.reserved[0])
self.assertEqual(0, alloc.reserved[1])
def test_utilization_no_reservation(self):
"""Checks that any utilization without reservation is VERY large."""
alloc = scheduler.Allocation(None)
alloc.add(scheduler.Application('app1', 1, [1., 1.], 'app1'))
queue = list(alloc.utilization_queue(np.array([10., 10.])))
self.assertEqual(0 / 10, queue[0][1])
self.assertEqual(1 / 10, queue[0][2])
def test_duplicate(self):
"""Checks behavior when adding duplicate app."""
alloc = scheduler.Allocation(None)
alloc.add(scheduler.Application('app1', 0, [1, 1], 'app1'))
self.assertEqual(
1, len(list(alloc.utilization_queue(np.array([5., 5.])))))
alloc.add(scheduler.Application('app1', 0, [1, 1], 'app1'))
self.assertEqual(
1, len(list(alloc.utilization_queue(np.array([5., 5.])))))
    def test_sub_allocs(self):
        """Test utilization calculation with sub-allocs."""
        alloc = scheduler.Allocation([3, 3])
        self.assertEqual(3, alloc.total_reserved()[0])
        # Result unused; exercises iterating an empty queue.
        queue = list(alloc.utilization_queue([20., 20.]))

        # Sub-alloc reservation is included in the parent's total.
        sub_alloc_a = scheduler.Allocation([5, 5])
        alloc.add_sub_alloc('a1/a', sub_alloc_a)
        self.assertEqual(8, alloc.total_reserved()[0])
        sub_alloc_a.add(scheduler.Application('1a', 3, [2, 2], 'app1'))
        sub_alloc_a.add(scheduler.Application('2a', 2, [3, 3], 'app1'))
        sub_alloc_a.add(scheduler.Application('3a', 1, [5, 5], 'app1'))
        queue = list(alloc.utilization_queue([20., 20.]))
        _rank, _util_b, util_a, _pending, _order, app = queue[0]
        self.assertEqual('1a', app.name)
        self.assertEqual((2 - (5 + 3)) / (20 + (5 + 3)), util_a)

        sub_alloc_b = scheduler.Allocation([10, 10])
        alloc.add_sub_alloc('a1/b', sub_alloc_b)
        sub_alloc_b.add(scheduler.Application('1b', 3, [2, 2], 'app1'))
        sub_alloc_b.add(scheduler.Application('2b', 2, [3, 3], 'app1'))
        sub_alloc_b.add(scheduler.Application('3b', 1, [5, 5], 'app1'))
        queue = list(alloc.utilization_queue([20., 20.]))
        self.assertEqual(6, len(queue))
        self.assertEqual(18, alloc.total_reserved()[0])
        # For each sub-alloc (and self) the least utilized app is 1.
        # The sub_alloc_b is largest, so utilization smallest, 1b will be
        # first.
        _rank, _util_b, util_a, _pending, _order, app = queue[0]
        self.assertEqual('1b', app.name)
        self.assertEqual((2 - 18) / (20 + 18), util_a)

        # Add prio 0 app to each, make sure they all end up last.
        alloc.add(scheduler.Application('1-zero', 0, [2, 2], 'app1'))
        sub_alloc_b.add(scheduler.Application('b-zero', 0, [5, 5], 'app1'))
        sub_alloc_a.add(scheduler.Application('a-zero', 0, [5, 5], 'app1'))
        queue = list(alloc.utilization_queue([20., 20.]))
        self.assertIn('1-zero', [item[-1].name for item in queue[-3:]])
        self.assertIn('a-zero', [item[-1].name for item in queue[-3:]])
        self.assertIn('b-zero', [item[-1].name for item in queue[-3:]])

        # Check that utilization of prio 0 apps is always max float.
        self.assertEqual(
            [float('inf')] * 3,
            [
                util_b
                for (_rank,
                     util_b,
                     _util_a,
                     _pending,
                     _order,
                     _app) in queue[-3:]
            ]
        )
def test_sub_alloc_reservation(self):
"""Test utilization calculation is fair between sub-allocs."""
alloc = scheduler.Allocation()
sub_alloc_poor = scheduler.Allocation()
alloc.add_sub_alloc('poor', sub_alloc_poor)
sub_alloc_poor.add(scheduler.Application('p1', 1, [1, 1], 'app1'))
sub_alloc_rich = scheduler.Allocation([5, 5])
sub_alloc_rich.add(scheduler.Application('r1', 1, [5, 5], 'app1'))
sub_alloc_rich.add(scheduler.Application('r2', 1, [5, 5], 'app1'))
alloc.add_sub_alloc('rich', sub_alloc_rich)
queue = list(alloc.utilization_queue([20., 20.]))
self.assertEqual('r1', queue[0][-1].name)
self.assertEqual('p1', queue[1][-1].name)
self.assertEqual('r2', queue[2][-1].name)
def test_visitor(self):
"""Test queue visitor"""
alloc = scheduler.Allocation()
sub_alloc_a = scheduler.Allocation()
sub_alloc_a.add(scheduler.Application('a1', 1, [1, 1], 'app1'))
alloc.add_sub_alloc('a', sub_alloc_a)
sub_alloc_b = scheduler.Allocation()
sub_alloc_b.add(scheduler.Application('b1', 1, [5, 5], 'app1'))
sub_alloc_b.add(scheduler.Application('b2', 1, [5, 5], 'app1'))
alloc.add_sub_alloc('b', sub_alloc_b)
result = []
def _visitor(_alloc, entry, _acc_demand):
result.append(entry)
list(alloc.utilization_queue([20., 20.],
visitor=_visitor))
self.assertEqual(6, len(result))
class TraitSetTest(unittest.TestCase):
    """treadmill.scheduler.TraitSet tests."""

    def setUp(self):
        scheduler.DIMENSION_COUNT = 2
        super(TraitSetTest, self).setUp()

    def test_traits(self):
        """Test trait inheritance."""
        trait_a = 1 << 0
        trait_x = 1 << 2
        trait_y = 1 << 3
        trait_z = 1 << 4

        fset_a = scheduler.TraitSet(trait_a)
        fset_xz = scheduler.TraitSet(trait_x | trait_z)
        fset_xy = scheduler.TraitSet(trait_x | trait_y)

        self.assertTrue(fset_a.has(trait_a))

        # Adding a child trait set makes its traits visible on the parent.
        fset_a.add('xy', fset_xy.traits)
        for trait in (trait_a, trait_x, trait_y):
            self.assertTrue(fset_a.has(trait))

        fset_a.add('xz', fset_xz.traits)
        for trait in (trait_x, trait_y, trait_z):
            self.assertTrue(fset_a.has(trait))

        # Removing 'xy' drops y; x survives because 'xz' still provides it.
        fset_a.remove('xy')
        self.assertTrue(fset_a.has(trait_x))
        self.assertFalse(fset_a.has(trait_y))
        self.assertTrue(fset_a.has(trait_z))

        # Removing 'xz' clears all inherited traits.
        fset_a.remove('xz')
        for trait in (trait_x, trait_y, trait_z):
            self.assertFalse(fset_a.has(trait))
class NodeTest(unittest.TestCase):
    """treadmill.scheduler node (Bucket/Server) tests."""

    def setUp(self):
        # All capacity vectors in these tests are 2-dimensional.
        scheduler.DIMENSION_COUNT = 2
        super(NodeTest, self).setUp()

    def test_bucket_capacity(self):
        """Tests adjustment of bucket capacity up and down."""
        parent = scheduler.Bucket('top')
        bucket = scheduler.Bucket('b')
        parent.add_node(bucket)
        srv1 = scheduler.Server('n1', [10, 5], valid_until=500)
        bucket.add_node(srv1)
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([10., 5.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([10., 5.])))
        # Bucket free capacity is the per-dimension max over children,
        # so [10, 5] and [5, 10] combine to [10, 10].
        srv2 = scheduler.Server('n2', [5, 10], valid_until=500)
        bucket.add_node(srv2)
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([10., 10.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([10., 10.])))
        # A strictly smaller server does not change the max.
        srv3 = scheduler.Server('n3', [3, 3], valid_until=500)
        bucket.add_node(srv3)
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([10., 10.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([10., 10.])))
        bucket.remove_node_by_name('n3')
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([10., 10.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([10., 10.])))
        # Removing n1 leaves only n2's [5, 10].
        bucket.remove_node_by_name('n1')
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([5., 10.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([5., 10.])))

    def test_app_node_placement(self):
        """Tests capacity adjustments for app placement."""
        parent = scheduler.Bucket('top')
        bucket = scheduler.Bucket('a_bucket')
        parent.add_node(bucket)
        srv1 = scheduler.Server('n1', [10, 5], valid_until=500)
        bucket.add_node(srv1)
        srv2 = scheduler.Server('n2', [10, 5], valid_until=500)
        bucket.add_node(srv2)
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([10., 5.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([10., 5.])))
        # size() is the sum over children, free_capacity is the max.
        self.assertTrue(np.array_equal(bucket.size(None),
                                       np.array([20., 10.])))
        # Create 10 identical apps.
        apps = app_list(10, 'app', 50, [1, 2])
        self.assertTrue(srv1.put(apps[0]))
        # Capacity of buckets should not change, other node is intact.
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([10., 5.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([10., 5.])))
        # Once both servers hold apps, the bucket max drops accordingly.
        self.assertTrue(srv1.put(apps[1]))
        self.assertTrue(srv2.put(apps[2]))
        self.assertTrue(np.array_equal(bucket.free_capacity,
                                       np.array([9., 3.])))
        self.assertTrue(np.array_equal(parent.free_capacity,
                                       np.array([9., 3.])))

    def test_bucket_placement(self):
        """Tests placement strategies."""
        top = scheduler.Bucket('top')
        a_bucket = scheduler.Bucket('a_bucket')
        top.add_node(a_bucket)
        b_bucket = scheduler.Bucket('b_bucket')
        top.add_node(b_bucket)
        a1_srv = scheduler.Server('a1_srv', [10, 10], valid_until=500)
        a_bucket.add_node(a1_srv)
        a2_srv = scheduler.Server('a2_srv', [10, 10], valid_until=500)
        a_bucket.add_node(a2_srv)
        b1_srv = scheduler.Server('b1_srv', [10, 10], valid_until=500)
        b_bucket.add_node(b1_srv)
        b2_srv = scheduler.Server('b2_srv', [10, 10], valid_until=500)
        b_bucket.add_node(b2_srv)
        # bunch of apps with the same affinity
        apps1 = app_list(10, 'app1', 50, [1, 1])
        apps2 = app_list(10, 'app2', 50, [1, 1])
        # Default strategy is spread, so placing 4 apps1 will result in each
        # node having one app.
        self.assertTrue(top.put(apps1[0]))
        self.assertTrue(top.put(apps1[1]))
        self.assertTrue(top.put(apps1[2]))
        self.assertTrue(top.put(apps1[3]))
        # from top level, it will spread between a and b buckets, so first
        # two apps go to a1_srv, b1_srv respectively.
        #
        # 3rd app - buckets rotate, and a bucket is preferred again. Inside the
        # bucket, next node is chosen. Same for 4th app.
        #
        # Result is the after 4 placements they are spread evenly.
        #
        self.assertEqual(1, len(a1_srv.apps))
        self.assertEqual(1, len(a2_srv.apps))
        self.assertEqual(1, len(b1_srv.apps))
        self.assertEqual(1, len(b2_srv.apps))
        # Switch the 'app2' affinity in bucket A to pack strategy.
        a_bucket.set_affinity_strategy('app2', scheduler.PackStrategy)
        self.assertTrue(top.put(apps2[0]))
        self.assertTrue(top.put(apps2[1]))
        self.assertTrue(top.put(apps2[2]))
        self.assertTrue(top.put(apps2[3]))
        # B bucket still uses spread strategy.
        self.assertEqual(2, len(b1_srv.apps))
        self.assertEqual(2, len(b2_srv.apps))
        # Without predicting exact placement, apps will be placed on one of
        # the servers in A bucket but not the other, as they use pack
        # strategy.
        self.assertNotEqual(len(a1_srv.apps), len(a2_srv.apps))

    def test_valid_times(self):
        """Tests node valid_until calculation."""
        top = scheduler.Bucket('top', traits=_traits2int(['top']))
        left = scheduler.Bucket('left', traits=_traits2int(['left']))
        right = scheduler.Bucket('right', traits=_traits2int(['right']))
        srv_a = scheduler.Server('a', [10, 10], traits=_traits2int(['a', '0']),
                                 valid_until=1)
        srv_b = scheduler.Server('b', [10, 10], traits=_traits2int(['b', '0']),
                                 valid_until=2)
        srv_y = scheduler.Server('y', [10, 10], traits=_traits2int(['y', '1']),
                                 valid_until=3)
        srv_z = scheduler.Server('z', [10, 10], traits=_traits2int(['z', '1']),
                                 valid_until=4)
        top.add_node(left)
        top.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        # A bucket's valid_until is the max of its children's.
        self.assertEqual(top.valid_until, 4)
        self.assertEqual(left.valid_until, 2)
        self.assertEqual(right.valid_until, 4)
        left.remove_node_by_name('a')
        self.assertEqual(top.valid_until, 4)
        self.assertEqual(left.valid_until, 2)
        self.assertEqual(right.valid_until, 4)
        # Removing the longest-lived server shrinks the max upward the tree.
        right.remove_node_by_name('z')
        self.assertEqual(top.valid_until, 3)
        self.assertEqual(left.valid_until, 2)
        self.assertEqual(right.valid_until, 3)

    def test_node_traits(self):
        """Tests node trait inheritance."""
        top = scheduler.Bucket('top', traits=_traits2int(['top']))
        left = scheduler.Bucket('left', traits=_traits2int(['left']))
        right = scheduler.Bucket('right', traits=_traits2int(['right']))
        srv_a = scheduler.Server('a', [10, 10], traits=_traits2int(['a', '0']),
                                 valid_until=500)
        srv_b = scheduler.Server('b', [10, 10], traits=_traits2int(['b', '0']),
                                 valid_until=500)
        srv_y = scheduler.Server('y', [10, 10], traits=_traits2int(['y', '1']),
                                 valid_until=500)
        srv_z = scheduler.Server('z', [10, 10], traits=_traits2int(['z', '1']),
                                 valid_until=500)
        top.add_node(left)
        top.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        # Traits propagate bottom-up: parents see all child traits.
        self.assertTrue(top.traits.has(_trait2int('a')))
        self.assertTrue(top.traits.has(_trait2int('b')))
        self.assertTrue(top.traits.has(_trait2int('0')))
        self.assertTrue(top.traits.has(_trait2int('y')))
        self.assertTrue(top.traits.has(_trait2int('z')))
        self.assertTrue(top.traits.has(_trait2int('1')))
        self.assertTrue(left.traits.has(_trait2int('a')))
        self.assertTrue(left.traits.has(_trait2int('b')))
        self.assertTrue(left.traits.has(_trait2int('0')))
        self.assertFalse(left.traits.has(_trait2int('y')))
        self.assertFalse(left.traits.has(_trait2int('z')))
        self.assertFalse(left.traits.has(_trait2int('1')))
        # Removing 'a' drops trait 'a'; '0' survives via server 'b'.
        left.remove_node_by_name('a')
        self.assertFalse(left.traits.has(_trait2int('a')))
        self.assertTrue(left.traits.has(_trait2int('b')))
        self.assertTrue(left.traits.has(_trait2int('0')))
        self.assertFalse(top.traits.has(_trait2int('a')))
        self.assertTrue(top.traits.has(_trait2int('b')))
        self.assertTrue(top.traits.has(_trait2int('0')))
        # Removing 'b' drops the last provider of 'b' and '0'.
        left.remove_node_by_name('b')
        self.assertFalse(left.traits.has(_trait2int('b')))
        self.assertFalse(left.traits.has(_trait2int('0')))
        self.assertFalse(top.traits.has(_trait2int('b')))
        self.assertFalse(top.traits.has(_trait2int('0')))

    def test_app_trait_placement(self):
        """Tests placement of app with traits."""
        top = scheduler.Bucket('top', traits=_traits2int(['top']))
        left = scheduler.Bucket('left', traits=_traits2int(['left']))
        right = scheduler.Bucket('right', traits=_traits2int(['right']))
        srv_a = scheduler.Server('a', [10, 10], traits=_traits2int(['a', '0']),
                                 valid_until=500)
        srv_b = scheduler.Server('b', [10, 10], traits=_traits2int(['b', '0']),
                                 valid_until=500)
        srv_y = scheduler.Server('y', [10, 10], traits=_traits2int(['y', '1']),
                                 valid_until=500)
        srv_z = scheduler.Server('z', [10, 10], traits=_traits2int(['z', '1']),
                                 valid_until=500)
        top.add_node(left)
        top.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        alloc_a = scheduler.Allocation(traits=_traits2int(['a']))
        apps_a = app_list(10, 'app_a', 50, [2, 2])
        for app in apps_a:
            alloc_a.add(app)
        # srv_a is the only one with trait 'a'.
        self.assertTrue(top.put(apps_a[0]))
        self.assertTrue(top.put(apps_a[1]))
        self.assertIn(apps_a[0].name, srv_a.apps)
        self.assertIn(apps_a[1].name, srv_a.apps)
        alloc_0 = scheduler.Allocation(traits=_traits2int(['0']))
        apps_0 = app_list(10, 'app_0', 50, [2, 2])
        for app in apps_0:
            alloc_0.add(app)
        # '0' trait - two servers, will spread by default.
        self.assertTrue(top.put(apps_0[0]))
        self.assertTrue(top.put(apps_0[1]))
        self.assertIn(apps_0[0].name, srv_a.apps)
        self.assertIn(apps_0[1].name, srv_b.apps)
        # Prev implementation propagated traits from parent to children,
        # so "right" trait propagated to leaf servers.
        #
        # This behavior is removed, so placing app with "right" trait will
        # fail.
        #
        # alloc_r1 = scheduler.Allocation(traits=_traits2int(['right', '1']))
        # apps_r1 = app_list(10, 'app_r1', 50, [2, 2])
        # for app in apps_r1:
        #     alloc_r1.add(app)
        # self.assertTrue(top.put(apps_r1[0]))
        # self.assertTrue(top.put(apps_r1[1]))
        # self.assertIn(apps_r1[0].name, srv_y.apps)
        # self.assertIn(apps_r1[1].name, srv_z.apps)
        apps_nothing = app_list(10, 'apps_nothing', 50, [1, 1])
        # All nodes fit. Spread first between buckets, then between nodes.
        #                  top
        #        left             right
        #       a    b           y    z
        self.assertTrue(top.put(apps_nothing[0]))
        self.assertTrue(top.put(apps_nothing[1]))
        self.assertTrue(
            (
                apps_nothing[0].server in ['a', 'b'] and
                apps_nothing[1].server in ['y', 'z']
            ) or
            (
                apps_nothing[0].server in ['y', 'z'] and
                apps_nothing[1].server in ['a', 'b']
            )
        )
        self.assertTrue(top.put(apps_nothing[2]))
        self.assertTrue(top.put(apps_nothing[3]))
        self.assertTrue(
            (
                apps_nothing[2].server in ['a', 'b'] and
                apps_nothing[3].server in ['y', 'z']
            ) or
            (
                apps_nothing[2].server in ['y', 'z'] and
                apps_nothing[3].server in ['a', 'b']
            )
        )

    def test_size_and_members(self):
        """Tests recursive size calculation."""
        top = scheduler.Bucket('top', traits=_traits2int(['top']))
        left = scheduler.Bucket('left', traits=_traits2int(['left']))
        right = scheduler.Bucket('right', traits=_traits2int(['right']))
        srv_a = scheduler.Server('a', [1, 1], traits=_traits2int(['a', '0']),
                                 valid_until=500)
        srv_b = scheduler.Server('b', [1, 1], traits=_traits2int(['b', '0']),
                                 valid_until=500)
        srv_y = scheduler.Server('y', [1, 1], traits=_traits2int(['y', '1']),
                                 valid_until=500)
        srv_z = scheduler.Server('z', [1, 1], traits=_traits2int(['z', '1']),
                                 valid_until=500)
        top.add_node(left)
        top.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        # Size sums recursively; members() flattens to leaf servers.
        # pylint: disable=W0212
        self.assertTrue(scheduler._all_isclose(srv_a.size(None), [1, 1]))
        self.assertTrue(scheduler._all_isclose(left.size(None), [2, 2]))
        self.assertTrue(scheduler._all_isclose(top.size(None), [4, 4]))
        self.assertEqual(
            {
                'a': srv_a,
                'b': srv_b,
                'y': srv_y,
                'z': srv_z
            },
            top.members()
        )

    def test_affinity_counters(self):
        """Tests affinity counters."""
        top = scheduler.Bucket('top', traits=_traits2int(['top']))
        left = scheduler.Bucket('left', traits=_traits2int(['left']))
        right = scheduler.Bucket('right', traits=_traits2int(['right']))
        srv_a = scheduler.Server('a', [10, 10], traits=0, valid_until=500)
        srv_b = scheduler.Server('b', [10, 10], traits=0, valid_until=500)
        srv_y = scheduler.Server('y', [10, 10], traits=0, valid_until=500)
        srv_z = scheduler.Server('z', [10, 10], traits=0, valid_until=500)
        top.add_node(left)
        top.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        apps_a = app_list(10, 'app_a', 50, [1, 1])
        # Counters propagate up the tree on placement.
        self.assertTrue(srv_a.put(apps_a[0]))
        self.assertEqual(1, srv_a.affinity_counters['app_a'])
        self.assertEqual(1, left.affinity_counters['app_a'])
        self.assertEqual(1, top.affinity_counters['app_a'])
        # Placing the same app on a second server double-counts at top.
        srv_z.put(apps_a[0])
        self.assertEqual(1, srv_z.affinity_counters['app_a'])
        self.assertEqual(1, left.affinity_counters['app_a'])
        self.assertEqual(2, top.affinity_counters['app_a'])
        # Removal decrements along the server's own ancestry only.
        srv_a.remove(apps_a[0].name)
        self.assertEqual(0, srv_a.affinity_counters['app_a'])
        self.assertEqual(0, left.affinity_counters['app_a'])
        self.assertEqual(1, top.affinity_counters['app_a'])
class CellTest(unittest.TestCase):
"""treadmill.scheduler.Cell tests."""
    def setUp(self):
        # All capacity vectors in these tests are 2-dimensional.
        scheduler.DIMENSION_COUNT = 2
        super(CellTest, self).setUp()
    def test_emtpy(self):
        """Simple test to test empty bucket"""
        # NOTE(review): method name has a typo ('emtpy'); kept as-is so the
        # test identifier does not change.
        cell = scheduler.Cell('top')
        empty = scheduler.Bucket('empty', traits=0)
        cell.add_node(empty)
        bucket = scheduler.Bucket('bucket', traits=0)
        srv_a = scheduler.Server('a', [10, 10], traits=0, valid_until=500)
        bucket.add_node(srv_a)
        cell.add_node(bucket)
        # Scheduling must not fail on the empty sibling bucket.
        cell.schedule()
    def test_labels(self):
        """Test scheduling with labels."""
        cell = scheduler.Cell('top')
        left = scheduler.Bucket('left', traits=0)
        right = scheduler.Bucket('right', traits=0)
        # Servers labeled 'xx' belong to the 'xx' partition.
        srv_a = scheduler.Server('a_xx', [10, 10], valid_until=500, label='xx')
        srv_b = scheduler.Server('b', [10, 10], valid_until=500)
        srv_y = scheduler.Server('y_xx', [10, 10], valid_until=500, label='xx')
        srv_z = scheduler.Server('z', [10, 10], valid_until=500)
        cell.add_node(left)
        cell.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        app1 = scheduler.Application('app1', 4, [1, 1], 'app')
        app2 = scheduler.Application('app2', 3, [2, 2], 'app')
        app3 = scheduler.Application('app_xx_3', 2, [3, 3], 'app')
        app4 = scheduler.Application('app_xx_4', 1, [4, 4], 'app')
        cell.partitions[None].allocation.add(app1)
        cell.partitions[None].allocation.add(app2)
        cell.partitions['xx'].allocation.add(app3)
        cell.partitions['xx'].allocation.add(app4)
        cell.schedule()
        # Apps only land on servers in their own partition.
        self.assertIn(app1.server, ['b', 'z'])
        self.assertIn(app2.server, ['b', 'z'])
        self.assertIn(app3.server, ['a_xx', 'y_xx'])
        self.assertIn(app4.server, ['a_xx', 'y_xx'])
    def test_simple(self):
        """Simple placement test."""
        # pylint - too many lines.
        #
        # pylint: disable=R0915
        cell = scheduler.Cell('top')
        left = scheduler.Bucket('left', traits=0)
        right = scheduler.Bucket('right', traits=0)
        srv_a = scheduler.Server('a', [10, 10], traits=0, valid_until=500)
        srv_b = scheduler.Server('b', [10, 10], traits=0, valid_until=500)
        srv_y = scheduler.Server('y', [10, 10], traits=0, valid_until=500)
        srv_z = scheduler.Server('z', [10, 10], traits=0, valid_until=500)
        cell.add_node(left)
        cell.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        app1 = scheduler.Application('app1', 4, [1, 1], 'app')
        app2 = scheduler.Application('app2', 3, [2, 2], 'app')
        app3 = scheduler.Application('app3', 2, [3, 3], 'app')
        app4 = scheduler.Application('app4', 1, [4, 4], 'app')
        cell.partitions[None].allocation.add(app1)
        cell.partitions[None].allocation.add(app2)
        cell.partitions[None].allocation.add(app3)
        cell.partitions[None].allocation.add(app4)
        cell.schedule()
        # Four apps spread across the four servers, one each.
        self.assertEqual(
            set([app1.server, app2.server, app3.server, app4.server]),
            set(['a', 'y', 'b', 'z'])
        )
        srv1 = app1.server
        srv2 = app2.server
        srv3 = app3.server
        srv4 = app4.server
        # Add high priority app that needs entire cell
        app_prio50 = scheduler.Application('prio50', 50, [10, 10], 'app')
        cell.partitions[None].allocation.add(app_prio50)
        cell.schedule()
        # The queue is ordered by priority:
        # - prio50, app1, app2, app3, app4
        #
        # As placement not found for prio50, app4 will be evicted first.
        #
        # As result, prio50 will be placed on 'z', and app4 (evicted) will be
        # placed on "next" server, which is 'a'.
        self.assertEqual(app_prio50.server, srv4)
        self.assertEqual(app4.server, srv1)
        app_prio51 = scheduler.Application('prio51', 51, [10, 10], 'app')
        cell.partitions[None].allocation.add(app_prio51)
        cell.schedule()
        # app4 is now colocated with app1. app4 will still be evicted first,
        # then app3, at which point there will be enough capacity to place
        # large app.
        #
        # app3 will be rescheduled to run on "next" server - 'y', and app4 will
        # be restored to 'a'.
        self.assertEqual(app_prio51.server, srv3)
        self.assertEqual(app_prio50.server, srv4)
        self.assertEqual(app4.server, srv1)
        app_prio49_1 = scheduler.Application('prio49_1', 49, [10, 10], 'app')
        app_prio49_2 = scheduler.Application('prio49_2', 49, [9, 9], 'app')
        cell.partitions[None].allocation.add(app_prio49_1)
        cell.partitions[None].allocation.add(app_prio49_2)
        cell.schedule()
        # 50/51 not moved. from the end of the queue,
        self.assertEqual(app_prio51.server, srv3)
        self.assertEqual(app_prio50.server, srv4)
        self.assertEqual(
            set([app_prio49_1.server, app_prio49_2.server]),
            set([srv1, srv2])
        )
        # Only capacity left for small [1, 1] app.
        self.assertIsNotNone(app1.server)
        self.assertIsNone(app2.server)
        self.assertIsNone(app3.server)
        self.assertIsNone(app4.server)
    def test_max_utilization(self):
        """Test max-utilization is handled properly when priorities change"""
        cell = scheduler.Cell('top')
        left = scheduler.Bucket('left', traits=0)
        right = scheduler.Bucket('right', traits=0)
        srv_a = scheduler.Server('a', [10, 10], traits=0, valid_until=500)
        srv_b = scheduler.Server('b', [10, 10], traits=0, valid_until=500)
        srv_y = scheduler.Server('y', [10, 10], traits=0, valid_until=500)
        srv_z = scheduler.Server('z', [10, 10], traits=0, valid_until=500)
        cell.add_node(left)
        cell.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        app1 = scheduler.Application('app1', 4, [1, 1], 'app')
        app2 = scheduler.Application('app2', 3, [2, 2], 'app')
        app3 = scheduler.Application('app3', 2, [3, 3], 'app')
        app4 = scheduler.Application('app4', 1, [4, 4], 'app')
        cell.partitions[None].allocation.add(app1)
        cell.partitions[None].allocation.add(app2)
        cell.partitions[None].allocation.add(app3)
        cell.partitions[None].allocation.add(app4)
        # Reservation [6, 6] with a hard utilization cap of 1: only apps
        # whose accumulated demand fits the reservation get scheduled.
        cell.partitions[None].allocation.set_reserved([6, 6])
        cell.partitions[None].allocation.set_max_utilization(1)
        cell.schedule()
        # app1 + app2 + app3 demand [6, 6]; app4 exceeds the cap.
        self.assertIsNotNone(app1.server)
        self.assertIsNotNone(app2.server)
        self.assertIsNotNone(app3.server)
        self.assertIsNone(app4.server)
        # Raising app4's priority re-orders the queue: app1 + app4 now fill
        # the reservation first, pushing app2/app3 over the cap.
        app4.priority = 5
        cell.schedule()
        self.assertIsNotNone(app1.server)
        self.assertIsNone(app2.server)
        self.assertIsNone(app3.server)
        self.assertIsNotNone(app4.server)
    def test_affinity_limits(self):
        """Tests affinity limits at server, rack and cell level."""
        cell = scheduler.Cell('top')
        left = scheduler.Bucket('left', traits=0)
        right = scheduler.Bucket('right', traits=0)
        srv_a = scheduler.Server('a', [10, 10], traits=0, valid_until=500)
        srv_b = scheduler.Server('b', [10, 10], traits=0, valid_until=500)
        srv_y = scheduler.Server('y', [10, 10], traits=0, valid_until=500)
        srv_z = scheduler.Server('z', [10, 10], traits=0, valid_until=500)
        cell.add_node(left)
        cell.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        left.level = 'rack'
        right.level = 'rack'
        # server limit 1: at most one app per server -> 4 placed, 5th pending.
        apps = app_list(10, 'app', 50, [1, 1],
                        affinity_limits={'server': 1})
        cell.add_app(cell.partitions[None].allocation, apps[0])
        cell.add_app(cell.partitions[None].allocation, apps[1])
        cell.add_app(cell.partitions[None].allocation, apps[2])
        cell.add_app(cell.partitions[None].allocation, apps[3])
        cell.add_app(cell.partitions[None].allocation, apps[4])
        cell.schedule()
        self.assertIsNotNone(apps[0].server)
        self.assertIsNotNone(apps[1].server)
        self.assertIsNotNone(apps[2].server)
        self.assertIsNotNone(apps[3].server)
        self.assertIsNone(apps[4].server)
        for app in apps:
            cell.remove_app(app.name)
        # rack limit 1: one app per rack -> only 2 placed (2 racks).
        apps = app_list(10, 'app', 50, [1, 1],
                        affinity_limits={'server': 1, 'rack': 1})
        cell.add_app(cell.partitions[None].allocation, apps[0])
        cell.add_app(cell.partitions[None].allocation, apps[1])
        cell.add_app(cell.partitions[None].allocation, apps[2])
        cell.add_app(cell.partitions[None].allocation, apps[3])
        cell.schedule()
        self.assertIsNotNone(apps[0].server)
        self.assertIsNotNone(apps[1].server)
        self.assertIsNone(apps[2].server)
        self.assertIsNone(apps[3].server)
        for app in apps:
            cell.remove_app(app.name)
        # cell limit 3 caps total placements even though rack limit allows 4.
        apps = app_list(10, 'app', 50, [1, 1],
                        affinity_limits={'server': 1, 'rack': 2, 'cell': 3})
        cell.add_app(cell.partitions[None].allocation, apps[0])
        cell.add_app(cell.partitions[None].allocation, apps[1])
        cell.add_app(cell.partitions[None].allocation, apps[2])
        cell.add_app(cell.partitions[None].allocation, apps[3])
        cell.schedule()
        self.assertIsNotNone(apps[0].server)
        self.assertIsNotNone(apps[1].server)
        self.assertIsNotNone(apps[2].server)
        self.assertIsNone(apps[3].server)
    @mock.patch('time.time', mock.Mock(return_value=0))
    def test_data_retention(self):
        """Tests data retention."""
        # Disable pylint's too many statements warning.
        #
        # pylint: disable=R0915
        cell = scheduler.Cell('top')
        left = scheduler.Bucket('left', traits=0)
        right = scheduler.Bucket('right', traits=0)
        srvs = {
            'a': scheduler.Server('a', [10, 10], traits=0, valid_until=500),
            'b': scheduler.Server('b', [10, 10], traits=0, valid_until=500),
            'y': scheduler.Server('y', [10, 10], traits=0, valid_until=500),
            'z': scheduler.Server('z', [10, 10], traits=0, valid_until=500),
        }
        cell.add_node(left)
        cell.add_node(right)
        left.add_node(srvs['a'])
        left.add_node(srvs['b'])
        right.add_node(srvs['y'])
        right.add_node(srvs['z'])
        left.level = 'rack'
        right.level = 'rack'
        # Sticky apps keep their placement for 30s after their server goes
        # down; the unsticky app has no retention.
        time.time.return_value = 100
        sticky_apps = app_list(10, 'sticky', 50, [1, 1],
                               affinity_limits={'server': 1, 'rack': 1},
                               data_retention_timeout=30)
        unsticky_app = scheduler.Application('unsticky', 10, [1., 1.],
                                             'unsticky',
                                             data_retention_timeout=0)
        cell.partitions[None].allocation.add(sticky_apps[0])
        cell.partitions[None].allocation.add(unsticky_app)
        cell.schedule()
        # Both apps having different affinity, will be on same node.
        first_srv = sticky_apps[0].server
        self.assertEqual(sticky_apps[0].server, unsticky_app.server)
        # Mark srv_a as down, unsticky app migrates right away,
        # sticky stays.
        srvs[first_srv].state = scheduler.State.down
        cell.schedule()
        self.assertEqual(sticky_apps[0].server, first_srv)
        self.assertNotEqual(unsticky_app.server, first_srv)
        # Next event is the sticky app's retention expiry: 100 + 30.
        self.assertEqual(cell.next_event_at, 130)
        time.time.return_value = 110
        cell.schedule()
        self.assertEqual(sticky_apps[0].server, first_srv)
        self.assertNotEqual(unsticky_app.server, first_srv)
        self.assertEqual(cell.next_event_at, 130)
        # At t=130 the retention expires and the sticky app migrates.
        time.time.return_value = 130
        cell.schedule()
        self.assertNotEqual(sticky_apps[0].server, first_srv)
        self.assertNotEqual(unsticky_app.server, first_srv)
        self.assertEqual(cell.next_event_at, np.inf)
        second_srv = sticky_apps[0].server
        # Mark srv_a as up, srv_y as down.
        srvs[first_srv].state = scheduler.State.up
        srvs[second_srv].state = scheduler.State.down
        cell.schedule()
        self.assertEqual(sticky_apps[0].server, second_srv)
        self.assertNotEqual(unsticky_app.server, second_srv)
        self.assertEqual(cell.next_event_at, 160)
        # Schedule one more sticky app. As it has rack affinity limit 1, it
        # can't go to right (x,y) rack, rather will end up in left (a,b)
        # rack.
        #
        # Other sticky apps will be pending.
        time.time.return_value = 135
        cell.partitions[None].allocation.add(sticky_apps[1])
        cell.partitions[None].allocation.add(sticky_apps[2])
        cell.schedule()
        # Original app still on 'y', timeout did not expire
        self.assertEqual(sticky_apps[0].server, second_srv)
        # next sticky app is on (a,b) rack.
        # self.assertIn(sticky_apps[1].server, ['a', 'b'])
        # The 3rd sticky app pending, as rack affinity taken by currently
        # down node y.
        self.assertIsNone(sticky_apps[2].server)
        srvs[second_srv].state = scheduler.State.up
        cell.schedule()
        # Original app still on 'y', timeout did not expire
        self.assertEqual(sticky_apps[0].server, second_srv)
        # next sticky app is on (a,b) rack.
        # self.assertIn(sticky_apps[1].server, ['a', 'b'])
        # The 3rd sticky app pending, as rack affinity taken by currently
        # app[0] on node y.
        self.assertIsNone(sticky_apps[2].server)
    def test_serialization(self):
        """Tests cell serialization."""
        # Disable pylint's too many statements warning.
        #
        # pylint: disable=R0915
        cell = scheduler.Cell('top')
        left = scheduler.Bucket('left', traits=0)
        right = scheduler.Bucket('right', traits=0)
        srv_a = scheduler.Server('a', [10, 10], traits=0, valid_until=500)
        srv_b = scheduler.Server('b', [10, 10], traits=0, valid_until=500)
        srv_y = scheduler.Server('y', [10, 10], traits=0, valid_until=500)
        srv_z = scheduler.Server('z', [10, 10], traits=0, valid_until=500)
        cell.add_node(left)
        cell.add_node(right)
        left.add_node(srv_a)
        left.add_node(srv_b)
        right.add_node(srv_y)
        right.add_node(srv_z)
        left.level = 'rack'
        right.level = 'rack'
        apps = app_list(10, 'app', 50, [1, 1],
                        affinity_limits={'server': 1, 'rack': 1})
        cell.add_app(cell.partitions[None].allocation, apps[0])
        cell.add_app(cell.partitions[None].allocation, apps[1])
        cell.add_app(cell.partitions[None].allocation, apps[2])
        cell.add_app(cell.partitions[None].allocation, apps[3])
        # Currently only checks that scheduling succeeds; round-trip
        # serialization asserts are pending implementation (see TODO).
        cell.schedule()
        # TODO: need to implement serialization.
        #
        # data = scheduler.dumps(cell)
        # cell1 = scheduler.loads(data)
    def test_identity(self):
        """Tests scheduling apps with identity."""
        cell = scheduler.Cell('top')
        for idx in range(0, 10):
            server = scheduler.Server(str(idx), [10, 10], traits=0,
                                      valid_until=time.time() + 1000)
            cell.add_node(server)
        # Identity group of size 3 caps concurrently placed members at 3.
        cell.configure_identity_group('ident1', 3)
        apps = app_list(10, 'app', 50, [1, 1], identity_group='ident1')
        for app in apps:
            cell.add_app(cell.partitions[None].allocation, app)
        # Acquiring identity 0 up front leaves identities {1, 2} available.
        self.assertTrue(apps[0].acquire_identity())
        self.assertEqual(set([1, 2]), apps[0].identity_group_ref.available)
        self.assertEqual(set([1, 2]), apps[1].identity_group_ref.available)
        cell.schedule()
        self.assertEqual(apps[0].identity, 0)
        self.assertEqual(apps[1].identity, 1)
        self.assertEqual(apps[2].identity, 2)
        for idx in range(3, 10):
            self.assertIsNone(apps[idx].identity, None)
        # Removing app will release the identity, and it will be acquired by
        # next app in the group.
        cell.remove_app('app-2')
        cell.schedule()
        self.assertEqual(apps[3].identity, 2)
        # Increase identity group count to 5, expect 5 placed apps.
        cell.configure_identity_group('ident1', 5)
        cell.schedule()
        self.assertEqual(
            5,
            len([app for app in apps if app.server is not None])
        )
        # Shrinking back to 3 evicts the surplus members.
        cell.configure_identity_group('ident1', 3)
        cell.schedule()
        self.assertEqual(
            3,
            len([app for app in apps if app.server is not None])
        )
def test_schedule_once(self):
"""Tests schedule once trait on server down."""
cell = scheduler.Cell('top')
for idx in range(0, 10):
server = scheduler.Server(str(idx), [10, 10], traits=0,
valid_until=time.time() + 1000)
cell.add_node(server)
apps = app_list(2, 'app', 50, [6, 6], schedule_once=True)
for app in apps:
cell.add_app(cell.partitions[None].allocation, app)
cell.schedule()
self.assertNotEqual(apps[0].server, apps[1].server)
self.assertFalse(apps[0].evicted)
self.assertFalse(apps[0].evicted)
cell.children_by_name[apps[0].server].state = scheduler.State.down
cell.remove_node_by_name(apps[1].server)
cell.schedule()
self.assertIsNone(apps[0].server)
self.assertTrue(apps[0].evicted)
self.assertIsNone(apps[1].server)
self.assertTrue(apps[1].evicted)
    def test_schedule_once_eviction(self):
        """Tests schedule once trait with eviction."""
        cell = scheduler.Cell('top')
        for idx in range(0, 10):
            server = scheduler.Server(str(idx), [10, 10], traits=0,
                                      valid_until=time.time() + 1000)
            cell.add_node(server)
        # Each server has capacity 10.
        #
        # Place two apps - capacity 1, capacity 8, they will occupy entire
        # server.
        #
        # Try and place app with demand of 2. First it will try to evict
        # small app, but it will not be enough, so it will evict large app.
        #
        # Check that evicted flag is set only for large app, and small app
        # will be restored.
        small_apps = app_list(10, 'small', 50, [1, 1], schedule_once=True)
        for app in small_apps:
            cell.add_app(cell.partitions[None].allocation, app)
        large_apps = app_list(10, 'large', 60, [8, 8], schedule_once=True)
        for app in large_apps:
            cell.add_app(cell.partitions[None].allocation, app)
        placement = cell.schedule()
        # Check that all apps are placed.
        # Placement tuples are (app, before, exp_before, after, exp_after).
        app2server = {app: after for app, _, _, after, _ in placement
                      if after is not None}
        self.assertEqual(len(app2server), 20)
        # Add one app, higher priority than rest, will force eviction.
        medium_apps = app_list(1, 'medium', 70, [5, 5])
        for app in medium_apps:
            cell.add_app(cell.partitions[None].allocation, app)
        cell.schedule()
        self.assertEqual(len([app for app in small_apps if app.evicted]), 0)
        self.assertEqual(len([app for app in small_apps if app.server]), 10)
        self.assertEqual(len([app for app in large_apps if app.evicted]), 1)
        self.assertEqual(len([app for app in large_apps if app.server]), 9)
        # Remove app, make sure the evicted app is not placed again.
        cell.remove_app(medium_apps[0].name)
        cell.schedule()
        self.assertEqual(len([app for app in small_apps if app.evicted]), 0)
        self.assertEqual(len([app for app in small_apps if app.server]), 10)
        self.assertEqual(len([app for app in large_apps if app.evicted]), 1)
        self.assertEqual(len([app for app in large_apps if app.server]), 9)
    @mock.patch('time.time', mock.Mock(return_value=100))
    def test_eviction_server_down(self):
        """Tests that data retention keeps an app on a down server."""
        cell = scheduler.Cell('top')
        large_server = scheduler.Server('large', [10, 10], traits=0,
                                        valid_until=10000)
        cell.add_node(large_server)
        small_server = scheduler.Server('small', [3, 3], traits=0,
                                        valid_until=10000)
        cell.add_node(small_server)
        # Create two apps one with retention other without. Set priority
        # so that app with retention is on the right of the queue, when
        # placement not found for app without retention, it will try to
        # evict app with retention.
        app_no_retention = scheduler.Application('a1', 100, [4, 4], 'app')
        app_with_retention = scheduler.Application('a2', 1, [4, 4], 'app',
                                                   data_retention_timeout=3000)
        cell.add_app(cell.partitions[None].allocation, app_no_retention)
        cell.add_app(cell.partitions[None].allocation, app_with_retention)
        cell.schedule()
        # At this point, both apps are on large server, as small server does
        # not have capacity.
        self.assertEqual('large', app_no_retention.server)
        self.assertEqual('large', app_with_retention.server)
        # Mark large server down. App with retention will remain on the server.
        # App without retention should be pending.
        large_server.state = scheduler.State.down
        cell.schedule()
        self.assertEqual(None, app_no_retention.server)
        self.assertEqual('large', app_with_retention.server)
    @mock.patch('time.time', mock.Mock(return_value=100))
    def test_restore(self):
        """Tests that placement honors the app lease vs server validity."""
        cell = scheduler.Cell('top')
        # Server 'large' expires at 200, 'small' at 1000 (mocked clock).
        large_server = scheduler.Server('large', [10, 10], traits=0,
                                        valid_until=200)
        cell.add_node(large_server)
        small_server = scheduler.Server('small', [3, 3], traits=0,
                                        valid_until=1000)
        cell.add_node(small_server)
        apps = app_list(1, 'app', 50, [6, 6], lease=50)
        for app in apps:
            cell.add_app(cell.partitions[None].allocation, app)
        # 100 sec left, app lease is 50, should fit.
        time.time.return_value = 100
        cell.schedule()
        self.assertEqual(apps[0].server, 'large')
        # At 190 only 10 sec of server validity remain - a 50 sec lease no
        # longer fits, and the app too large for 'small' stays pending.
        time.time.return_value = 190
        apps_not_fit = app_list(1, 'app-not-fit', 90, [6, 6], lease=50)
        for app in apps_not_fit:
            cell.add_app(cell.partitions[None].allocation, app)
        cell.schedule()
        self.assertIsNone(apps_not_fit[0].server)
        self.assertEqual(apps[0].server, 'large')
    @mock.patch('time.time', mock.Mock(return_value=10))
    def test_renew(self):
        """Tests placement lease renewal."""
        cell = scheduler.Cell('top')
        server_a = scheduler.Server('a', [10, 10], traits=0,
                                    valid_until=1000)
        cell.add_node(server_a)
        apps = app_list(1, 'app', 50, [6, 6], lease=50)
        for app in apps:
            cell.add_app(cell.partitions[None].allocation, app)
        cell.schedule()
        self.assertEqual(apps[0].server, 'a')
        # Expiry is schedule time (10) + lease (50).
        self.assertEqual(apps[0].placement_expiry, 60)
        # Without the renew flag, re-scheduling does not extend the expiry.
        time.time.return_value = 100
        cell.schedule()
        self.assertEqual(apps[0].server, 'a')
        self.assertEqual(apps[0].placement_expiry, 60)
        time.time.return_value = 200
        apps[0].renew = True
        cell.schedule()
        self.assertEqual(apps[0].server, 'a')
        self.assertEqual(apps[0].placement_expiry, 250)
        self.assertFalse(apps[0].renew)
        # fast forward to 975, close to server 'a' expiration, app will
        # migrate to 'b' on renew.
        server_b = scheduler.Server('b', [10, 10], traits=0,
                                    valid_until=2000)
        cell.add_node(server_b)
        time.time.return_value = 975
        apps[0].renew = True
        cell.schedule()
        self.assertEqual(apps[0].server, 'b')
        self.assertEqual(apps[0].placement_expiry, 1025)
        self.assertFalse(apps[0].renew)
        # fast forward to 1975, when app can't be renewed on server b, but
        # there is not alternative placement.
        time.time.return_value = 1975
        apps[0].renew = True
        cell.schedule()
        self.assertEqual(apps[0].server, 'b')
        # Placement expiry did not change, as placement was not found.
        self.assertEqual(apps[0].placement_expiry, 1025)
        # Renew flag is not cleared, as new placement was not found.
        self.assertTrue(apps[0].renew)
    def test_partition_server_down(self):
        """Test placement when server in the partition goes down."""
        cell = scheduler.Cell('top')
        # Two servers per partition label ('x' and 'y').
        srv_x1 = scheduler.Server('s_x1', [10, 10], valid_until=500, label='x')
        srv_x2 = scheduler.Server('s_x2', [10, 10], valid_until=500, label='x')
        srv_y1 = scheduler.Server('s_y1', [10, 10], valid_until=500, label='y')
        srv_y2 = scheduler.Server('s_y2', [10, 10], valid_until=500, label='y')
        cell.add_node(srv_x1)
        cell.add_node(srv_x2)
        cell.add_node(srv_y1)
        cell.add_node(srv_y2)
        app_x1 = scheduler.Application('a_x1', 1, [1, 1], 'app')
        app_x2 = scheduler.Application('a_x2', 1, [1, 1], 'app')
        app_y1 = scheduler.Application('a_y1', 1, [1, 1], 'app')
        app_y2 = scheduler.Application('a_y2', 1, [1, 1], 'app')
        cell.partitions['x'].allocation.add(app_x1)
        cell.partitions['x'].allocation.add(app_x2)
        cell.partitions['y'].allocation.add(app_y1)
        cell.partitions['y'].allocation.add(app_y2)
        placement = cell.schedule()
        self.assertEqual(len(placement), 4)
        # Default strategy will distribute two apps on each of the servers
        # in the partition.
        #
        # For future test it is important that each server has an app, so
        # we assert on that.
        self.assertEqual(len(srv_x1.apps), 1)
        self.assertEqual(len(srv_x2.apps), 1)
        self.assertEqual(len(srv_y1.apps), 1)
        self.assertEqual(len(srv_y2.apps), 1)
        # Verify that all apps are placed in the returned placement.
        for (_app, before, _exp_before, after, _exp_after) in placement:
            self.assertIsNone(before)
            self.assertIsNotNone(after)
        # Bring server down in each partition.
        srv_x1.state = scheduler.State.down
        srv_y1.state = scheduler.State.down
        placement = cell.schedule()
        self.assertEqual(len(placement), 4)
        # Check that in the updated placement before and after are not None.
        for (_app, before, _exp_before, after, _exp_after) in placement:
            self.assertIsNotNone(before)
            self.assertIsNotNone(after)
    def test_placement_shortcut(self):
        """Test no placement tracker."""
        cell = scheduler.Cell('top')
        srv_1 = scheduler.Server('s1', [10, 10], valid_until=500, label='x')
        srv_2 = scheduler.Server('s2', [10, 10], valid_until=500, label='x')
        cell.add_node(srv_1)
        cell.add_node(srv_2)
        # One large app per dimension, leaving free capacity [3, 9] and
        # [9, 3] on the two servers.
        app_large_dim1 = scheduler.Application('large-1', 100, [7, 1], 'app')
        app_large_dim2 = scheduler.Application('large-2', 100, [1, 7], 'app')
        cell.partitions['x'].allocation.add(app_large_dim1)
        cell.partitions['x'].allocation.add(app_large_dim2)
        cell.schedule()
        self.assertIsNotNone(app_large_dim1.server)
        self.assertIsNotNone(app_large_dim2.server)
        # Add lower priority apps - can't be scheduled.
        #
        # As free size of top level node is 9x9, placement attempt will be
        # made.
        medium_apps = []
        for appid in range(1, 10):
            app_med = scheduler.Application(
                'medium-%s' % appid, 90, [4, 4], 'app')
            cell.partitions['x'].allocation.add(app_med)
            medium_apps.append(app_med)
        cell.schedule()
        # No single server has [4, 4] free, so none of them can be placed.
        for app in medium_apps:
            self.assertIsNone(app.server)
class IdentityGroupTest(unittest.TestCase):
    """scheduler IdentityGroup test."""
    def test_basic(self):
        """Test basic acquire/release ops."""
        ident_group = scheduler.IdentityGroup(3)
        # Identities are handed out lowest-first; None when exhausted.
        self.assertEqual(0, ident_group.acquire())
        self.assertEqual(1, ident_group.acquire())
        self.assertEqual(2, ident_group.acquire())
        self.assertEqual(None, ident_group.acquire())
        ident_group.release(1)
        self.assertEqual(1, ident_group.acquire())
    def test_adjust(self):
        """Test identity group count adjustment."""
        ident_group = scheduler.IdentityGroup(5)
        # Identities 0, 2, 4 are currently in use.
        ident_group.available = set([1, 3])
        # Growing to 7 adds the new identities 5 and 6.
        ident_group.adjust(7)
        self.assertEqual(set([1, 3, 5, 6]), ident_group.available)
        # Shrinking to 3 drops available identities >= 3.
        ident_group.adjust(3)
        self.assertEqual(set([1]), ident_group.available)
    def test_adjust_relese(self):
        """Test releasing identity when identity exceeds the count."""
        # NOTE: method name has a typo ('relese'), kept for stability.
        ident_group = scheduler.IdentityGroup(1)
        self.assertEqual(0, ident_group.acquire())
        self.assertEqual(len(ident_group.available), 0)
        ident_group.adjust(0)
        # Releasing identity 0 after shrinking to 0 must not re-add it.
        ident_group.release(0)
        self.assertEqual(len(ident_group.available), 0)
def _time(string):
"""Convert a formatted datetime to a timestamp."""
return time.mktime(time.strptime(string, '%Y-%m-%d %H:%M:%S'))
class RebootSchedulerTest(unittest.TestCase):
    """reboot scheduler test."""
    def test_bucket(self):
        """Test RebootBucket."""
        bucket = scheduler.RebootBucket(_time('2000-01-03 00:00:00'))
        # cost of inserting into empty bucket is zero
        server1 = scheduler.Server('s1', [10, 10],
                                   up_since=_time('2000-01-01 00:00:00'))
        self.assertEqual(0, bucket.cost(server1))
        # cost of inserting into non-empty bucket is size of bucket
        bucket.add(server1)
        server2 = scheduler.Server('s2', [10, 10],
                                   up_since=_time('2000-01-01 00:00:00'))
        self.assertEqual(1, bucket.cost(server2))
        # when server would be too old, cost is prohibitive
        server3 = scheduler.Server('s3', [10, 10],
                                   up_since=_time('1999-01-01 00:00:00'))
        self.assertEqual(float('inf'), bucket.cost(server3))
        # when server is too close to reboot date, cost is prohibitive
        # NOTE(review): server4 reuses the name 's1' - looks like a
        # copy-paste slip for 's4'; harmless here since only cost() is used.
        server4 = scheduler.Server('s1', [10, 10],
                                   up_since=_time('2000-01-02 10:00:00'))
        self.assertEqual(float('inf'), bucket.cost(server4))
    def test_reboots(self):
        """Test RebootScheduler."""
        partition = scheduler.Partition(now=_time('2000-01-01 00:00:00'))
        server1 = scheduler.Server('s1', [10, 10],
                                   up_since=_time('2000-01-01 00:00:00'))
        server2 = scheduler.Server('s2', [10, 10],
                                   up_since=_time('2000-01-01 00:00:00'))
        server3 = scheduler.Server('s3', [10, 10],
                                   up_since=_time('2000-01-01 00:00:00'))
        server4 = scheduler.Server('s4', [10, 10],
                                   up_since=_time('1999-12-24 00:00:00'))
        # adding to existing bucket
        # pylint: disable=W0212
        timestamp = partition._reboot_buckets[0].timestamp
        partition.add(server1, timestamp)
        self.assertEqual(timestamp,
                         server1.valid_until)
        # adding to non-existsing bucket, results in finding a more
        # appropriate bucket
        partition.add(server2, timestamp + 600)
        self.assertNotEqual(timestamp + 600,
                            server2.valid_until)
        # will get into different bucket than server2, so bucket sizes
        # stay low
        partition.add(server3)
        self.assertNotEqual(server2.valid_until,
                            server3.valid_until)
        # server max_lifetime is respected
        partition.add(server4)
        self.assertTrue(
            server4.valid_until <
            server4.up_since + scheduler.DEFAULT_SERVER_UPTIME
        )
class ShapeTest(unittest.TestCase):
    """App shape test cases."""
    def test_affinity_constraints(self):
        """Test affinity constraints."""
        # Without limits the constraint tuple is the affinity name alone.
        aff = scheduler.Affinity('foo', {})
        self.assertEqual(('foo',), aff.constraints)
        aff = scheduler.Affinity('foo', {'server': 1})
        self.assertEqual(('foo', 1,), aff.constraints)
    def test_app_shape(self):
        """Test application shape."""
        # Shape is (affinity constraints..., lease); lease defaults to 0.
        app = scheduler.Application('foo', 11, [1, 1, 1], 'bar')
        self.assertEqual(('bar', 0,), app.shape()[0])
        app.lease = 5
        self.assertEqual(('bar', 5,), app.shape()[0])
        app = scheduler.Application('foo', 11, [1, 1, 1], 'bar',
                                    affinity_limits={'server': 1, 'rack': 2})
        # Values of the dict return ordered by key, (rack, server).
        self.assertEqual(('bar', 1, 2, 0,), app.shape()[0])
        app.lease = 5
        self.assertEqual(('bar', 1, 2, 5,), app.shape()[0])
    def test_placement_tracker(self):
        """Tests placement tracker."""
        app = scheduler.Application('foo', 11, [2, 2, 2], 'bar')
        placement_tracker = scheduler.PlacementFeasibilityTracker()
        # adjust() records that this shape/demand failed to place.
        placement_tracker.adjust(app)
        # Same app.
        self.assertFalse(placement_tracker.feasible(app))
        # Larger app, same shape.
        app = scheduler.Application('foo', 11, [3, 3, 3], 'bar')
        self.assertFalse(placement_tracker.feasible(app))
        # Smaller app, same shape.
        app = scheduler.Application('foo', 11, [1, 1, 1], 'bar')
        self.assertTrue(placement_tracker.feasible(app))
        # Different affinity.
        app = scheduler.Application('foo', 11, [5, 5, 5], 'bar1')
        self.assertTrue(placement_tracker.feasible(app))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "de594d29d90c4c2484637690d0ff31d1",
"timestamp": "",
"source": "github",
"line_count": 1645,
"max_line_length": 79,
"avg_line_length": 38.45835866261398,
"alnum_prop": 0.5705614567526556,
"repo_name": "bretttegart/treadmill",
"id": "8d5bd83ca44a6215cafbcfc73a3f1e590cf448c3",
"size": "63264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/scheduler_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "2975485"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "56911"
}
],
"symlink_target": ""
} |
"""HTML sanitized string field."""
from __future__ import absolute_import, print_function
import bleach
from .sanitizedunicode import SanitizedUnicode
class SanitizedHTML(SanitizedUnicode):
    """String field that sanitizes HTML using the bleach library."""
    def __init__(self, tags=None, attrs=None, *args, **kwargs):
        """Initialize field.

        :param tags: list of allowed HTML tags; defaults to a basic
            formatting whitelist.
        :param attrs: mapping of tag name ('*' for any tag) to allowed
            attributes; defaults to a minimal whitelist below.
        """
        super(SanitizedHTML, self).__init__(*args, **kwargs)
        self.tags = tags or [
            'a',
            'abbr',
            'acronym',
            'b',
            'blockquote',
            'br',
            'code',
            'div',
            'em',
            'i',
            'li',
            'ol',
            'p',
            'pre',
            'span',
            'strike',
            'strong',
            'sub',
            'sup',
            'u',
            'ul',
        ]
        self.attrs = attrs or {
            '*': ['class'],
            'a': ['href', 'title', 'name', 'class', 'rel'],
            'abbr': ['title'],
            'acronym': ['title'],
        }
    def _deserialize(self, value, attr, data):
        """Deserialize string by sanitizing HTML."""
        value = super(SanitizedHTML, self)._deserialize(value, attr, data)
        # strip=True removes disallowed tags entirely instead of escaping
        # them; the trailing .strip() trims surrounding whitespace.
        return bleach.clean(
            value,
            tags=self.tags,
            attributes=self.attrs,
            strip=True,
        ).strip()
| {
"content_hash": "257a8083482d76e8e54f6db1f90e0724",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 76,
"avg_line_length": 25.436363636363637,
"alnum_prop": 0.44889206576125806,
"repo_name": "tiborsimko/invenio-records-rest",
"id": "7eb03d3c1c4b94d1c256432566b311937c5b5cce",
"size": "1634",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "invenio_records_rest/schemas/fields/sanitizedhtml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255451"
},
{
"name": "Shell",
"bytes": "431"
}
],
"symlink_target": ""
} |
import unittest
import numpy as np
import sys
sys.path.append('..')
from op_test import OpTest
from paddle.fluid import core
import paddle
alignment = 256
paddle.enable_static()
class TestAllocContinuousSpace(OpTest):
    """Op test for coalesce_tensor: fuses inputs into one aligned buffer."""
    def setUp(self):
        self.op_type = "coalesce_tensor"
        self.dtype, self.fluid_dtype = self.init_dtype()
        attrs = self.init_attr()
        self.copy_data = attrs["copy_data"]
        self.constant = attrs["constant"]
        self.set_constant = attrs["set_constant"]
        self.Inputs = self.init_input()
        self.Outputs, self.FusedOutput = self.init_output(
            self.Inputs, self.set_constant, self.constant
        )
        self.inputs = {'Input': self.Inputs}
        self.attrs = attrs
        self.outputs = {'Output': self.Outputs, 'FusedOutput': self.FusedOutput}
    def init_dtype(self):
        # Returns (numpy dtype, framework dtype enum).
        return np.float32, int(core.VarDesc.VarType.FP32)
    def init_input(self):
        # Mixed shapes/sizes to exercise per-tensor alignment padding.
        inputs = []
        inputs.append(("x1", np.random.random([20, 3]).astype(self.dtype)))
        inputs.append(("x2", np.random.random([20]).astype(self.dtype)))
        inputs.append(("x3", np.random.random([1]).astype(self.dtype)))
        inputs.append(("x4", np.random.random([200, 30]).astype(self.dtype)))
        inputs.append(("x5", np.random.random([30]).astype(self.dtype)))
        inputs.append(("x6", np.random.random([1]).astype(self.dtype)))
        return inputs
    def init_attr(self):
        return {
            "copy_data": True,
            "set_constant": False,
            "constant": 0.0,
            "dtype": self.fluid_dtype,
        }
    def init_output(self, input_list, set_constant, constant):
        """Build the expected per-tensor outputs and fused buffer."""
        inputs = []
        outputs = input_list
        for input in input_list:
            length = len(input[1].flatten())
            # Round length up to the alignment boundary.
            # NOTE(review): float division then int() - for length that is an
            # exact multiple this over-allocates a full extra alignment block;
            # presumably intentional to match the op, confirm against kernel.
            aligned_len = (length + alignment) / alignment * alignment
            out = np.zeros(int(aligned_len))
            out[0:length] = input[1].flatten()
            inputs.append(out)
        coalesce_tensor_var = np.concatenate([input for input in inputs])
        if set_constant:
            # With set_constant the fused buffer and every output are filled
            # with the constant instead of copied data.
            coalesce_tensor_var = np.ones((len(coalesce_tensor_var))) * constant
            outputs = [
                (out[0], np.ones(out[1].shape).astype(self.dtype) * constant)
                for out in outputs
            ]
        return outputs, coalesce_tensor_var
    def test_check_output(self):
        # FusedOutput layout is implementation-defined, so it is skipped.
        self.check_output_with_place(
            place=paddle.device.MLUPlace(0),
            no_check_set=["FusedOutput"],
            atol=1e-5,
        )
class TestAllocContinuousSpace2(TestAllocContinuousSpace):
    """Variant: constant fill without data copy, custom dtype size."""
    def init_attr(self):
        return {
            "copy_data": False,
            "set_constant": True,
            "constant": 5,
            "dtype": self.fluid_dtype,
            # Treat elements as 2-byte for size accounting purposes.
            "user_defined_size_of_dtype": 2,
        }
    def test_check_output(self):
        self.check_output_with_place(
            place=paddle.device.MLUPlace(0),
            no_check_set=["FusedOutput"],
            atol=1e-5,
        )
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "997a26f76a7a4e0ceba82ef79d8be6e6",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 80,
"avg_line_length": 31.632653061224488,
"alnum_prop": 0.5725806451612904,
"repo_name": "PaddlePaddle/Paddle",
"id": "97bc47971f475acd1e433f7253672aa006c73c5c",
"size": "3713",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/mlu/test_coalesce_tensor_op_mlu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
} |
def mean(values: list) -> float:
    """Return the arithmetic mean of *values*.

    The original annotated the return type as ``int``, but true division
    always yields a float; the annotation is corrected here.

    Raises ZeroDivisionError for an empty list (unchanged behavior).
    """
    return sum(values) / len(values)
def median(values: list) -> float:
    """Return the median of *values*.

    For an odd number of elements this is the middle element of the sorted
    list; for an even number it is the mean of the two middle elements.
    """
    ordered = sorted(values)
    middle = len(ordered) // 2
    if len(ordered) % 2:
        return ordered[middle]
    return (ordered[middle - 1] + ordered[middle]) / 2
def mode(values: list) -> int:
    """Return the most frequent value in *values*.

    Ties are broken by preferring the smallest value, matching the
    original single-pass implementation.
    """
    counts = {}
    for value in values:
        counts[value] = counts.get(value, 0) + 1
    best = None
    for value, count in counts.items():
        if best is None:
            best = value
        elif count > counts[best]:
            best = value
        elif count == counts[best] and value < best:
            best = value
    return best
# Read the element count (unused beyond input-format compatibility) and
# the space-separated values, then print the three statistics in order.
n = int(input())
x = [int(token) for token in input().split()]
print(mean(x))
print(median(x))
print(mode(x))
| {
"content_hash": "86af3f0cdf23e58dfc8ad770cab22dec",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 66,
"avg_line_length": 21.07894736842105,
"alnum_prop": 0.6441947565543071,
"repo_name": "ehouarn-perret/EhouarnPerret.Python.HackerRank",
"id": "f4da015b3ca3e9bf2341a41a325d9c49ac49dbc2",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "HackerRank/0 - Tutorials/10 Days of Statistics/Day 0 - Mean Median and Mode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70347"
}
],
"symlink_target": ""
} |
import distribute_setup
distribute_setup.use_setuptools()
import sys
from setuptools import setup
def read_file(name):
    """Return the content of the file *name*.

    If the file opens but cannot be read, a warning is printed and None
    is returned. An error opening the file still propagates, as before.

    Fix: the original leaked the file handle on the successful path
    (``return f.read()`` skipped the trailing ``f.close()``); the ``with``
    statement guarantees the handle is closed in all cases.
    """
    with open(name) as f:
        try:
            return f.read()
        except IOError:
            print("could not read %r" % name)
# Package metadata.
PROJECT = 'skeleton'
VERSION = '0.6'
URL = 'http://dinoboff.github.com/skeleton'
AUTHOR = 'Damien Lebrun'
AUTHOR_EMAIL = 'dinoboff@gmail.com'
DESC = "Basic Template system for project skeleton."
# Long description is assembled from the README and changelog.
LONG_DESC = read_file('README.rst') + '\n\n' + read_file('HISTORY.rst')
EXTRAS = {}
if sys.version_info > (3,):
    # Run 2to3 at build time when installing under Python 3.
    EXTRAS['use_2to3'] = True
setup(
    name=PROJECT,
    version=VERSION,
    description=DESC,
    long_description=LONG_DESC,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    license='BSD',
    packages=['skeleton', 'skeleton.tests', 'skeleton.examples'],
    test_suite='skeleton.tests',
    include_package_data=True,
    zip_safe=False,
    install_requires=[],
    # Optional extra for virtualenvwrapper integration.
    extras_require={
        'virtualenv-templates': [
            'virtualenvwrapper>=2.1.1',
            'virtualenvwrapper.project>=1.0'
        ],
    },
    # Hook registered with virtualenvwrapper's project-template plugin system.
    entry_points={
        'virtualenvwrapper.project.template': [
            'package = skeleton.examples.basicpackage:virtualenv_warpper_hook',
        ],
    },
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.1',
    ],
    **EXTRAS
)
| {
"content_hash": "396daa6535d5844b2242873307e430ea",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 24.728571428571428,
"alnum_prop": 0.5967648757943386,
"repo_name": "dinoboff/skeleton",
"id": "d73a0fa16c1f2776950efda988ef971ac5b76ef2",
"size": "1793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "95627"
}
],
"symlink_target": ""
} |
from flask import Blueprint
from flask import render_template, request, redirect
from cloudmesh.config.cm_keys import cm_keys
from cloudmesh.util.util import cond_decorator
import cloudmesh
from flask.ext.login import login_required
from cloudmesh.util.logger import LOGGER
log = LOGGER(__file__)
mooc_module = Blueprint('mooc_module', __name__)
#
# ROUTE: mooc
#
@mooc_module.route('/mooc')
def display_mooc():
    """Render the MOOC landing page."""
    return render_template('mooc.html')
| {
"content_hash": "f1670238fc080aeca726be75ef4fb0f2",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 52,
"avg_line_length": 18.48,
"alnum_prop": 0.7510822510822511,
"repo_name": "rajpushkar83/cloudmesh",
"id": "8838bdf3f3ae19d0cb3968e8afff792e96f0e93d",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo/modules/mooc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "390396"
},
{
"name": "HTML",
"bytes": "4158355"
},
{
"name": "Java",
"bytes": "369"
},
{
"name": "JavaScript",
"bytes": "2803977"
},
{
"name": "Makefile",
"bytes": "7572"
},
{
"name": "PHP",
"bytes": "183557"
},
{
"name": "Python",
"bytes": "1736957"
},
{
"name": "Ruby",
"bytes": "10670"
},
{
"name": "Shell",
"bytes": "32263"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python failover_a_managed_instance..py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fail over a managed SQL instance (generated Azure SDK sample).

    Subscription, resource group and instance names are placeholders;
    credentials come from the environment via DefaultAzureCredential.
    """
    client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )
    # begin_failover returns a poller; .result() blocks until completion.
    response = client.managed_instances.begin_failover(
        resource_group_name="group1",
        managed_instance_name="instanceName",
    ).result()
    print(response)
# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2021-05-01-preview/examples/FailoverManagedInstance.json
if __name__ == "__main__":
main()
| {
"content_hash": "903022e8e609a41f7b40a3e2d1f375e0",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 135,
"avg_line_length": 33.696969696969695,
"alnum_prop": 0.7320143884892086,
"repo_name": "Azure/azure-sdk-for-python",
"id": "7d620328daca0e949785a4a6d94872ffcdd16e05",
"size": "1580",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/sql/azure-mgmt-sql/generated_samples/failover_a_managed_instance..py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Splits the single item_phase_out_threshold field into per-filing-status
    # fields: the existing column becomes the 'single' variant and three new
    # nullable float columns are added for head, jointly and separately.
    dependencies = [
        ('taxbrain', '0051_auto_20150303_1454'),
    ]
    operations = [
        migrations.RenameField(
            model_name='taxsaveinputs',
            old_name='item_phase_out_threshold',
            new_name='item_phase_out_threshold_single',
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='item_phase_out_threshold_head',
            field=models.FloatField(default=None, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='item_phase_out_threshold_jointly',
            field=models.FloatField(default=None, null=True, blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='taxsaveinputs',
            name='item_phase_out_threshold_separately',
            field=models.FloatField(default=None, null=True, blank=True),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "70c19239f24d0795a43d500a2cba9f56",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 73,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.5903508771929824,
"repo_name": "nolanzandi/webapp-public",
"id": "25ace0ae5aa2572553fdee53990a184942184cfe",
"size": "1164",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/migrations/0052_auto_20150303_1456.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61908"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "380111"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
} |
from app import create_app, db
from app.models import User, Role, Permission, Question, Comment, Vote, Answer
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
import os
app = create_app('default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
    """Build the namespace injected into the `manage.py shell` session."""
    context = {
        'app': app,
        'db': db,
        'User': User,
        'Role': Role,
        'Question': Question,
        'Permission': Permission,
        'Comment': Comment,
        'Vote': Vote,
        'Answer': Answer,
    }
    return context
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command("db", MigrateCommand)
@manager.command
def test():
    """Discover and run the unit-test suite under the tests/ directory."""
    import unittest
    tests = unittest.TestLoader().discover('tests')  # 'tests' is the directory scanned
    unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def deploy():
    """Seed the database: insert roles and generate fake content."""
    from app import db
    from app.models import User, Role, Question, Answer, Comment, Vote
    # Commented-out lines below are deliberate scaffolding for a full reset;
    # left in place so a destructive redeploy is a quick uncomment away.
    # db.drop_all()
    # db.create_all()
    # db.session.commit()
    Role.insert_roles()
    #
    #User.generate_fake()
    # User.add_self_follows()
    Question.generate_fake()
    Answer.generate_fake()
    # Comment.generate_fake()
    Vote.generate_fake()
    # db.session.commit()
def detect():
    """Ensure the default roles exist in the database."""
    Role.insert_roles()
if __name__ == '__main__':
    manager.run()
    # NOTE(review): this line only executes after manager.run() returns,
    # which for most commands means never - confirm whether it is intended.
    detect()
| {
"content_hash": "5b49f359a32079f07f7b868473f6c7c6",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 78,
"avg_line_length": 24.73076923076923,
"alnum_prop": 0.6765163297045101,
"repo_name": "VashonHu/MyWebsiteWithFlask",
"id": "c387098d6633d5f47c84dff481e7c43b6c2fa104",
"size": "1305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "822"
},
{
"name": "HTML",
"bytes": "24530"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "61318"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from urllib import quote
import cloudstorage
from cloudstorage.storage_api import _StorageApi
from google.appengine.api.blobstore import blobstore
from rogerthat.consts import DEBUG, ROGERTHAT_ATTACHMENTS_BUCKET
from rogerthat.settings import get_server_settings
from rogerthat.utils import read_file_in_chunks
def upload_to_gcs(file_data, content_type, file_name):
    """Upload data to Google Cloud Storage and return its blobstore key.

    Args:
        file_data (str or file-like object)
        content_type (unicode)
        file_name (unicode): full GCS object path (starting with the bucket).

    Returns:
        blob_key (unicode): An encrypted `BlobKey` string.
    """
    # this can fail on the devserver for some reason
    with cloudstorage.open(file_name, 'w', content_type=content_type) as f:
        if isinstance(file_data, basestring):
            f.write(file_data)
        else:
            try:
                # Stream file-like input in chunks to bound memory usage.
                for chunk in read_file_in_chunks(file_data):
                    f.write(chunk)
            except AttributeError:
                # read_file_in_chunks failed to read - not a file-like object.
                raise ValueError('file_data must be a file-like object')
    # '/gs' prefix maps the GCS path into the blobstore namespace.
    return blobstore.create_gs_key('/gs' + file_name).decode('utf-8')
def get_serving_url(filename):
    """Return the public (or devserver) URL serving a GCS object.

    Args:
        filename (unicode): full GCS path, e.g. ``/bucket/dir/name``.
    """
    directory, base_name = filename.rsplit('/', 1)
    # Only the final path component needs URL-escaping.
    full_path = '{}/{}'.format(directory, quote(base_name))
    if DEBUG:
        return '{}/_ah/gcs{}'.format(get_server_settings().baseUrl, full_path)
    return _StorageApi.api_url + full_path
def get_blobstore_cloudstorage_path(blob_key):
    """Return the GCS path under which a blobstore blob is stored."""
    return '/'.join([ROGERTHAT_ATTACHMENTS_BUCKET, 'blobstore', blob_key])
| {
"content_hash": "000e7054364542fc2a5a5cb3c852abb7",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 31.2,
"alnum_prop": 0.6493589743589744,
"repo_name": "our-city-app/oca-backend",
"id": "eb3537f6ba193b29bcc096d478630e57786d660b",
"size": "2200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rogerthat/bizz/gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "166"
},
{
"name": "CSS",
"bytes": "62142"
},
{
"name": "HTML",
"bytes": "697349"
},
{
"name": "JavaScript",
"bytes": "1023951"
},
{
"name": "PostScript",
"bytes": "4694678"
},
{
"name": "Python",
"bytes": "3149982"
},
{
"name": "Shell",
"bytes": "5839"
},
{
"name": "TypeScript",
"bytes": "690248"
}
],
"symlink_target": ""
} |
"""Facebook platform for notify component."""
import json
import logging
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.const import CONTENT_TYPE_JSON
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
# Module-level logger for this notify platform.
_LOGGER = logging.getLogger(__name__)
# Configuration key for the Facebook page access token.
CONF_PAGE_ACCESS_TOKEN = "page_access_token"
# Graph API endpoint for sending a message to a single recipient.
BASE_URL = "https://graph.facebook.com/v2.6/me/messages"
# Graph API endpoints for creating, then sending, a broadcast message.
CREATE_BROADCAST_URL = "https://graph.facebook.com/v2.11/me/message_creatives"
SEND_BROADCAST_URL = "https://graph.facebook.com/v2.11/me/broadcast_messages"
# Extend the shared notify platform schema: the page access token is required.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {vol.Required(CONF_PAGE_ACCESS_TOKEN): cv.string}
)
def get_service(hass, config, discovery_info=None):
    """Get the Facebook notification service."""
    access_token = config[CONF_PAGE_ACCESS_TOKEN]
    return FacebookNotificationService(access_token)
class FacebookNotificationService(BaseNotificationService):
    """Implementation of a notification service for the Facebook service."""

    def __init__(self, access_token):
        """Initialize the service."""
        self.page_access_token = access_token

    def send_message(self, message="", **kwargs):
        """Send some message.

        Targets are Messenger user ids or phone numbers (leading "+");
        the single target "broadcast" sends to all page subscribers.
        """
        payload = {"access_token": self.page_access_token}
        targets = kwargs.get(ATTR_TARGET)
        data = kwargs.get(ATTR_DATA)

        body_message = {"text": message}

        if data is not None:
            body_message.update(data)
            # Only one of text or attachment can be specified
            if "attachment" in body_message:
                body_message.pop("text")

        if not targets:
            _LOGGER.error("At least 1 target is required")
            return

        if targets[0].lower() == "broadcast":
            self._send_broadcast(body_message, payload)
        else:
            self._send_direct(targets, body_message, payload)

    def _send_broadcast(self, body_message, payload):
        """Create a message creative, then broadcast it to all subscribers."""
        broadcast_create_body = {"messages": [body_message]}
        _LOGGER.debug("Broadcast body %s : ", broadcast_create_body)
        resp = requests.post(
            CREATE_BROADCAST_URL,
            data=json.dumps(broadcast_create_body),
            params=payload,
            headers={CONTENT_TYPE: CONTENT_TYPE_JSON},
            timeout=10,
        )
        # Fix: the original ignored a failed creative-creation call and
        # proceeded to broadcast with message_creative_id=None.
        if resp.status_code != 200:
            log_error(resp)
            return
        _LOGGER.debug("FB Messager broadcast id %s : ", resp.json())

        # at this point we get broadcast id
        broadcast_body = {
            "message_creative_id": resp.json().get("message_creative_id"),
            "notification_type": "REGULAR",
        }
        resp = requests.post(
            SEND_BROADCAST_URL,
            data=json.dumps(broadcast_body),
            params=payload,
            headers={CONTENT_TYPE: CONTENT_TYPE_JSON},
            timeout=10,
        )
        if resp.status_code != 200:
            log_error(resp)

    def _send_direct(self, targets, body_message, payload):
        """Send the message to each explicit target (user id or phone number)."""
        for target in targets:
            # If the target starts with a "+", it's a phone number,
            # otherwise it's a user id.
            if target.startswith("+"):
                recipient = {"phone_number": target}
            else:
                recipient = {"id": target}

            body = {"recipient": recipient, "message": body_message}
            resp = requests.post(
                BASE_URL,
                data=json.dumps(body),
                params=payload,
                headers={CONTENT_TYPE: CONTENT_TYPE_JSON},
                timeout=10,
            )
            if resp.status_code != 200:
                log_error(resp)
def log_error(response):
    """Log error message."""
    error = response.json()["error"]
    _LOGGER.error(
        "Error %s : %s (Code %s)", response.status_code,
        error["message"], error["code"]
    )
| {
"content_hash": "57c5f0e8ee93f81bf189dba19c340ee5",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 82,
"avg_line_length": 32.66942148760331,
"alnum_prop": 0.5722236276245889,
"repo_name": "qedi-r/home-assistant",
"id": "452b81c0f16a0602d59efc54f19fb48e17290629",
"size": "3953",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/facebook/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
import hashlib
import os
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
import yaml
from tempest import clients
from tempest.common import cred_provider
from tempest.common import fixed_network
from tempest import config
from tempest import exceptions
# Module-level config handle and logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
def read_accounts_yaml(path):
    """Load the test accounts YAML file and return the parsed accounts.

    Uses ``yaml.safe_load`` instead of ``yaml.load``: the accounts file
    only contains plain mappings/lists, and ``yaml.load`` without an
    explicit Loader can construct arbitrary Python objects from a crafted
    file (and requires a Loader argument on PyYAML >= 6).
    """
    with open(path, 'r') as yaml_file:
        accounts = yaml.safe_load(yaml_file)
    return accounts
class Accounts(cred_provider.CredentialProvider):
    """Credential provider backed by a pre-provisioned accounts YAML file.

    Each account is identified by the md5 of its attribute dict.  A lock
    file per hash (under ``accounts_dir``) marks an account as allocated,
    so parallel test processes never hand out the same credentials twice.
    """

    def __init__(self, identity_version=None, name=None):
        super(Accounts, self).__init__(identity_version=identity_version,
                                       name=name)
        if (CONF.auth.test_accounts_file and
                os.path.isfile(CONF.auth.test_accounts_file)):
            accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
            self.use_default_creds = False
        else:
            accounts = {}
            self.use_default_creds = True
        self.hash_dict = self.get_hash_dict(accounts)
        self.accounts_dir = os.path.join(lockutils.get_lock_path(CONF),
                                         'test_accounts')
        self.isolated_creds = {}

    @classmethod
    def _append_role(cls, role, account_hash, hash_dict):
        """Record ``account_hash`` as carrying ``role`` in ``hash_dict``."""
        if role in hash_dict['roles']:
            hash_dict['roles'][role].append(account_hash)
        else:
            hash_dict['roles'][role] = [account_hash]
        return hash_dict

    @classmethod
    def get_hash_dict(cls, accounts):
        """Build the account lookup dict.

        Returns a dict with three sub-dicts: ``creds`` (hash -> account
        attributes), ``roles`` (role name -> list of hashes) and
        ``networks`` (hash -> network resource name).
        """
        hash_dict = {'roles': {}, 'creds': {}, 'networks': {}}
        # Loop over the accounts read from the yaml file
        for account in accounts:
            roles = []
            types = []
            resources = []
            if 'roles' in account:
                roles = account.pop('roles')
            if 'types' in account:
                types = account.pop('types')
            if 'resources' in account:
                resources = account.pop('resources')
            temp_hash = hashlib.md5()
            temp_hash.update(six.text_type(account).encode('utf-8'))
            temp_hash_key = temp_hash.hexdigest()
            hash_dict['creds'][temp_hash_key] = account
            for role in roles:
                hash_dict = cls._append_role(role, temp_hash_key,
                                             hash_dict)
            # If types are set for the account append the matching role
            # subdict with the hash.  ('type' renamed to avoid shadowing
            # the builtin.)
            for account_type in types:
                if account_type == 'admin':
                    hash_dict = cls._append_role(CONF.identity.admin_role,
                                                 temp_hash_key, hash_dict)
                elif account_type == 'operator':
                    hash_dict = cls._append_role(
                        CONF.object_storage.operator_role, temp_hash_key,
                        hash_dict)
                elif account_type == 'reseller_admin':
                    hash_dict = cls._append_role(
                        CONF.object_storage.reseller_admin_role,
                        temp_hash_key,
                        hash_dict)
            # Populate the network subdict
            for resource in resources:
                if resource == 'network':
                    hash_dict['networks'][temp_hash_key] = resources[resource]
                else:
                    LOG.warning('Unknown resource type %s, ignoring this field'
                                % resource)
        return hash_dict

    def is_multi_user(self):
        """True when more than one account is available."""
        # Default credentials is not a valid option with locking Account
        if self.use_default_creds:
            raise exceptions.InvalidConfiguration(
                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
        else:
            return len(self.hash_dict['creds']) > 1

    def is_multi_tenant(self):
        return self.is_multi_user()

    def _create_hash_file(self, hash_string):
        """Try to create the lock file for ``hash_string``.

        Returns True when the file was created (the account is now ours),
        False when it already existed (the account is in use elsewhere).
        """
        path = os.path.join(self.accounts_dir, hash_string)
        if not os.path.isfile(path):
            with open(path, 'w') as fd:
                fd.write(self.name)
            return True
        return False

    @lockutils.synchronized('test_accounts_io', external=True)
    def _get_free_hash(self, hashes):
        """Return the first hash whose account is not already allocated.

        Creates the lock file for the returned hash.  Raises
        InvalidConfiguration when every candidate account is in use.
        """
        # Cast as a list because in some edge cases a set will be passed in
        hashes = list(hashes)
        if not os.path.isdir(self.accounts_dir):
            os.mkdir(self.accounts_dir)
            # First allocation ever: no lock files can exist yet, so take
            # the first hash.  (Bug fix: these two lines were de-indented
            # out of this branch, which made the search below unreachable
            # and unconditionally allocated hashes[0].)
            self._create_hash_file(hashes[0])
            return hashes[0]
        names = []
        for _hash in hashes:
            if self._create_hash_file(_hash):
                return _hash
            # Account is taken; remember who holds it for the error message.
            path = os.path.join(self.accounts_dir, _hash)
            with open(path, 'r') as fd:
                names.append(fd.read())
        msg = ('Insufficient number of users provided. %s have allocated all '
               'the credentials for this allocation request' % ','.join(names))
        raise exceptions.InvalidConfiguration(msg)

    def _get_match_hash_list(self, roles=None):
        """Return hashes matching all ``roles`` (all non-admin ones if None)."""
        hashes = []
        if roles:
            # Loop over all the creds for each role in the subdict and generate
            # a list of cred lists for each role
            for role in roles:
                temp_hashes = self.hash_dict['roles'].get(role, None)
                if not temp_hashes:
                    raise exceptions.InvalidConfiguration(
                        "No credentials with role: %s specified in the "
                        "accounts ""file" % role)
                hashes.append(temp_hashes)
            # Take the list of lists and do a boolean and between each list to
            # find the creds which fall under all the specified roles
            temp_list = set(hashes[0])
            for hash_list in hashes[1:]:
                temp_list = temp_list & set(hash_list)
            hashes = temp_list
        else:
            hashes = self.hash_dict['creds'].keys()
        # NOTE(mtreinish): admin is a special case because of the increased
        # privlege set which could potentially cause issues on tests where that
        # is not expected. So unless the admin role isn't specified do not
        # allocate admin.
        admin_hashes = self.hash_dict['roles'].get(CONF.identity.admin_role,
                                                   None)
        if ((not roles or CONF.identity.admin_role not in roles) and
                admin_hashes):
            useable_hashes = [x for x in hashes if x not in admin_hashes]
        else:
            useable_hashes = hashes
        return useable_hashes

    def _sanitize_creds(self, creds):
        """Return a copy of ``creds`` without the password (for logging)."""
        temp_creds = creds.copy()
        temp_creds.pop('password')
        return temp_creds

    def _get_creds(self, roles=None):
        """Allocate and return a free credential set (wrapped with network)."""
        if self.use_default_creds:
            raise exceptions.InvalidConfiguration(
                "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
        useable_hashes = self._get_match_hash_list(roles)
        free_hash = self._get_free_hash(useable_hashes)
        clean_creds = self._sanitize_creds(
            self.hash_dict['creds'][free_hash])
        LOG.info('%s allocated creds:\n%s' % (self.name, clean_creds))
        return self._wrap_creds_with_network(free_hash)

    @lockutils.synchronized('test_accounts_io', external=True)
    def remove_hash(self, hash_string):
        """Release the lock file for ``hash_string`` (and the dir if empty)."""
        hash_path = os.path.join(self.accounts_dir, hash_string)
        if not os.path.isfile(hash_path):
            LOG.warning('Expected an account lock file %s to remove, but '
                        'one did not exist' % hash_path)
        else:
            os.remove(hash_path)
            if not os.listdir(self.accounts_dir):
                os.rmdir(self.accounts_dir)

    def get_hash(self, creds):
        """Find the hash matching a credentials object."""
        for _hash in self.hash_dict['creds']:
            # Comparing on the attributes that are expected in the YAML
            init_attributes = creds.get_init_attributes()
            hash_attributes = self.hash_dict['creds'][_hash].copy()
            if ('user_domain_name' in init_attributes and 'user_domain_name'
                    not in hash_attributes):
                # Allow for the case of domain_name populated from config
                domain_name = CONF.identity.admin_domain_name
                hash_attributes['user_domain_name'] = domain_name
            if all([getattr(creds, k) == hash_attributes[k] for
                    k in init_attributes]):
                return _hash
        raise AttributeError('Invalid credentials %s' % creds)

    def remove_credentials(self, creds):
        """Release the allocation previously made for ``creds``."""
        _hash = self.get_hash(creds)
        clean_creds = self._sanitize_creds(self.hash_dict['creds'][_hash])
        self.remove_hash(_hash)
        LOG.info("%s returned allocated creds:\n%s" % (self.name, clean_creds))

    def get_primary_creds(self):
        if self.isolated_creds.get('primary'):
            return self.isolated_creds.get('primary')
        net_creds = self._get_creds()
        self.isolated_creds['primary'] = net_creds
        return net_creds

    def get_alt_creds(self):
        if self.isolated_creds.get('alt'):
            return self.isolated_creds.get('alt')
        net_creds = self._get_creds()
        self.isolated_creds['alt'] = net_creds
        return net_creds

    def get_creds_by_roles(self, roles, force_new=False):
        """Allocate credentials holding every role in ``roles``.

        With ``force_new`` a previously returned set for the same role list
        is kept (re-indexed) and an additional set is allocated.
        """
        roles = list(set(roles))
        exist_creds = self.isolated_creds.get(six.text_type(roles).encode(
            'utf-8'), None)
        # The force kwarg is used to allocate an additional set of creds with
        # the same role list. The index used for the previously allocation
        # in the isolated_creds dict will be moved.
        if exist_creds and not force_new:
            return exist_creds
        elif exist_creds and force_new:
            new_index = six.text_type(roles).encode('utf-8') + '-' + \
                six.text_type(len(self.isolated_creds)).encode('utf-8')
            self.isolated_creds[new_index] = exist_creds
        net_creds = self._get_creds(roles=roles)
        self.isolated_creds[six.text_type(roles).encode('utf-8')] = net_creds
        return net_creds

    def clear_isolated_creds(self):
        """Release every credential set allocated by this provider."""
        for creds in self.isolated_creds.values():
            self.remove_credentials(creds)

    def get_admin_creds(self):
        return self.get_creds_by_roles([CONF.identity.admin_role])

    def is_role_available(self, role):
        """True when at least one configured account carries ``role``."""
        if self.use_default_creds:
            return False
        else:
            if self.hash_dict['roles'].get(role):
                return True
            return False

    def admin_available(self):
        return self.is_role_available(CONF.identity.admin_role)

    def _wrap_creds_with_network(self, hash_string):
        """Build a TestResources (creds + network) for ``hash_string``."""
        creds_dict = self.hash_dict['creds'][hash_string]
        credential = cred_provider.get_credentials(
            identity_version=self.identity_version, **creds_dict)
        net_creds = cred_provider.TestResources(credential)
        net_clients = clients.Manager(credentials=credential)
        compute_network_client = net_clients.networks_client
        net_name = self.hash_dict['networks'].get(hash_string, None)
        try:
            network = fixed_network.get_network_from_name(
                net_name, compute_network_client)
        except exceptions.InvalidConfiguration:
            network = {}
        net_creds.set_resources(network=network)
        return net_creds
class NotLockingAccounts(Accounts):
    """Credentials provider which always returns the first and second
    configured accounts as primary and alt users.

    This credential provider can be used in case of serial test execution
    to preserve the current behaviour of the serial tempest run.
    """

    def _unique_creds(self, cred_arg=None):
        """Verify that the configured credentials are valid and distinct """
        try:
            primary = self.get_primary_creds()
            alternate = self.get_alt_creds()
            return getattr(primary, cred_arg) != getattr(alternate, cred_arg)
        except exceptions.InvalidCredentials as ic:
            msg = "At least one of the configured credentials is " \
                  "not valid: %s" % ic.message
            raise exceptions.InvalidConfiguration(msg)

    def is_multi_user(self):
        return self._unique_creds('username')

    def is_multi_tenant(self):
        return self._unique_creds('tenant_id')

    def get_primary_creds(self):
        """Return (and cache) the configured 'user' credentials."""
        cached = self.isolated_creds.get('primary')
        if cached:
            return cached
        configured = cred_provider.get_configured_credentials(
            credential_type='user', identity_version=self.identity_version)
        self.isolated_creds['primary'] = cred_provider.TestResources(
            configured)
        return self.isolated_creds['primary']

    def get_alt_creds(self):
        """Return (and cache) the configured 'alt_user' credentials."""
        cached = self.isolated_creds.get('alt')
        if cached:
            return cached
        configured = cred_provider.get_configured_credentials(
            credential_type='alt_user',
            identity_version=self.identity_version)
        self.isolated_creds['alt'] = cred_provider.TestResources(
            configured)
        return self.isolated_creds['alt']

    def clear_isolated_creds(self):
        self.isolated_creds = {}

    def get_admin_creds(self):
        """Return the configured identity-admin credentials."""
        admin = cred_provider.get_configured_credentials(
            "identity_admin", fill_in=False)
        self.isolated_creds['admin'] = cred_provider.TestResources(admin)
        return self.isolated_creds['admin']

    def get_creds_by_roles(self, roles, force_new=False):
        # Role-based allocation is incompatible with fixed config creds.
        msg = "Credentials being specified through the config file can not be"\
              " used with tests that specify using credentials by roles. "\
              "Either exclude/skip the tests doing this or use either an "\
              "test_accounts_file or tenant isolation."
        raise exceptions.InvalidConfiguration(msg)
| {
"content_hash": "a8a1a2fe2834044fb09a81a377fe2e3c",
"timestamp": "",
"source": "github",
"line_count": 341,
"max_line_length": 79,
"avg_line_length": 41.74486803519061,
"alnum_prop": 0.5832103969090271,
"repo_name": "Juraci/tempest",
"id": "78e0e72657f47b8e22fd74bb5a7eb6b3834dddbf",
"size": "14873",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/common/accounts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2701511"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
import argparse
import sys
import beakerx_tabledisplay
from notebook import notebookapp as app
from .install import install, uninstall
def install_subparser(subparser):
    """Register the ``install`` sub-command and its options."""
    cmd = subparser.add_parser(
        'install', help='installs BeakerX tabledisplay extensions')
    cmd.set_defaults(func=install)
    cmd.add_argument(
        "--prefix",
        help="location of the environment to install into",
        default=sys.prefix)
    cmd.add_argument(
        "--lab",
        help="install lab extension",
        action='store_true')
    return subparser
def uninstall_subparser(subparser):
    """Register the ``uninstall`` sub-command and its options."""
    cmd = subparser.add_parser(
        'uninstall', help='uninstalls BeakerX tabledisplay extensions')
    cmd.set_defaults(func=uninstall)
    cmd.add_argument(
        "--prefix",
        help="location of the environment to uninstall from",
        default=sys.prefix)
    return subparser
def run_jupyter(jupyter_commands):
    """Hand the remaining command-line arguments off to the notebook app."""
    app.launch_new_instance(jupyter_commands)
def init_parser():
    """Build the argument parser: --version plus install/uninstall commands."""
    root = argparse.ArgumentParser()
    root.add_argument(
        '--version', action='version',
        version=beakerx_tabledisplay.__version__)
    # Default action (no sub-command given): forward everything to Jupyter.
    root.set_defaults(func=run_jupyter)
    commands = root.add_subparsers()
    install_subparser(commands)
    uninstall_subparser(commands)
    return root
def parse():
    """Parse CLI arguments and dispatch to the selected sub-command."""
    parser = init_parser()
    args, leftover = parser.parse_known_args()
    if args.func is run_jupyter:
        args.func(leftover)
    elif not leftover:
        args.func(args)
    else:
        # Unknown extra arguments for a sub-command: let argparse report them.
        parser.parse_args(leftover)
| {
"content_hash": "9f32f8e48043361cec474640ba9099c2",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 107,
"avg_line_length": 36.02040816326531,
"alnum_prop": 0.6560906515580737,
"repo_name": "twosigma/beaker-notebook",
"id": "082309322ccc8b0d52bfef6145de06cb5b161424",
"size": "2359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beakerx_tabledisplay/beakerx_tabledisplay/commands.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "47603"
},
{
"name": "C++",
"bytes": "9890"
},
{
"name": "CSS",
"bytes": "14836"
},
{
"name": "HTML",
"bytes": "4901"
},
{
"name": "Java",
"bytes": "2257909"
},
{
"name": "JavaScript",
"bytes": "656859"
},
{
"name": "Jupyter Notebook",
"bytes": "1361818"
},
{
"name": "Python",
"bytes": "57917"
},
{
"name": "Scala",
"bytes": "3664"
}
],
"symlink_target": ""
} |
import angr
import logging
l = logging.getLogger(name=__name__)
# mmap(2) protection bits (values match Linux <sys/mman.h>).
PROT_READ = 0x1  # Page can be read.
PROT_WRITE = 0x2  # Page can be written.
PROT_EXEC = 0x4  # Page can be executed.
PROT_NONE = 0x0  # Page can not be accessed.
# mmap(2) mapping flags.
MAP_SHARED = 0x01  # Share changes.
MAP_PRIVATE = 0x02  # Changes are private.
MAP_ANONYMOUS = 0x20  # Don't use a file.
MAP_FIXED = 0x10  # Interpret addr exactly.
class mmap(angr.SimProcedure):
    """SimProcedure modelling mmap(2) for anonymous, non-file mappings."""
    def run(self, addr, length, prot, flags, fd, offset): #pylint:disable=arguments-differ,unused-argument
        #if self.state.solver.symbolic(flags) or self.state.solver.eval(flags) != 0x22:
        #    raise Exception("mmap with other than MAP_PRIVATE|MAP_ANONYMOUS unsupported")
        l.debug("mmap(%s, %s, %s, %s, %s, %s) = ...", addr, length, prot, flags, fd, offset)
        # File-backed mappings are unsupported: fd must be able to equal -1
        # (only the low 32 bits of the fd argument are compared).
        if self.state.solver.is_false(fd[31:0] == -1):
            raise angr.errors.SimPosixError("Trying to map a file descriptor. I cannot deal with this.")
        #
        # Length
        #
        # A symbolic length is concretized to its maximum, capped at the
        # configured libc.max_variable_size.
        if self.state.solver.symbolic(length):
            size = self.state.solver.max_int(length)
            if size > self.state.libc.max_variable_size:
                l.warning("mmap size requested of %d exceeds libc.max_variable_size. Using size %d instead.", size,self.state.libc.max_variable_size)
                size = self.state.libc.max_variable_size
        else:
            size = self.state.solver.eval(length)
        #
        # Addr
        #
        # Not handling symbolic addr for now
        addrs = self.state.solver.eval_upto(addr,2)
        if len(addrs) == 2:
            err = "Cannot handle symbolic addr argument for mmap."
            l.error(err)
            raise angr.errors.SimPosixError(err)
        addr = addrs[0]
        # Call is asking for system to provide an address
        if addr == 0:
            addr = self.allocate_memory(size)
        #
        # Flags
        #
        # Only want concrete flags
        flags = self.state.solver.eval_upto(flags,2)
        if len(flags) == 2:
            err = "Cannot handle symbolic flags argument for mmap."
            l.error(err)
            raise angr.errors.SimPosixError(err)
        flags = flags[0]
        # Sanity check. All mmap must have exactly one of MAP_SHARED or MAP_PRIVATE
        if (flags & MAP_SHARED and flags & MAP_PRIVATE) or flags & (MAP_SHARED | MAP_PRIVATE) == 0:
            l.debug('... = -1 (bad flags)')
            return self.state.solver.BVV(-1, self.state.arch.bits)
        # Keep trying fresh addresses until an unmapped region is found
        # (unless MAP_FIXED pins the request to one address).
        while True:
            try:
                self.state.memory.map_region(addr, size, prot[2:0], init_zero=bool(flags & MAP_ANONYMOUS))
                l.debug('... = %#x', addr)
                return addr
            except angr.SimMemoryError:
                # This page is already mapped
                if flags & MAP_FIXED:
                    l.debug('... = -1 (MAP_FIXED failure)')
                    return self.state.solver.BVV(-1, self.state.arch.bits)
                # Can't give you that address. Find a different one and loop back around to try again.
                addr = self.allocate_memory(size)
    def allocate_memory(self,size):
        """Reserve ``size`` bytes at the current mmap base and advance the
        base to the next page-aligned (0x1000) address past the reservation."""
        addr = self.state.heap.mmap_base
        new_base = addr + size
        if new_base & 0xfff:
            new_base = (new_base & ~0xfff) + 0x1000
        self.state.heap.mmap_base = new_base
        return addr
| {
"content_hash": "cd0d34010dd55cec3bc5c142671ad5d1",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 149,
"avg_line_length": 34.16504854368932,
"alnum_prop": 0.5575447570332481,
"repo_name": "schieb/angr",
"id": "b8035c3699cbcdeb0446401a153e6dd591896cb5",
"size": "3519",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "angr/procedures/posix/mmap.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39522"
},
{
"name": "Dockerfile",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "739"
},
{
"name": "Python",
"bytes": "4987778"
}
],
"symlink_target": ""
} |
"""logging api
Revision ID: c8c222d42aa9
Revises: 62c781cb6192
Create Date: 2017-05-30 11:51:08.173604
"""
# revision identifiers, used by Alembic.
revision = 'c8c222d42aa9'  # this migration
down_revision = '62c781cb6192'  # migration this one applies on top of
from alembic import op
import sqlalchemy as sa
from neutron_lib.db import constants as db_const
def upgrade():
    """Create the ``logs`` table backing the network logging API."""
    uuid_size = db_const.UUID_FIELD_SIZE
    op.create_table(
        'logs',
        sa.Column('project_id',
                  sa.String(length=db_const.PROJECT_ID_FIELD_SIZE),
                  nullable=True, index=True),
        sa.Column('id', sa.String(length=uuid_size), nullable=False),
        sa.Column('standard_attr_id', sa.BigInteger(), nullable=False),
        sa.Column('name', sa.String(length=db_const.NAME_FIELD_SIZE),
                  nullable=True),
        sa.Column('resource_type', sa.String(length=36), nullable=False),
        sa.Column('resource_id', sa.String(length=uuid_size),
                  nullable=True, index=True),
        sa.Column('target_id', sa.String(length=uuid_size),
                  nullable=True, index=True),
        sa.Column('event', sa.String(length=255), nullable=False),
        sa.Column('enabled', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.ForeignKeyConstraint(['standard_attr_id'],
                                ['standardattributes.id'],
                                ondelete='CASCADE'),
        sa.UniqueConstraint('standard_attr_id'))
| {
"content_hash": "a1c2b7c7bf56372b07e5587fb1057f84",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 76,
"avg_line_length": 34.17777777777778,
"alnum_prop": 0.5858257477243173,
"repo_name": "eayunstack/neutron",
"id": "4c41a1fc5856281943a0d06cbf401cf3496033e3",
"size": "2153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/db/migration/alembic_migrations/versions/pike/expand/c8c222d42aa9_logging_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
} |
import wpl
class LibraryAccount(wpl.LibraryAccount):
    """KPL account: WPL behaviour, but pointed at KPL's CAS login URL."""

    def login_url(self):
        """Return the CAS login URL for the KPL catalogue (scope 2)."""
        return ('https://books.kpl.org/iii/cas/login?service='
                'https://books.kpl.org/patroninfo~S2/j_acegi_cas_security_check'
                '&lang=eng&scope=2')
| {
"content_hash": "d25b6f6e644abd36eba5103fc44341d2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 96,
"avg_line_length": 34.714285714285715,
"alnum_prop": 0.6707818930041153,
"repo_name": "LibraryHippo/LibraryHippo",
"id": "201aac4060d7b5bedb81705bf09cea55a99282b5",
"size": "266",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "App/kpl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "663"
},
{
"name": "CSS",
"bytes": "3022"
},
{
"name": "HTML",
"bytes": "34270"
},
{
"name": "JavaScript",
"bytes": "3492"
},
{
"name": "Python",
"bytes": "516004"
}
],
"symlink_target": ""
} |
import mufsim.gamedb as db
import mufsim.stackitems as si
from mufsim.interface import network_interface as netifc
from mufsim.logger import log
from mufsim.errors import MufRuntimeError
from mufsim.insts.base import Instruction, instr
@instr("descriptors")
class InstDescriptors(Instruction):
    def execute(self, fr):
        """Push each descriptor for a player (or all, for #-1), then a count."""
        target = fr.data_pop_dbref()
        if target.value == -1:
            dlist = netifc.get_descriptors()
        else:
            if db.getobj(target).objtype != "player":
                raise MufRuntimeError("Expected #-1 or player dbref.")
            dlist = netifc.user_descrs(target.value)
        for descriptor in dlist:
            fr.data_push(descriptor)
        fr.data_push(len(dlist))
@instr("descr_array")
class InstDescrArray(Instruction):
    def execute(self, fr):
        """Push a list of descriptors for a player (or all, for #-1)."""
        target = fr.data_pop_dbref()
        if target.value == -1:
            dlist = netifc.get_descriptors()
        else:
            if db.getobj(target).objtype != "player":
                raise MufRuntimeError("Expected #-1 or player dbref.")
            dlist = netifc.user_descrs(target.value)
        fr.data_push_list(dlist)
@instr("descrcon")
class InstDescrCon(Instruction):
    def execute(self, fr):
        """Push the connection number for a descriptor."""
        descriptor = fr.data_pop(int)
        fr.data_push(netifc.descr_con(descriptor))
@instr("descrdbref")
class InstDescrDBRef(Instruction):
    def execute(self, fr):
        """Push the dbref of the object connected on a descriptor."""
        descriptor = fr.data_pop(int)
        fr.data_push(si.DBRef(netifc.descr_dbref(descriptor)))
@instr("descr_setuser")
class InstDescrSetUser(Instruction):
    def execute(self, fr):
        """Reconnect a descriptor to a player, after a password check."""
        fr.check_underflow(3)
        password = fr.data_pop(str)
        target = fr.data_pop_object()
        descriptor = fr.data_pop(int)
        if target.objtype != "player":
            raise MufRuntimeError("Expected player dbref.")
        previous = netifc.descr_dbref(descriptor)
        if db.getobj(target).password != password:
            raise MufRuntimeError("Incorrect password!")
        if netifc.descr_set_user(descriptor, target.dbref):
            previous = db.getobj(previous)
            log("RECONNECTED DESCRIPTOR %d FROM %s TO %s USING PW '%s'" %
                (descriptor, previous, target, password))
@instr("descrboot")
class InstDescrBoot(Instruction):
    def execute(self, fr):
        """Disconnect a descriptor, logging which object it belonged to."""
        descriptor = fr.data_pop(int)
        owner = netifc.descr_dbref(descriptor)
        if netifc.descr_disconnect(descriptor):
            log("BOOTED DESCRIPTOR %d: %s" % (descriptor, db.getobj(owner)))
@instr("descrnotify")
class InstDescrNotify(Instruction):
    def execute(self, fr):
        """Log a message notification to a descriptor, if it is online."""
        fr.check_underflow(2)
        message = fr.data_pop(str)
        descriptor = fr.data_pop(int)
        owner = netifc.descr_dbref(descriptor)
        if netifc.is_descr_online(descriptor):
            log("NOTIFY TO DESCR %d, %s: %s" %
                (descriptor, db.getobj(owner), message))
@instr("descrflush")
class InstDescrFlush(Instruction):
    def execute(self, fr):
        """Flush one descriptor's output, or all descriptors when given -1."""
        descriptor = fr.data_pop(int)
        if descriptor == -1:
            netifc.flush_all_descrs()
            log("DESCRFLUSH ALL DESCRS.")
        elif netifc.is_descr_online(descriptor):
            netifc.descr_flush(descriptor)
            owner = netifc.descr_dbref(descriptor)
            log("DESCRFLUSH %d, %s" % (descriptor, db.getobj(owner)))
@instr("descr")
class InstDescr(Instruction):
    def execute(self, fr):
        """Push the descriptor attribute of the frame's user object."""
        # TODO: get real descr.
        user_obj = db.getobj(fr.user)
        fr.data_push(user_obj.descr)
@instr("firstdescr")
class InstFirstDescr(Instruction):
    def execute(self, fr):
        """Push the first descriptor for a player (all, for a negative ref), or 0."""
        target = fr.data_pop_dbref()
        if target.value < 0:
            dlist = netifc.get_descriptors()
        else:
            dlist = netifc.user_descrs(target.value)
        fr.data_push(dlist[0] if dlist else 0)
@instr("lastdescr")
class InstLastDescr(Instruction):
    def execute(self, fr):
        """Push the last descriptor for a player (all, for a negative ref), or 0."""
        target = fr.data_pop_dbref()
        if target.value < 0:
            dlist = netifc.get_descriptors()
        else:
            dlist = netifc.user_descrs(target.value)
        fr.data_push(dlist[-1] if dlist else 0)
@instr("nextdescr")
class InstNextDescr(Instruction):
    def execute(self, fr):
        """Push the descriptor after the given one in the global list, or 0."""
        descriptor = fr.data_pop(int)
        dlist = netifc.get_descriptors()
        following = 0
        if descriptor in dlist:
            pos = dlist.index(descriptor) + 1
            if pos < len(dlist):
                following = dlist[pos]
        fr.data_push(following)
@instr("descrbufsize")
class InstDescrBufSize(Instruction):
    def execute(self, fr):
        """Push the output buffer size for a descriptor."""
        descriptor = fr.data_pop(int)
        fr.data_push(netifc.descr_bufsize(descriptor))
@instr("descrsecure?")
class InstDescrSecureP(Instruction):
    def execute(self, fr):
        """Push 1 when the descriptor's connection is secure, else 0."""
        descriptor = fr.data_pop(int)
        secure = netifc.descr_secure(descriptor)
        fr.data_push(int(bool(secure)))
@instr("descruser")
class InstDescrUser(Instruction):
    def execute(self, fr):
        """Push the name of the object on a descriptor, or "" if none."""
        descriptor = fr.data_pop(int)
        occupant = netifc.descr_user(descriptor)
        name = db.getobj(occupant).name if occupant >= 0 else ""
        fr.data_push(name)
@instr("descrhost")
class InstDescrHost(Instruction):
    def execute(self, fr):
        """Push the host for a descriptor's connection."""
        descriptor = fr.data_pop(int)
        fr.data_push(netifc.descr_host(descriptor))
@instr("descrtime")
class InstDescrTime(Instruction):
    def execute(self, fr):
        """Push the connect time for a descriptor as an int."""
        descriptor = fr.data_pop(int)
        fr.data_push(int(netifc.descr_time(descriptor)))
@instr("descridle")
class InstDescrIdle(Instruction):
    def execute(self, fr):
        """Push the idle time for a descriptor as an int."""
        descriptor = fr.data_pop(int)
        fr.data_push(int(netifc.descr_idle(descriptor)))
@instr("descrleastidle")
class InstDescrLeastIdle(Instruction):
    def execute(self, fr):
        """Push the smallest idle time among a player's descriptors.

        NOTE(review): min() raises ValueError when the player has no
        descriptors -- confirm callers guarantee a connected player.
        """
        player = fr.data_pop_object()
        idle_times = [netifc.descr_idle(d)
                      for d in netifc.user_descrs(player.dbref)]
        fr.data_push(min(idle_times))
@instr("descrmostidle")
class InstDescrMostIdle(Instruction):
    def execute(self, fr):
        """Push the largest idle time among a player's descriptors.

        NOTE(review): max() raises ValueError when the player has no
        descriptors -- confirm callers guarantee a connected player.
        """
        player = fr.data_pop_object()
        idle_times = [netifc.descr_idle(d)
                      for d in netifc.user_descrs(player.dbref)]
        fr.data_push(max(idle_times))
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| {
"content_hash": "bbe432e18bdecd1deaffcb032c0bc797",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 73,
"avg_line_length": 28.227272727272727,
"alnum_prop": 0.5969404186795492,
"repo_name": "revarbat/mufsim",
"id": "7623aa8ade4fbf24e21db361224fa3796468cd88",
"size": "6210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mufsim/insts/descriptors.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Forth",
"bytes": "30297"
},
{
"name": "Makefile",
"bytes": "1027"
},
{
"name": "Python",
"bytes": "471958"
},
{
"name": "Shell",
"bytes": "606"
}
],
"symlink_target": ""
} |
from .base import CreditCardProvider
from .gencc import amexPrefixList
from .gencc import credit_card_number
from random import Random
class AmexProvider(CreditCardProvider):
    """Credit card provider that generates American Express numbers."""

    def __init__(self, *args, **kwargs):
        super(AmexProvider, self).__init__(*args, **kwargs)

    def get_credit_card_number(self):
        """Return generated Amex numbers (15 digits, batch of 5).

        Bug fix: ``Random().seed()`` returns ``None``, so the original
        passed ``None`` to ``credit_card_number`` as the RNG.  Pass a
        seeded ``Random`` instance instead.
        """
        rng = Random()
        rng.seed()
        return credit_card_number(rng, amexPrefixList, 15, 5)
| {
"content_hash": "6b669014cd1a2dee256ba9417c7992ab",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 73,
"avg_line_length": 29.642857142857142,
"alnum_prop": 0.7156626506024096,
"repo_name": "ryankanno/vor",
"id": "ce45992f10dac30d187644fada1f7f57ba86506d",
"size": "462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vor/backends/cc/amex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Python",
"bytes": "22731"
}
],
"symlink_target": ""
} |
"""Validate solutions by enforcing file and table formats
"""
import os
import re
import logging
import time
import random
import requests
from lxml import html as HTMLParser
def read_solutions_table(readme_fp):
    """Parse the solutions table out of a README file.

    Rows are taken from the markdown table following a '### Contents'
    heading.  Each returned row is a list of cell strings, except that
    cell 1 becomes ``(title, url)`` and cell 2 becomes
    ``(languages, url)`` with *languages* split on ','.
    """
    link_pattern = re.compile(r'\[(.+?)\]\((.+)\)')

    def parse_line(line):
        cells = [cell for cell in line.strip().split('|') if cell]

        match = link_pattern.search(cells[1])
        if not match:
            raise ValueError('{} is not a valid link'.format(cells[1]))
        cells[1] = (match.group(1), match.group(2))

        match = link_pattern.search(cells[2])
        if not match:
            raise ValueError('{} is not a valid link'.format(cells[2]))
        cells[2] = (match.group(1).split(','), match.group(2))

        return cells

    rows = []
    in_contents = parsing_rows = False
    with open(readme_fp) as readme:
        for line in readme:
            if line.startswith('### Contents'):
                in_contents = True
            elif in_contents and line.startswith('|---'):
                parsing_rows = True
            elif parsing_rows and line.startswith('|'):
                rows.append(parse_line(line))
            elif parsing_rows and not line.strip():
                # Blank line ends the table.
                parsing_rows = False
                in_contents = False
    return rows
def validate_questions(solutions_data, retry=7, sleep=30, **kwargs): #pylint: disable=unused-argument
    """Verify each question link by fetching it and comparing page titles.

    :param solutions_data: rows from the README table; row[1] must be a
        ``(question_title, question_link)`` pair.
    :param retry: number of fetch attempts per question.
    :param sleep: request timeout, and upper bound (seconds) of the random
        back-off between attempts.
    :raises ValueError: when a link cannot be verified after all retries.
    """
    log = logging.getLogger(__name__)
    ua = (
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    )
    for row in solutions_data:
        question_title, question_link = row[1]
        verified = False
        for _attempt in range(retry):
            try:
                with requests.Session() as session:
                    session.headers.update({'User-Agent': ua})
                    response = session.get(question_link, timeout=sleep)
                document = HTMLParser.fromstring(response.content)
                title_nodes = document.cssselect('title')
                if not title_nodes:
                    raise ValueError('No question found')
                cleaned_title = title_nodes[0].text.split(' - LeetCode')[0].strip()
                if cleaned_title != question_title:
                    raise ValueError('Not matched: {} vs {}'.format(
                        cleaned_title, question_title
                    ))
                log.info('Question validation done for %s', cleaned_title)
                verified = True
                break
            except Exception as err: #pylint: disable=broad-except
                # Any failure (network, parse, mismatch) triggers a retry
                # after a randomized back-off.
                log.warning(
                    'Error during validate question %s: %s', question_title, err
                )
                time.sleep(random.random() * sleep)
        if not verified:
            raise ValueError('{} seems invalid'.format(question_link))
    log.info('%d questions verified', len(solutions_data))
def validate_solutions(solutions_data, **kwargs):
    """Check that each solution directory holds exactly the advertised languages.

    :param solutions_data: rows from the README table; row[2] must be
        ``(language_list, directory)`` and row[1][0] the question title.
    :param kwargs: must contain ``curr_dir``, the repository root.
    :raises ValueError: on an unexpected filename or a missing language.
    """
    base_dir = kwargs['curr_dir']
    log = logging.getLogger(__name__)
    # Maps solution-file extensions to the language names used in the README.
    ext_to_language = {
        'cpp': 'C++', 'py': 'Python', 'java': 'Java',
        'sql': 'SQL', 'sh': 'Shell',
    }
    for row in solutions_data:
        pending = set(row[2][0])
        dirpath = os.path.join(base_dir, row[2][1])
        for fname in os.listdir(dirpath):
            stem, ext = os.path.splitext(fname)
            if stem not in ('Solution', 'solution'):
                raise ValueError('{} is an invalid filename'.format(stem))
            # Unknown extensions map to None, which is never in the set.
            pending.discard(ext_to_language.get(ext[1:]))
        if pending:
            raise ValueError('Langugage remaining: {} for {}'.format(pending, row[1][0]))
        log.info(
            '%s solution validation done', row[1][0]
        )
    log.info('%d solutions verified', len(solutions_data))
def main():
    """Entry point: parse the README table and validate solution folders."""
    here = os.path.dirname(os.path.realpath(__file__))
    table = read_solutions_table(os.path.join(here, 'README.md'))
    # TODO: disable question validation now as leetcode changed to graphql: #42
    # validate_questions(table, curr_dir=here)
    validate_solutions(table, curr_dir=here)
if __name__ == '__main__':
    # Configure root logging once for CLI use; the functions above log
    # through logging.getLogger(__name__).
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S', level=logging.INFO
    )
    try:
        main()
    except Exception as err: #pylint: disable=broad-except
        # Log the full traceback, then re-raise so the process exits non-zero.
        logging.getLogger(__name__).exception('Exception: %s', err)
        raise
| {
"content_hash": "2a8c4323e376c86c56a4b10fc6ff4027",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 102,
"avg_line_length": 39.170731707317074,
"alnum_prop": 0.5471149854711499,
"repo_name": "franklingu/leetcode-solutions",
"id": "2e54d5b98ea493b2cd677677b4f94665016ebfe2",
"size": "4840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validate_solutions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8919"
},
{
"name": "Java",
"bytes": "173033"
},
{
"name": "Python",
"bytes": "996874"
},
{
"name": "Shell",
"bytes": "2559"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import os
from django import VERSION as DJANGO_VERSION
from django.utils.translation import ugettext_lazy as _
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "media-library"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['127.0.0.1']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
    ('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# ID of the django.contrib.sites Site record served by this settings module.
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Replace Django's default ModelBackend with Mezzanine's auth backend.
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
# NOTE(review): PASSWORD is blank here — presumably supplied via
# local_settings.py (loaded below); confirm before deploying.
DATABASES = {
    "default": {
        # Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
        "ENGINE": "django.db.backends.mysql",
        # DB name or path to database file if using sqlite3.
        "NAME": "lolsteak",
        # Not used with sqlite3.
        "USER": "lolsteak",
        # Not used with sqlite3.
        "PASSWORD": "",
        # Set to empty string for localhost. Not used with sqlite3.
        "HOST": "127.0.0.1",
        # Set to empty string for default. Not used with sqlite3.
        "PORT": "3306",
    }
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
# Directory name of this settings package (e.g. "lolsteak").
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
# Repository root: the parent of the settings package.
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
# Template engine configuration; project templates live under
# PROJECT_ROOT/templates, app templates are discovered via APP_DIRS.
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [
            os.path.join(PROJECT_ROOT, "templates")
        ],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
                "django.template.context_processors.debug",
                "django.template.context_processors.i18n",
                "django.template.context_processors.static",
                "django.template.context_processors.media",
                "django.template.context_processors.request",
                "django.template.context_processors.tz",
                "mezzanine.conf.context_processors.settings",
                "mezzanine.pages.context_processors.page",
            ],
            "builtins": [
                "mezzanine.template.loader_tags",
            ],
        },
    },
]
# The "builtins" template option only exists from Django 1.9 onward,
# so strip it when running under an older Django.
if DJANGO_VERSION < (1, 9):
    del TEMPLATES[0]["OPTIONS"]["builtins"]
################
# APPLICATIONS #
################
INSTALLED_APPS = (
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.redirects",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.sitemaps",
    "django.contrib.staticfiles",
    "mezzanine.boot",
    "mezzanine.conf",
    "mezzanine.core",
    "mezzanine.generic",
    "mezzanine.pages",
    "mezzanine.blog",
    "mezzanine.forms",
    "mezzanine.galleries",
    "mezzanine.twitter",
    "mezzanine.accounts",
    "django_tables2",
    "lolsteak",
    # "mezzanine.mobile",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
    "mezzanine.core.middleware.UpdateCacheMiddleware",
    'django.contrib.sessions.middleware.SessionMiddleware',
    # Uncomment if using internationalisation or localisation
    # 'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    "mezzanine.core.request.CurrentRequestMiddleware",
    "mezzanine.core.middleware.RedirectFallbackMiddleware",
    "mezzanine.core.middleware.TemplateForDeviceMiddleware",
    "mezzanine.core.middleware.TemplateForHostMiddleware",
    "mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
    "mezzanine.core.middleware.SitePermissionMiddleware",
    # Uncomment the following if using any of the SSL settings:
    # "mezzanine.core.middleware.SSLRedirectMiddleware",
    "mezzanine.pages.middleware.PageMiddleware",
    "mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
    "debug_toolbar",
    "django_extensions",
    "compressor",
    PACKAGE_NAME_FILEBROWSER,
    PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
# Instead of doing "from .local_settings import *", we use exec so that
# local_settings has full access to everything defined in this module.
# Also force into sys.modules so it's visible to Django's autoreload.
f = os.path.join(PROJECT_APP_PATH, "local_settings.py")
if os.path.exists(f):
    import sys
    import imp
    module_name = "%s.local_settings" % PROJECT_APP
    module = imp.new_module(module_name)
    module.__file__ = f
    sys.modules[module_name] = module
    # exec() runs in this module's namespace, so local_settings.py can
    # read and override everything defined above.
    exec(open(f, "rb").read())
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
    from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
    pass
else:
    set_dynamic_settings(globals())
####################
# ACCOUNT SETTINGS #
####################
# Mezzanine accounts options — presumably: public profile pages on,
# email-only signup, and a project-specific profile form; see the
# Mezzanine accounts documentation to confirm each flag's effect.
ACCOUNTS_PROFILE_VIEWS_ENABLE = True
ACCOUNTS_NO_USERNAME = True
ACCOUNTS_PROFILE_FORM_CLASS = 'lolsteak.forms.ProfileForm'
| {
"content_hash": "97c84642187c2f67c02f3cab3eadf1d8",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 79,
"avg_line_length": 34.57471264367816,
"alnum_prop": 0.676529255319149,
"repo_name": "Nick011/lolsteak",
"id": "18d15186a6b6afd4b9221b689a2b997c7ddd03cb",
"size": "12033",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lolsteak/lolsteak/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7049"
},
{
"name": "Python",
"bytes": "54843"
},
{
"name": "Shell",
"bytes": "1271"
}
],
"symlink_target": ""
} |
import re
import sys
from operator import add
from pyspark import SparkContext
def computeContribs(urls, rank):
    """Yield (neighbor, share) pairs, splitting *rank* evenly among *urls*."""
    count = len(urls)
    # Division stays inside the loop so an empty neighbor list simply
    # yields nothing instead of dividing by zero.
    for target in urls:
        yield (target, rank / count)
def parseNeighbors(urls):
    """Parses a urls pair string into urls pair."""
    # Modified by Lv: accept last two values from HiBench generated
    # PageRank data format (earlier columns, if any, are ignored).
    tokens = re.split(r'\s+', urls)
    return tokens[-2], tokens[-1]
# NOTE: Python 2 only — the print statement, xrange, and the
# tuple-parameter lambdas below do not parse under Python 3.
if __name__ == "__main__":
    if len(sys.argv) != 4:
        print >> sys.stderr, "Usage: pagerank <input_file> <output_file> <iterations>"
        exit(-1)
    # Initialize the spark context.
    sc = SparkContext(appName="PythonPageRank")
    # Loads in input file. It should be in format of:
    # URL         neighbor URL
    # URL         neighbor URL
    # URL         neighbor URL
    # ...
    lines = sc.textFile(sys.argv[1], 1)
    # Loads all URLs from input file and initialize their neighbors.
    links = lines.map(lambda urls: parseNeighbors(urls)).groupByKey().cache()
    # Loads all URLs with other URL(s) link to from input file and initialize ranks of them to one.
    ranks = links.map(lambda (url, neighbors): (url, 1.0))
    # Calculates and updates URL ranks continuously using PageRank algorithm.
    for iteration in xrange(int(sys.argv[3])):
        # Calculates URL contributions to the rank of other URLs.
        contribs = links.join(ranks).flatMap(
            lambda (url, (urls, rank)): computeContribs(urls, rank))
        # Re-calculates URL ranks based on neighbor contributions,
        # using the standard 0.85 damping factor.
        ranks = contribs.reduceByKey(add).mapValues(lambda rank: rank * 0.85 + 0.15)
    # Collects all URL ranks and dump them to console.
    # for (link, rank) in ranks.collect():
    #     print "%s has rank: %s." % (link, rank)
    ranks.saveAsTextFile(sys.argv[2])
| {
"content_hash": "1797bf8d4f965a24682cb3bf87509824",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 99,
"avg_line_length": 34.81818181818182,
"alnum_prop": 0.6428198433420366,
"repo_name": "thrill/fst-bench",
"id": "9fc8389798f22ffeead4a547275b61e3d151a153",
"size": "2882",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sparkbench/src/main/python/pagerank.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "18179"
},
{
"name": "Java",
"bytes": "689674"
},
{
"name": "Python",
"bytes": "127137"
},
{
"name": "Scala",
"bytes": "87304"
},
{
"name": "Shell",
"bytes": "180913"
}
],
"symlink_target": ""
} |
import threading
import logging
import struct
import time
from typing import Callable, Optional
from .network import CanError
logger = logging.getLogger(__name__)

# NMT state code -> human readable name.
NMT_STATES = {
    0: 'INITIALISING',
    4: 'STOPPED',
    5: 'OPERATIONAL',
    80: 'SLEEP',
    96: 'STANDBY',
    127: 'PRE-OPERATIONAL'
}

# State name -> NMT command code that requests it.
NMT_COMMANDS = {
    'OPERATIONAL': 1,
    'STOPPED': 2,
    'SLEEP': 80,
    'STANDBY': 96,
    'PRE-OPERATIONAL': 128,
    'INITIALISING': 129,
    'RESET': 129,
    'RESET COMMUNICATION': 130
}

# NMT command code -> the state code it results in.
COMMAND_TO_STATE = {
    1: 5,
    2: 4,
    80: 80,
    96: 96,
    128: 127,
    129: 0,
    130: 0
}


class NmtBase:
    """
    Can set the state of the node it controls using NMT commands and monitor
    the current state using the heartbeat protocol.
    """

    def __init__(self, node_id: int):
        # Node ID of the node this service controls.
        self.id = node_id
        # Network this node belongs to; assigned externally after creation.
        self.network = None
        # Raw NMT state code (a key of NMT_STATES); 0 == INITIALISING.
        self._state = 0

    def on_command(self, can_id, data, timestamp):
        """Handle a received NMT command frame (addressed or broadcast)."""
        cmd, target = struct.unpack_from("BB", data)
        # Node ID 0 is the broadcast address.
        if target not in (self.id, 0):
            return
        logger.info("Node %d received command %d", self.id, cmd)
        if cmd not in COMMAND_TO_STATE:
            return
        new_state = COMMAND_TO_STATE[cmd]
        if new_state == self._state:
            return
        logger.info("New NMT state %s, old state %s",
                    NMT_STATES[new_state], NMT_STATES[self._state])
        self._state = new_state

    def send_command(self, code: int):
        """Send an NMT command code to the node.

        :param code:
            NMT command code.
        """
        if code not in COMMAND_TO_STATE:
            return
        new_state = COMMAND_TO_STATE[code]
        logger.info("Changing NMT state on node %d from %s to %s",
                    self.id, NMT_STATES[self._state], NMT_STATES[new_state])
        self._state = new_state

    @property
    def state(self) -> str:
        """Attribute to get or set node's state as a string.

        Can be one of:

        - 'INITIALISING'
        - 'PRE-OPERATIONAL'
        - 'STOPPED'
        - 'OPERATIONAL'
        - 'SLEEP'
        - 'STANDBY'
        - 'RESET'
        - 'RESET COMMUNICATION'
        """
        # Unknown codes are returned as the raw integer.
        return NMT_STATES.get(self._state, self._state)

    @state.setter
    def state(self, new_state: str):
        if new_state not in NMT_COMMANDS:
            raise ValueError("'%s' is an invalid state. Must be one of %s." %
                             (new_state, ", ".join(NMT_COMMANDS)))
        self.send_command(NMT_COMMANDS[new_state])
class NmtMaster(NmtBase):
    """NMT master: commands a remote node and tracks its state through
    received heartbeat messages (and, optionally, node guarding).
    """

    def __init__(self, node_id: int):
        super(NmtMaster, self).__init__(node_id)
        # State code from the most recent heartbeat; None until one arrives
        # (also reset by the wait_for_* methods before each wait).
        self._state_received = None
        # Periodic node-guarding task, or None while node guarding is off.
        self._node_guarding_producer = None
        #: Timestamp of last heartbeat message
        self.timestamp: Optional[float] = None
        # Condition used to wake waiters each time a heartbeat is processed.
        self.state_update = threading.Condition()
        self._callbacks = []

    def on_heartbeat(self, can_id, data, timestamp):
        """Process a received heartbeat frame and notify all waiters."""
        with self.state_update:
            self.timestamp = timestamp
            new_state, = struct.unpack_from("B", data)
            # Mask out toggle bit
            new_state &= 0x7F
            logger.debug("Received heartbeat can-id %d, state is %d", can_id, new_state)
            # Callbacks run while the condition lock is held.
            for callback in self._callbacks:
                callback(new_state)
            if new_state == 0:
                # Boot-up, will go to PRE-OPERATIONAL automatically
                self._state = 127
            else:
                self._state = new_state
            self._state_received = new_state
            self.state_update.notify_all()

    def send_command(self, code: int):
        """Send an NMT command code to the node.

        :param code:
            NMT command code.
        """
        super(NmtMaster, self).send_command(code)
        logger.info(
            "Sending NMT command 0x%X to node %d", code, self.id)
        # NMT commands are sent on COB-ID 0 as [command, node id].
        self.network.send_message(0, [code, self.id])

    def wait_for_heartbeat(self, timeout: float = 10):
        """Wait until a heartbeat message is received.

        :param timeout: maximum time to wait, in seconds.
        :returns: the node state (see :attr:`NmtBase.state`).
        :raises NmtError: if no heartbeat arrived within *timeout*.
        """
        with self.state_update:
            # Reset so only a heartbeat arriving *after* this call counts.
            self._state_received = None
            self.state_update.wait(timeout)
        if self._state_received is None:
            raise NmtError("No boot-up or heartbeat received")
        return self.state

    def wait_for_bootup(self, timeout: float = 10) -> None:
        """Wait until a boot-up message is received.

        :param timeout: maximum total time to wait, in seconds.
        :raises NmtError: if no boot-up message arrived in time.
        """
        end_time = time.time() + timeout
        while True:
            now = time.time()
            with self.state_update:
                self._state_received = None
                # 0.1 s slack — presumably to avoid a zero/negative wait on
                # the final iteration; confirm intent.
                self.state_update.wait(end_time - now + 0.1)
            if now > end_time:
                raise NmtError("Timeout waiting for boot-up message")
            # State code 0 is the boot-up indication.
            if self._state_received == 0:
                break

    def add_hearbeat_callback(self, callback: Callable[[int], None]):
        """Add function to be called on heartbeat reception.

        :param callback:
            Function that should accept an NMT state as only argument.

        .. note:: The method name misspells "heartbeat", but it is public
           API — renaming it would break existing callers.
        """
        self._callbacks.append(callback)

    def start_node_guarding(self, period: float):
        """Starts the node guarding mechanism.

        :param period:
            Period (in seconds) at which the node guarding should be advertised to the slave node.
        """
        if self._node_guarding_producer : self.stop_node_guarding()
        # NOTE(review): the final True argument presumably marks the frame as
        # an RTR request — confirm against Network.send_periodic's signature.
        self._node_guarding_producer = self.network.send_periodic(0x700 + self.id, None, period, True)

    def stop_node_guarding(self):
        """Stops the node guarding mechanism."""
        if self._node_guarding_producer is not None:
            self._node_guarding_producer.stop()
            self._node_guarding_producer = None
class NmtSlave(NmtBase):
    """NMT slave: tracks the local node's NMT state and produces the
    heartbeat messages that advertise it on the bus.
    """

    def __init__(self, node_id: int, local_node):
        super().__init__(node_id)
        # Periodic task emitting the heartbeat frame; None when inactive.
        self._send_task = None
        self._heartbeat_time_ms = 0
        self._local_node = local_node

    def on_command(self, can_id, data, timestamp):
        """Apply an NMT command, then refresh the heartbeat payload."""
        super().on_command(can_id, data, timestamp)
        self.update_heartbeat()

    def send_command(self, code: int) -> None:
        """Send an NMT command code to the node.

        :param code:
            NMT command code.
        """
        previous_state = self._state
        super().send_command(code)
        if self._state == 0:
            logger.info("Sending boot-up message")
            self.network.send_message(0x700 + self.id, [0])
        # The heartbeat service should start on the transition between
        # INITIALIZING and PRE-OPERATIONAL state.
        if previous_state == 0 and self._state == 127:
            # Producer heartbeat time is read from object 0x1017.
            self.start_heartbeat(self._local_node.sdo[0x1017].raw)
        else:
            self.update_heartbeat()

    def on_write(self, index, data, **kwargs):
        """React to writes of the producer heartbeat time object (0x1017)."""
        if index != 0x1017:
            return
        interval_ms = struct.unpack_from("<H", data)[0]
        if interval_ms:
            self.start_heartbeat(interval_ms)
        else:
            self.stop_heartbeat()

    def start_heartbeat(self, heartbeat_time_ms: int):
        """(Re)start the heartbeat producer.

        :param heartbeat_time_ms:
            Heartbeat interval in milliseconds; 0 (or less) disables it.
        """
        self._heartbeat_time_ms = heartbeat_time_ms
        self.stop_heartbeat()
        if heartbeat_time_ms <= 0:
            return
        logger.info("Start the hearbeat timer, interval is %d ms", self._heartbeat_time_ms)
        self._send_task = self.network.send_periodic(
            0x700 + self.id, [self._state], heartbeat_time_ms / 1000.0)

    def stop_heartbeat(self):
        """Cancel the heartbeat producer if it is running."""
        if self._send_task is None:
            return
        logger.info("Stop the heartbeat timer")
        self._send_task.stop()
        self._send_task = None

    def update_heartbeat(self):
        """Push the current NMT state into the running heartbeat task."""
        if self._send_task is not None:
            self._send_task.update([self._state])
class NmtError(Exception):
    """Raised when some NMT operation failed."""
| {
"content_hash": "e9dca12e18dc68c9590aede0f0563d37",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 102,
"avg_line_length": 31.481060606060606,
"alnum_prop": 0.5639513897244616,
"repo_name": "christiansandberg/canopen",
"id": "09963de06e4fe98cde73f3e5474933156539fd43",
"size": "8311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canopen/nmt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "237077"
},
{
"name": "Shell",
"bytes": "868"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the candlestick lamp furniture template object."""
    obj = Tangible()
    obj.template = "object/tangible/furniture/all/shared_frn_all_lamp_candlestick_free_s02.iff"
    obj.attribute_template_id = 6
    obj.stfName("frn_n","frn_lamp_candlestick_dearic")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return obj
"content_hash": "f07bf0d6bf407ac6c3f65106fb1926d7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 95,
"avg_line_length": 26,
"alnum_prop": 0.7100591715976331,
"repo_name": "anhstudios/swganh",
"id": "a6adfb799d14255ac56ad59537e7b153cdf17959",
"size": "483",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/furniture/all/shared_frn_all_lamp_candlestick_free_s02.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
from gen_callback import *
from gen_caller import *
from gen_init_function import *
from gen_signatures import *
from gen_singleton import *
from gen_extclass import *
def gen_all(args):
    """Generate all boost.python preprocessed headers for up to *args* arities.

    Each generator returns the complete text of one header, which is
    written into the current working directory. Returns None.
    """
    # (output filename, generator function) pairs.
    outputs = (
        ('callback.hpp', gen_callback),
        ('caller.hpp', gen_caller),
        ('init_function.hpp', gen_init_function),
        ('signatures.hpp', gen_signatures),
        ('singleton.hpp', gen_singleton),
        ('extension_class.hpp', gen_extclass),
    )
    for filename, generator in outputs:
        # Close each handle deterministically; the original relied on
        # garbage collection and leaked six open files.
        out = open(filename, 'w')
        try:
            out.write(generator(args))
        finally:
            out.close()
if __name__ == '__main__':
    import sys
    # Arity count defaults to 10 when not given on the command line.
    if len(sys.argv) == 1:
        args = 10
    else:
        args = int(sys.argv[1])
    # NOTE(review): gen_all() returns None, so this prints "None" after
    # generating the files — probably intended as a bare call; confirm.
    print gen_all(args)
| {
"content_hash": "4123f6ce26e423314449b4753e7cf961",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 65,
"avg_line_length": 27.115384615384617,
"alnum_prop": 0.6354609929078014,
"repo_name": "Ezeer/VegaStrike_win32FR",
"id": "3877d181ff32d8daf2178f777f6e815779d57240",
"size": "705",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vegastrike/boost/1_28/src/gen_all.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4197693"
},
{
"name": "C++",
"bytes": "99169723"
},
{
"name": "Objective-C",
"bytes": "135840"
},
{
"name": "Perl",
"bytes": "21684"
},
{
"name": "Python",
"bytes": "186872"
},
{
"name": "Shell",
"bytes": "114240"
},
{
"name": "Standard ML",
"bytes": "2678"
}
],
"symlink_target": ""
} |
import datetime
from decimal import Decimal
import hashlib
import logging
import urllib
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.timezone import utc
from django.utils.translation import ugettext_lazy as _
from getpaid import signals
from getpaid.backends import PaymentProcessorBase
logger = logging.getLogger('getpaid.backends.dotpay')
class DotpayTransactionStatus:
    """Transaction status codes reported by Dotpay in the ``t_status`` field."""
    STARTED = 1
    FINISHED = 2
    REJECTED = 3
    REFUNDED = 4
    RECLAMATION = 5
class PaymentProcessor(PaymentProcessorBase):
BACKEND = 'getpaid.backends.dotpay'
BACKEND_NAME = _('Dotpay')
BACKEND_ACCEPTED_CURRENCY = ('PLN', 'EUR', 'USD', 'GBP', 'JPY', 'CZK', 'SEK')
BACKEND_LOGO_URL = 'getpaid/backends/dotpay/dotpay_logo.png'
_ALLOWED_IP = ('195.150.9.37', )
_ACCEPTED_LANGS = ('pl', 'en', 'de', 'it', 'fr', 'es', 'cz', 'ru', 'bg')
_GATEWAY_URL = 'https://ssl.dotpay.eu/'
_ONLINE_SIG_FIELDS = ('id', 'control', 't_id', 'amount', 'email', 'service', 'code', 'username', 'password', 't_status')
@staticmethod
def compute_sig(params, fields, PIN):
text = PIN + ":" + (u":".join(map(lambda field: params.get(field, ''), fields)))
return hashlib.md5(text).hexdigest()
@staticmethod
def online(params, ip):
allowed_ip = PaymentProcessor.get_backend_setting('allowed_ip', PaymentProcessor._ALLOWED_IP)
if len(allowed_ip) != 0 and ip not in allowed_ip:
logger.warning('Got message from not allowed IP %s' % str(allowed_ip))
return 'IP ERR'
PIN = PaymentProcessor.get_backend_setting('PIN', '')
if params['md5'] != PaymentProcessor.compute_sig(params, PaymentProcessor._ONLINE_SIG_FIELDS, PIN):
logger.warning('Got message with wrong sig, %s' % str(params))
return 'SIG ERR'
try:
params['id'] = int(params['id'])
except ValueError:
return 'ID ERR'
if params['id'] != int(PaymentProcessor.get_backend_setting('id')):
return 'ID ERR'
from getpaid.models import Payment
try:
payment = Payment.objects.get(pk=int(params['control']))
except (ValueError, Payment.DoesNotExist):
logger.error('Got message for non existing Payment, %s' % str(params))
return 'PAYMENT ERR'
amount, currency = params.get('orginal_amount', params['amount'] + ' PLN').split(' ')
if currency != payment.currency.upper():
logger.error('Got message with wrong currency, %s' % str(params))
return 'CURRENCY ERR'
payment.external_id = params.get('t_id', '')
payment.description = params.get('email', '')
if int(params['t_status']) == DotpayTransactionStatus.FINISHED:
payment.amount_paid = Decimal(amount)
payment.paid_on = datetime.datetime.utcnow().replace(tzinfo=utc)
if payment.amount <= Decimal(amount):
# Amount is correct or it is overpaid
payment.change_status('paid')
else:
payment.change_status('partially_paid')
elif int(params['t_status']) in [DotpayTransactionStatus.REJECTED, DotpayTransactionStatus.RECLAMATION, DotpayTransactionStatus.REFUNDED]:
payment.change_status('failed')
return 'OK'
def get_URLC(self):
urlc = reverse('getpaid-dotpay-online')
current_site = Site.objects.get_current()
if PaymentProcessor.get_backend_setting('force_ssl', False):
return 'https://%s%s' % (current_site.domain, urlc)
else:
return 'http://%s%s' % (current_site.domain, urlc)
def get_URL(self, pk):
current_site = Site.objects.get_current()
url = reverse('getpaid-dotpay-return', kwargs={'pk': pk})
if PaymentProcessor.get_backend_setting('force_ssl', False):
return 'https://%s%s' % (current_site.domain, url)
else:
return 'http://%s%s' % (current_site.domain, url)
def get_gateway_url(self, request):
    """
    Routes a payment to Gateway, should return URL for redirection.

    Builds the Dotpay request parameters from the Payment and the backend
    settings and returns a 3-tuple ``(url, method, params)``: either
    ``(gateway_url, 'POST', params)`` or, for GET, the gateway URL with the
    params already urlencoded and an empty dict.

    NOTE: Python 2 code (``unicode``, ``urllib.urlencode``).
    """
    params = {
        'id': PaymentProcessor.get_backend_setting('id'),
        'description': self.get_order_description(self.payment, self.payment.order),
        'amount': self.payment.amount,
        'currency': self.payment.currency,
        'type': 0,  # show "return" button after finished payment
        'control': self.payment.pk,
        'URL': self.get_URL(self.payment.pk),
        'URLC': self.get_URLC(),
    }
    # Ask the host project (via signal) for optional buyer details.
    user_data = {
        'email': None,
        'lang': None,
    }
    signals.user_data_query.send(sender=None, order=self.payment.order, user_data=user_data)
    if user_data['email']:
        params['email'] = user_data['email']
    # Language preference: the user's language wins over the backend default,
    # and either must be in the accepted list to be forwarded.
    if user_data['lang'] and user_data['lang'].lower() in PaymentProcessor._ACCEPTED_LANGS:
        params['lang'] = user_data['lang'].lower()
    elif PaymentProcessor.get_backend_setting('lang', False) and \
            PaymentProcessor.get_backend_setting('lang').lower() in PaymentProcessor._ACCEPTED_LANGS:
        params['lang'] = PaymentProcessor.get_backend_setting('lang').lower()
    # Optional pass-through backend settings.
    if PaymentProcessor.get_backend_setting('onlinetransfer', False):
        params['onlinetransfer'] = 1
    if PaymentProcessor.get_backend_setting('p_email', False):
        params['p_email'] = PaymentProcessor.get_backend_setting('p_email')
    if PaymentProcessor.get_backend_setting('p_info', False):
        params['p_info'] = PaymentProcessor.get_backend_setting('p_info')
    if PaymentProcessor.get_backend_setting('tax', False):
        params['tax'] = 1
    if PaymentProcessor.get_backend_setting('method', 'get').lower() == 'post':
        return self._GATEWAY_URL, 'POST', params
    elif PaymentProcessor.get_backend_setting('method', 'get').lower() == 'get':
        for key in params.keys():
            params[key] = unicode(params[key]).encode('utf-8')
        return self._GATEWAY_URL + '?' + urllib.urlencode(params), "GET", {}
    else:
        raise ImproperlyConfigured('Dotpay payment backend accepts only GET or POST')
| {
"content_hash": "33cb5794a7ff2d08fc43edfc3273cfe8",
"timestamp": "",
"source": "github",
"line_count": 154,
"max_line_length": 146,
"avg_line_length": 41.62987012987013,
"alnum_prop": 0.6136328185930432,
"repo_name": "KrzysiekJ/django-getpaid",
"id": "33ca5919f27ac696616ab3bf0a3845c26ace3273",
"size": "6411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getpaid/backends/dotpay/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126792"
}
],
"symlink_target": ""
} |
import mui4py.mui4py_mod as mui4py_mod
from mui4py.common import CppClass, get_cpp_name, array2Point, _Point
from mui4py.config import Config
from mui4py.types import *
from mui4py.geometry import Geometry
from mui4py.samplers import Sampler, ChronoSampler
import numpy as np
import re
import copy
# TODO: Add communicator parameter
def create_unifaces(domain, ifaces_names, config):
    """Create one :class:`Uniface` per name in *ifaces_names* for *domain*.

    Resolves the dimension/precision-specific ``create_uniface`` factory in
    the compiled module and wraps each raw C++ uniface. Returns a dict
    mapping interface name -> Uniface.
    """
    assert type(ifaces_names) == list
    assert type(domain) == str
    assert issubclass(config.__class__, Config)
    factory_name = get_cpp_name("create_uniface", config.dim,
                                config.float_type, config.int_type)
    raw_ifaces = getattr(mui4py_mod, factory_name)(domain, ifaces_names)
    return {name: Uniface(config=config, cpp_obj=raw)
            for name, raw in zip(ifaces_names, raw_ifaces)}
def get_mpi_version():
    """Return the MPI version string reported by the compiled MUI module."""
    return mui4py_mod.get_mpi_version()


def get_compiler_version():
    """Return the version of the compiler used to build the MUI module."""
    return mui4py_mod.get_compiler_version()


def get_compiler_config():
    """Return the compiler configuration/flags the MUI module was built with."""
    return mui4py_mod.get_compiler_config()


def mpi_split_by_app():
    """Split MPI_COMM_WORLD by application and return this app's communicator."""
    return mui4py_mod.mpi_split_by_app()


def set_quiet(q):
    """Enable/disable quiet mode (suppresses MUI's own console output)."""
    mui4py_mod.set_quiet(q)
def set_data_types_unifaces(ifaces, data):
    """Declare per-tag data types on several unifaces at once.

    ``ifaces`` maps interface name -> Uniface; ``data`` maps the same
    interface names to tag->type dicts, forwarded to
    ``Uniface.set_data_types``.
    """
    for name, uniface in ifaces.items():
        uniface.set_data_types(data[name])
# MUI Classes
class Uniface(CppClass):
    """
    Python wrapper around a compiled MUI uniface object.

    Keeps per-tag bookkeeping: the data type declared for each tag, plus
    caches of configured spatial/chrono samplers and of the resolved,
    type-suffixed raw method names (e.g. ``push_double``), so samplers are
    not re-wrapped and names are not recomputed on every push/fetch call.
    """

    def __init__(self, uri=None, cpp_obj=None, config=None):
        super(Uniface, self).__init__(config, args=(uri,))
        self.uri = uri
        self.configure(self.config, cpp_obj=cpp_obj)
        # tag -> declared Python type (set via set_data_types()).
        self.tags_type = {}
        # tag -> {sampler signature -> configured sampler copy}.
        self._tags_spatial_samplers = {}
        self._tags_chrono_samplers = {}
        # tag -> {(root, cs sig, ss sig) -> raw fetch method name}.
        self._tags_fetch = {}
        self._ALLOWED_PROTOCOLS = ["mpi"]

    def _check_uri(self):
        # TODO: validate 'protocol://domain/interface' format against
        # self._ALLOWED_PROTOCOLS.
        pass

    def _get_tag_type(self, tag):
        """Return the data type declared for *tag*; raise if undeclared."""
        try:
            return self.tags_type[tag]
        except KeyError:
            # BUG FIX: message typo "ata tag" corrected to "data tag".
            raise Exception("A type has not been defined for data tag '{}'. Use 'Uniface.set_data_type()'.".format(tag))

    def set_data_types(self, data):
        """Declare data types for several tags at once; *data* maps tag -> type."""
        for tag, data_type in data.items():
            try:
                self._set_data_type(tag, data_type)
            except KeyError:
                # BUG FIX: the original referenced an undefined name
                # ('iface_name') here, raising NameError instead of a
                # meaningful error.
                raise Exception("Could not set data type for tag '{}'.".format(tag))

    def _set_data_type(self, tag, data_type):
        """Declare *data_type* for *tag*; a tag's type can only be set once."""
        if data_type not in ALLOWED_IO_TYPES.keys():
            raise Exception("Data type not supported. Supported types: {..}")
        try:
            data_type_stored = self.tags_type[tag]
        except KeyError:
            # First declaration: initialise the per-tag caches.
            self.tags_type[tag] = data_type
            self._tags_spatial_samplers[tag] = {}
            self._tags_chrono_samplers[tag] = {}
            self._tags_fetch[tag] = {}
        else:
            raise Exception("Type '{}' has already been defined for tag '{}'.".format(data_type_stored.__name__, tag))

    def _get_pushfname(self, fname_root, tag, val=None, type_in=None):
        """
        Resolve the type-suffixed raw push method name for *tag*.

        Exactly one of *val* (a sample value) or *type_in* (an explicit
        type) must be provided to determine the wire type. Returns
        ``(method_name, data_type)``.
        """
        # BUG FIX: the original tested the builtin 'type' (always truthy)
        # instead of the 'type_in' parameter, both here and below.
        assert val is not None or type_in is not None
        stored_data_type = map_type[self._get_tag_type(tag)]
        if self.config.force_casting:
            data_type = stored_data_type
        else:
            if val is not None:
                data_type = map_type[type(val)]
            elif type_in is not None:
                data_type = type_in
            if stored_data_type != data_type:
                raise Exception("Data type set for tag '{}' do not match with the data type of the value provided.".format(tag))
        return (fname_root + ALLOWED_IO_TYPES[data_type], data_type)

    def push(self, *args, **kwargs):
        """
        Push data to the interface.

        Accepted call shapes:
          push(point)              -- declare a location only
          push(tag, value)         -- push a single (non-spatial) value
          push(tag, point, value)  -- push a value at a location
        """
        if len(args) == 1:
            loc = array2Point(args[0], self.config, self.raw_point)
            # BUG FIX: the original assigned 'push_name' but later read
            # 'push_fname', so this path always raised NameError.
            # TODO(review): confirm the raw binding exposes a method
            # literally named 'push_' for the location-only overload.
            push_fname = "push_"
            pargs = (loc,)
        elif len(args) == 2:
            tag, val = args
            push_fname, data_type = self._get_pushfname("push_", tag, val=val)
            pargs = (tag, safe_cast(data_type, val))
        elif len(args) == 3:
            tag = args[0]
            loc = array2Point(args[1], self.config, self.raw_point)
            val = args[2]
            push_fname, data_type = self._get_pushfname("push_", tag, val=val)
            try:
                pargs = (tag, loc, safe_cast(data_type, val))
            except ValueError:
                raise Exception("Forced type casting failed in push.")
        else:
            raise Exception("Push function accept 1, 2 or 3 parameters.")
        push = getattr(self.raw, push_fname)
        push(*pargs)

    def push_many(self, tag, points, values):
        """Push many (point, value) pairs at once.

        The dtype of the *values* array selects the raw method.
        """
        # TODO: Try to apply safe_cast
        push_fname, data_type = self._get_pushfname("push_many_", tag, type_in=values.dtype.type)
        getattr(self.raw, push_fname)(tag, points, values)

    def commit(self, tstamp):
        """Commit staged pushes at time *tstamp*."""
        return self.raw.commit(tstamp)

    def barrier(self, t1, t2=None):
        """Block until the peer has committed time *t1* (or window [t1, t2])."""
        if t2 is not None:
            self.raw.barrier(t1, t2)
        else:
            self.raw.barrier(t1)

    def forget(self, tend, tbegin=0.0):
        """Discard buffered data up to time *tend*.

        NOTE(review): *tbegin* is accepted but never forwarded; the raw
        call always forgets everything up to *tend* -- confirm intent
        against the C++ binding before relying on *tbegin*.
        """
        self.raw.forget(tend, True)

    def forecast(self, timestamp):
        """Hint the expected next time frame to the peer."""
        self.raw.forecast(timestamp)

    def is_ready(self, attr, t1, t2=None):
        """Return whether data for *attr* is available at *t1* (or [t1, t2])."""
        # BUG FIX: the original discarded the raw result and implicitly
        # returned None, making the query useless.
        if t2 is not None:
            return self.raw.is_ready(attr, t1, t2)
        return self.raw.is_ready(attr, t1)

    def set_memory(self, t):
        # NOTE(review): 'set_memmory' mirrors the (misspelled) raw binding
        # name -- do not "fix" the spelling without changing the C++ side.
        self.raw.set_memmory(t)

    def assign(self, tag, val):
        """Assign a single (non-spatial) value to *tag*."""
        data_type = map_type[self._get_tag_type(tag)]
        assign = getattr(self.raw, "assign_" + ALLOWED_IO_TYPES[data_type])
        assign(tag, safe_cast(data_type, val))

    def announce_recv_span(self, tinit, timeout, geometry):
        """Announce the region this rank will receive in during [tinit, timeout]."""
        assert issubclass(geometry.__class__, Geometry)
        geometry.configure(self.config)
        self.raw.announce_recv_span(tinit, timeout, geometry.raw)

    def announce_send_span(self, tinit, timeout, geometry):
        """Announce the region this rank will send from during [tinit, timeout]."""
        assert issubclass(geometry.__class__, Geometry)
        geometry.configure(self.config)
        self.raw.announce_send_span(tinit, timeout, geometry.raw)

    def _get_fetch_5args(self, fname_root, tag, data_type, spatial_sampler, chrono_sampler):
        """
        Resolve the raw method name for the 5-argument fetch variants.

        Caches configured copies of the samplers per tag; whenever a new
        sampler combination appears, both 'fetch' and 'fetch_many' names
        are (re)computed and cached.
        """
        assert issubclass(spatial_sampler.__class__, Sampler)
        assert issubclass(chrono_sampler.__class__, ChronoSampler)
        rehash_fetch = False
        try:
            ss = self._tags_spatial_samplers[tag][spatial_sampler.signature]
        except KeyError:
            ss = copy.copy(spatial_sampler)
            ss.configure(self.config, data_type)
            self._tags_spatial_samplers[tag][ss.signature] = ss
            rehash_fetch = True
        try:
            cs = self._tags_chrono_samplers[tag][chrono_sampler.signature]
        except KeyError:
            cs = copy.copy(chrono_sampler)
            cs.configure(self.config, data_type, onlycheck=True)
            self._tags_chrono_samplers[tag][cs.signature] = cs
            rehash_fetch = True
        if rehash_fetch:
            self._tags_fetch[tag][("fetch", cs.signature, ss.signature)] = "{}_{}_{}_{}".format(
                "fetch", ALLOWED_IO_TYPES[data_type],
                ss.fetch_signature(), cs.fetch_signature())
            self._tags_fetch[tag][("fetch_many", cs.signature, ss.signature)] = "{}_{}_{}_{}".format(
                "fetch_many", ALLOWED_IO_TYPES[data_type],
                ss.fetch_signature(), cs.fetch_signature())
        return self._tags_fetch[tag][(fname_root, cs.signature, ss.signature)], ss, cs

    def _get_fetch_6args(self, fname_root, tag, data_type, spatial_sampler, chrono_sampler):
        """
        Resolve the raw method name for the 6-argument (time-window) fetch
        variants, caching under the 'fetch6'/'fetch_many6' keys.

        NOTE(review): the cached value for 'fetch6' is named 'fetch_...'
        (same as the 5-arg variant) while 'fetch_many6' maps to
        'fetch_many6_...' -- presumably the raw bindings overload 'fetch'
        by arity; confirm against the C++ side.
        """
        assert issubclass(spatial_sampler.__class__, Sampler)
        assert issubclass(chrono_sampler.__class__, ChronoSampler)
        rehash_fetch = False
        try:
            ss = self._tags_spatial_samplers[tag][spatial_sampler.signature]
        except KeyError:
            ss = copy.copy(spatial_sampler)
            ss.configure(self.config, data_type)
            self._tags_spatial_samplers[tag][ss.signature] = ss
            rehash_fetch = True
        try:
            cs = self._tags_chrono_samplers[tag][chrono_sampler.signature]
        except KeyError:
            cs = copy.copy(chrono_sampler)
            cs.configure(self.config, data_type, onlycheck=True)
            self._tags_chrono_samplers[tag][cs.signature] = cs
            rehash_fetch = True
        if rehash_fetch:
            self._tags_fetch[tag][("fetch6", cs.signature, ss.signature)] = "{}_{}_{}_{}".format(
                "fetch", ALLOWED_IO_TYPES[data_type],
                ss.fetch_signature(), cs.fetch_signature())
            self._tags_fetch[tag][("fetch_many6", cs.signature, ss.signature)] = "{}_{}_{}_{}".format(
                "fetch_many6", ALLOWED_IO_TYPES[data_type],
                ss.fetch_signature(), cs.fetch_signature())
        return self._tags_fetch[tag][(fname_root, cs.signature, ss.signature)], ss, cs

    def fetch_points(self, tag, time):
        """Return the raw points pushed for *tag* at *time*."""
        data_type = map_type[self._get_tag_type(tag)]
        fetch_points = getattr(self.raw, "fetch_points_" + ALLOWED_IO_TYPES[data_type])
        return fetch_points(tag, time)

    def fetch_many(self, tag, points, time, spatial_sampler, chrono_sampler):
        """Fetch sampled values at many *points* for a single *time*."""
        fetch_fname, ss, cs = self._get_fetch_5args("fetch_many", tag, points.dtype.type,
                                                    spatial_sampler, chrono_sampler)
        fetch = getattr(self.raw, fetch_fname)
        return fetch(tag, points, time, ss.raw, cs.raw)

    def fetch_many6(self, tag, points, time1, time2, spatial_sampler, chrono_sampler):
        """Fetch sampled values at many *points* over the window [time1, time2]."""
        fetch_fname, ss, cs = self._get_fetch_6args("fetch_many6", tag, points.dtype.type,
                                                    spatial_sampler, chrono_sampler)
        fetch = getattr(self.raw, fetch_fname)
        return fetch(tag, points, time1, time2, ss.raw, cs.raw)

    def fetch(self, *args, **kwargs):
        """
        Fetch a value from the interface.

        Accepted call shapes:
          fetch(tag)                          -- non-spatial (assigned) value
          fetch(tag, point, t, ss, cs)        -- sampled at time t
          fetch(tag, point, t1, t2, ss, cs)   -- sampled over window [t1, t2]
        """
        tag = args[0]
        data_type = map_type[self._get_tag_type(tag)]
        if len(args) == 1:
            fetch_fname = "fetch_" + ALLOWED_IO_TYPES[data_type]
            fargs = (tag,)
        if len(args) == 5:
            loc = array2Point(args[1], self.config, self.raw_point)
            time = args[2]
            spatial_sampler = args[3]
            chrono_sampler = args[4]
            fetch_fname, ss, cs = self._get_fetch_5args("fetch", tag, data_type,
                                                        spatial_sampler, chrono_sampler)
            barrier_enabled = True
            # NOTE(review): barrier_time is computed only as a side effect of
            # validating the time type; it is never forwarded to the raw call.
            if type(time).__name__ == 'float':
                barrier_time = mui4py_mod.numeric_limits_real
            elif type(time).__name__ == 'int':
                barrier_time = mui4py_mod.numeric_limits_int
            else:
                raise Exception("Unrecognized time type '{}'.".format(type(time).__name__))
            fargs = (tag, loc, time, ss.raw, cs.raw, barrier_enabled)
        if len(args) == 6:
            loc = array2Point(args[1], self.config, self.raw_point)
            time1 = args[2]
            time2 = args[3]
            spatial_sampler = args[4]
            chrono_sampler = args[5]
            # BUG FIX: must look up the 6-argument variant under the 'fetch6'
            # key; the original requested 'fetch', which either raised
            # KeyError or silently returned the 5-argument method name.
            fetch_fname, ss, cs = self._get_fetch_6args("fetch6", tag, data_type,
                                                        spatial_sampler, chrono_sampler)
            barrier_enabled = True
            if type(time1).__name__ == 'float':
                barrier_time = mui4py_mod.numeric_limits_real
            elif type(time1).__name__ == 'int':
                barrier_time = mui4py_mod.numeric_limits_int
            else:
                raise Exception("Unrecognized time1 type '{}'.".format(type(time1).__name__))
            if type(time1).__name__ != type(time2).__name__:
                raise Exception("time1 type '{}'. doesn't same as time2 type".format(type(time1).__name__))
            fargs = (tag, loc, time1, time2, ss.raw, cs.raw, barrier_enabled)
        fetch = getattr(self.raw, fetch_fname)
        return safe_cast(self._get_tag_type(tag), fetch(*fargs))

    def Point(self, points):
        """Wrap *points* as a raw MUI point of the configured dimension/precision."""
        return array2Point(points, self.config, self.raw_point)
"content_hash": "7b902b4708c118a9c725bed451e8224e",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 124,
"avg_line_length": 42.87542087542087,
"alnum_prop": 0.5506517983351656,
"repo_name": "yhtang/MUI",
"id": "ff40d61e46a951ff136da907989d411cfac04cd3",
"size": "12734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wrappers/Python/mui4py/mui4py.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6102"
},
{
"name": "C++",
"bytes": "250232"
},
{
"name": "Fortran",
"bytes": "4871"
},
{
"name": "Makefile",
"bytes": "1209"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
from django.utils.translation import gettext_lazy as _
class VersionConfig(AppConfig):
    """Django application configuration for the osmaxx ``version`` app."""

    # New models in this app get 64-bit implicit primary keys.
    default_auto_field = "django.db.models.BigAutoField"
    name = "osmaxx.version"
    # Human-readable (translatable) name shown e.g. in the admin.
    verbose_name = _("Version")
| {
"content_hash": "3fe8ff9d6ca9687aa70d6d9b4ec0d592",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 56,
"avg_line_length": 30,
"alnum_prop": 0.7416666666666667,
"repo_name": "geometalab/osmaxx",
"id": "ff0d44f93cf770990d8bfc2e77ea9d30d3cc21b5",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "osmaxx/version/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21768"
},
{
"name": "Dockerfile",
"bytes": "7740"
},
{
"name": "HTML",
"bytes": "280740"
},
{
"name": "JavaScript",
"bytes": "264630"
},
{
"name": "Jinja",
"bytes": "6869531"
},
{
"name": "Lua",
"bytes": "5473"
},
{
"name": "Makefile",
"bytes": "4873"
},
{
"name": "NSIS",
"bytes": "5370"
},
{
"name": "Python",
"bytes": "544979"
},
{
"name": "Roff",
"bytes": "1233"
},
{
"name": "Shell",
"bytes": "9501"
}
],
"symlink_target": ""
} |
from dal import autocomplete
from django import forms
from .models import TModel
class TForm(forms.ModelForm):
    """ModelForm for ``TModel`` exposing ``name`` and ``test``.

    The ``test`` field is rendered with a Select2 autocomplete widget backed
    by the ``secure_data`` URL.
    """

    class Meta:
        model = TModel
        fields = ('name', 'test')
        widgets = {
            'test': autocomplete.ModelSelect2(url='secure_data')
        }
| {
"content_hash": "369ae3b9585586fe715b484307c52a8e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 20.214285714285715,
"alnum_prop": 0.6148409893992933,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "720ea5f56f0a0c6f28dddb8b14df519709d10b5e",
"size": "283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_project/secure_data/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
} |
import subprocess
import os
import sys
import shutil
def module_exists(module_name):
    """Return True if *module_name* can be imported, False otherwise."""
    try:
        __import__(module_name)
    except ImportError:
        return False
    return True
def check_cmake_exists(cmake_command):
    """
    Check whether CMake is installed. If not, print
    informative error message and quits.
    """
    # '<cmake> --version' prints a line containing 'cmake version' when the
    # binary is available; anything else means CMake was not found.
    p = subprocess.Popen('%s --version' % cmake_command,
                         shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE)
    if not ('cmake version' in p.communicate()[0].decode('UTF-8')):
        sys.stderr.write(' This code is built using CMake\n\n')
        sys.stderr.write(' CMake is not found\n')
        sys.stderr.write(' get CMake at http://www.cmake.org/\n')
        sys.stderr.write(' on many clusters CMake is installed\n')
        sys.stderr.write(' but you have to load it first:\n')
        sys.stderr.write(' $ module load cmake\n')
        # Abort: nothing can be configured without CMake.
        sys.exit(1)
def setup_build_path(build_path):
    """
    Create the build directory. If it already exists and contains a
    CMakeCache.txt (i.e. it was configured before), print an informative
    error message and quit; an existing but unconfigured directory is
    left untouched.
    """
    if not os.path.isdir(build_path):
        os.makedirs(build_path, 0o755)
        return
    cache_file = os.path.join(build_path, 'CMakeCache.txt')
    if os.path.exists(cache_file):
        sys.stderr.write('aborting setup\n')
        sys.stderr.write('build directory %s which contains CMakeCache.txt already exists\n' % build_path)
        sys.stderr.write('remove the build directory and then rerun setup\n')
        sys.exit(1)
def test_adapt_cmake_command_to_platform():
    """Unit test: env-var prefixes become 'set X &&' on win32, untouched on linux."""
    with_env = "FC=foo CC=bar CXX=RABOOF cmake -DTHIS -DTHAT='this and that cmake' .."
    assert adapt_cmake_command_to_platform(with_env, 'linux') == with_env
    expected_win = "set FC=foo && set CC=bar && set CXX=RABOOF && cmake -DTHIS -DTHAT='this and that cmake' .."
    assert adapt_cmake_command_to_platform(with_env, 'win32') == expected_win
    without_env = "cmake -DTHIS -DTHAT='this and that cmake' .."
    assert adapt_cmake_command_to_platform(without_env, 'linux') == without_env
    assert adapt_cmake_command_to_platform(without_env, 'win32') == without_env
def adapt_cmake_command_to_platform(cmake_command, platform):
    """
    Adapt a CMake invocation to the platform.

    On MS Windows ('win32') the leading 'VAR=value' environment assignments
    are rewritten as 'set VAR=value &&' prefixes; on every other platform
    the command is returned unchanged.
    """
    if platform != 'win32':
        return cmake_command
    cmake_pos = cmake_command.find('cmake')
    pieces = ['set %s &&' % word for word in cmake_command[:cmake_pos].split()]
    pieces.append(cmake_command[cmake_pos:])
    return ' '.join(pieces)
def run_cmake(command, build_path, default_build_path):
    """
    Execute CMake command.

    Runs *command* inside *build_path*, echoes CMake's stdout to the screen
    and to a 'cmake_output' file, and exits on any stderr output. On a
    failed configuration the default build directory (only) is removed; on
    success the setup command is recorded and build instructions printed.
    """
    topdir = os.getcwd()
    os.chdir(build_path)
    p = subprocess.Popen(command,
                         shell=True,
                         stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    stdout_coded, stderr_coded = p.communicate()
    stdout = stdout_coded.decode('UTF-8')
    stderr = stderr_coded.decode('UTF-8')
    if stderr:
        sys.stderr.write(stderr)
        sys.exit(1)
    # print cmake output to screen
    print(stdout)
    # write cmake output to file (inside the build directory, since we
    # have not chdir'd back yet)
    f = open('cmake_output', 'w')
    f.write(stdout)
    f.close()
    # change directory and return
    os.chdir(topdir)
    if 'Configuring incomplete' in stdout:
        # configuration was not successful
        if (build_path == default_build_path):
            # remove build_path iff not set by the user
            # otherwise removal can be dangerous
            shutil.rmtree(default_build_path)
    else:
        # configuration was successful
        save_setup_command(sys.argv, build_path)
        print_build_help(build_path, default_build_path)
def print_build_help(build_path, default_build_path):
    """
    Print help text after configuration step is done, telling the user how
    to enter the build directory and compile.
    """
    lines = [' configure step is done',
             ' now you need to compile the sources:']
    if build_path == default_build_path:
        lines.append(' $ cd build')
    else:
        lines.append(' $ cd ' + build_path)
    lines.append(' $ make')
    for line in lines:
        print(line)
def save_setup_command(argv, build_path):
    """
    Save setup command to a file.

    Records the exact invocation (*argv* joined by spaces) in
    <build_path>/setup_command so the configuration can be reproduced.
    """
    file_name = os.path.join(build_path, 'setup_command')
    with open(file_name, 'w') as f:
        f.write(' '.join(argv) + '\n')
def configure(root_directory, build_path, cmake_command, only_show):
    """
    Main configure function.

    Verifies CMake is available, prepares the build directory (defaulting
    to <root_directory>/build), adapts and prints the CMake command, and —
    unless *only_show* is set, in which case it exits after printing —
    runs the configuration.
    """
    default_build_path = os.path.join(root_directory, 'build')
    # check that CMake is available, if not stop
    check_cmake_exists('cmake')
    # deal with build path
    if build_path is None:
        build_path = default_build_path
    if not only_show:
        setup_build_path(build_path)
    cmake_command = adapt_cmake_command_to_platform(cmake_command, sys.platform)
    print('%s\n' % cmake_command)
    if only_show:
        # Dry-run mode: show the command without configuring.
        sys.exit(0)
    run_cmake(cmake_command, build_path, default_build_path)
| {
"content_hash": "b419d525b4df75db2a8cbda958ef6d7d",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 110,
"avg_line_length": 31.80625,
"alnum_prop": 0.609157005305561,
"repo_name": "miroi/sec_quant_F90",
"id": "a635d408bab8c6ac2b51c19a528fe9b726397e6b",
"size": "5211",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cmake/lib/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "94"
},
{
"name": "CMake",
"bytes": "28232"
},
{
"name": "FORTRAN",
"bytes": "113233"
},
{
"name": "Python",
"bytes": "47548"
}
],
"symlink_target": ""
} |
import logging
import os
from pathlib import Path
import jinja2
from couchdbkit.ext.django import schema
from django.core.management.base import BaseCommand
from django.utils.dateparse import parse_date, parse_datetime
from corehq.util.couchdb_management import couch_config
from corehq.util.doc_processor.couch import DocsIterator
from corehq.util.log import with_progress_bar
from dimagi.ext import jsonobject
from dimagi.utils.modules import to_function
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """
    Given a couch document type, iterates over all documents and reports back
    on usage of each attribute, to aid in selecting SQL fields for those
    attributes.

    For each attribute report:
    - Expected field type
    - Whether the value is ever None, for the purpose of deciding whether to
      use null=True
    - Longest value, for the purpose of setting max_length

    For any attribute that is a list or dict, the script will ask whether it's
    a submodel (as opposed to a JsonField) and, if so, examine it the same way
    as a top-level attribute.
    """

    def add_arguments(self, parser):
        parser.add_argument(
            'django_app',
        )
        parser.add_argument(
            'class_name',
        )
        parser.add_argument(
            '--chunk-size',
            type=int,
            default=100,
            help="Chunk size for batches of documents fetched from Couch. "
                 "Default: 100",
        )

    # Couch bookkeeping attributes that never map to SQL fields.
    COUCH_FIELDS = {'_id', '_rev', 'doc_type', 'base_doc', '_attachments'}

    FIELD_TYPE_BOOL = 'models.BooleanField'
    FIELD_TYPE_INTEGER = 'models.IntegerField'
    FIELD_TYPE_DATE = 'models.DateField'
    FIELD_TYPE_DATETIME = 'models.DateTimeField'
    FIELD_TYPE_DECIMAL = 'models.DecimalField'
    FIELD_TYPE_STRING = 'models.CharField'
    FIELD_TYPE_JSON = 'models.JSONField'
    FIELD_TYPE_SUBMODEL_LIST = 'models.ForeignKey'
    FIELD_TYPE_SUBMODEL_DICT = 'models.OneToOneField'
    FIELD_TYPE_UNKNOWN = 'unknown_type'

    # attribute name (dotted for submodels) -> FIELD_TYPE_* constant
    field_types = {}
    # attribute name -> kwargs for the generated Django field
    field_params = {}
    # tuples of field names to index on the generated model
    index_fields = set()

    def handle(self, django_app, class_name, chunk_size, **options):
        self.class_name = class_name
        self.django_app = django_app
        self.models_path = f"corehq.apps.{self.django_app}.models.{self.class_name}"
        self.couch_class = to_function(self.models_path)
        while not self.couch_class:
            self.models_path = input(f"Could not find {self.models_path}, please enter path: ")
            self.couch_class = to_function(self.models_path)
            self.class_name = self.models_path.split(".")[-1]
        docs = DocsIterator(self.couch_class, chunk_size)
        print("Found {} {} docs\n".format(len(docs), self.class_name))
        print("CTRL+C to stop evaluating documents.")
        try:
            for doc in with_progress_bar(docs, oneline="concise"):
                self.evaluate_doc(doc)
        except KeyboardInterrupt:
            # Partial evaluation is fine; report on whatever was seen.
            pass
        self.standardize_max_lengths()
        self.correlate_with_couch_schema(self.couch_class)
        models_file = self.models_path[:-(len(self.class_name) + 1)].replace(".", os.path.sep) + ".py"
        sql_model, couch_model_additions = self.generate_models_changes()
        print(f"################# edit {models_file} #################")
        print(sql_model)
        print(f"\n################# update {self.class_name} #################")
        print(couch_model_additions)
        command_file = "populate_" + self.class_name.lower() + ".py"
        command_file = os.path.join("corehq", "apps", self.django_app, "management", "commands", command_file)
        command_content = self.generate_management_command()
        print(f"\n################# add {command_file} #################")
        print(command_content)
        test_file_name = f'test_{self.class_name.lower()}_attr_comparision.py'
        test_file = os.path.join("corehq", "apps", self.django_app, "tests", test_file_name)
        test_content = self.generate_test_file()
        print(f"\n################# add {test_file} #################\n")
        print(test_content)

    def evaluate_doc(self, doc, prefix=None):
        """Inspect one document (or submodel dict), updating field stats."""
        for key, value in doc.items():
            if key in self.COUCH_FIELDS:
                continue
            if prefix:
                key = f"{prefix}.{key}"
            if isinstance(value, list):
                if not self.field_type(key):
                    if input(f"Is {key} a submodel (y/n)? ").lower().startswith("y"):
                        self.init_field(key, self.FIELD_TYPE_SUBMODEL_LIST)
                    else:
                        self.init_field(key, self.FIELD_TYPE_JSON, {'default': 'list'})
                if self.field_type(key) == self.FIELD_TYPE_SUBMODEL_LIST:
                    for item in value:
                        if isinstance(item, dict):
                            self.evaluate_doc(item, prefix=key)
                continue
            if isinstance(value, dict):
                if not self.field_type(key):
                    if input(f"Is {key} a submodel (y/n)? ").lower().startswith("y"):
                        self.init_field(key, self.FIELD_TYPE_SUBMODEL_DICT)
                    else:
                        self.init_field(key, self.FIELD_TYPE_JSON, {'default': 'dict'})
                if self.field_type(key) == self.FIELD_TYPE_SUBMODEL_DICT:
                    self.evaluate_doc(value, prefix=key)
                continue

            # Primitives
            if not self.field_type(key):
                if isinstance(value, bool):
                    self.init_field(key, self.FIELD_TYPE_BOOL)
                elif isinstance(value, str):
                    if parse_date(value):
                        self.init_field(key, self.FIELD_TYPE_DATE)
                    elif parse_datetime(value):
                        self.init_field(key, self.FIELD_TYPE_DATETIME)
                    else:
                        self.init_field(key, self.FIELD_TYPE_STRING)
                else:
                    try:
                        if int(value) == value:
                            self.init_field(key, self.FIELD_TYPE_INTEGER)
                        else:
                            self.init_field(key, self.FIELD_TYPE_DECIMAL)
                    except TypeError:
                        # Couldn't parse, likely None
                        pass
            if not self.field_type(key):
                self.init_field(key, self.FIELD_TYPE_UNKNOWN)
            if self.field_type(key) == self.FIELD_TYPE_BOOL:
                continue
            if self.field_type(key) == self.FIELD_TYPE_INTEGER:
                # Promote to decimal if a non-integral value shows up later.
                if value is not None and int(value) != value:
                    self.update_field_type(key, self.FIELD_TYPE_DECIMAL)
            self.update_field_max_length(key, len(str(value)))
            self.update_field_null(key, value)

    def init_field(self, key, field_type, params=None):
        """Record the first sighting of attribute *key* with *field_type*."""
        self.field_types[key] = field_type
        self.field_params[key] = {
            'max_length': 0,
            'null': False,
        }
        if params:
            self.field_params[key].update(params)
        if field_type == self.FIELD_TYPE_BOOL:
            # Booleans need an explicit default; left for a human to decide.
            self.field_params[key]['default'] = "'TODO'"
        if key == 'domain':
            self.add_index('domain')
        if 'created' in key:
            self.field_params[key]['auto_now_add'] = True
        if 'modified' in key:
            self.field_params[key]['auto_now'] = True

    def field_type(self, key):
        return self.field_types.get(key, None)

    def update_field_type(self, key, value):
        self.field_types[key] = value

    def update_field_max_length(self, key, new_length):
        old_max = self.field_params[key]['max_length']
        self.field_params[key]['max_length'] = max(old_max, new_length)

    def update_field_null(self, key, value):
        # Once a None has been seen, the field stays nullable.
        self.field_params[key]['null'] = self.field_params[key]['null'] or value is None

    def add_index(self, fields):
        if isinstance(fields, str):
            fields = (fields,)
        elif isinstance(fields, list):
            fields = tuple(fields)
        self.add_index_tuple(fields)

    def add_index_tuple(self, fields):
        """Store a normalized (tuple) index definition."""
        self.index_fields.add(fields)

    def standardize_max_lengths(self):
        """Round string max_lengths up to a standard size; drop for non-strings."""
        max_lengths = [1, 2, 8, 12, 32, 64, 80, 128, 256, 512, 1000]
        for key, params in self.field_params.items():
            if self.field_types[key] != self.FIELD_TYPE_STRING:
                del self.field_params[key]['max_length']
                continue
            if params['max_length']:
                i = 0
                while i < len(max_lengths) and params['max_length'] > max_lengths[i]:
                    i += 1
                if i < len(max_lengths):
                    params['max_length'] = max_lengths[i]

    def correlate_with_couch_schema(self, couch_class, prefix=None):
        """Iterate through the Couch schema to add missing fields and check field types match
        """
        for name, field in couch_class.properties().items():
            if name in self.COUCH_FIELDS:
                continue
            name = f'{prefix}.{name}' if prefix else name
            schema_type = self.couch_type_to_sql_type(field)
            data_type = self.field_type(name)
            if data_type is None:
                self.init_field(name, schema_type)
                continue
            if data_type == self.FIELD_TYPE_UNKNOWN:
                # BUG FIX: schema_type is already a FIELD_TYPE_* string; the
                # original passed it back through couch_type_to_sql_type,
                # which always returned FIELD_TYPE_UNKNOWN for strings, so
                # unknown fields were never resolved from the schema.
                self.update_field_type(name, schema_type)
                continue
            if data_type != schema_type and data_type != self.FIELD_TYPE_JSON:
                print(f"WARNING: type mismatch for {name}. "
                      f"Type from data '{data_type}' != type from schema '{schema_type}'")
            if data_type in (self.FIELD_TYPE_SUBMODEL_DICT, self.FIELD_TYPE_SUBMODEL_LIST):
                # NOTE(review): the attribute used to recurse depends on the
                # couchdbkit property class (item_type vs _type) -- confirm
                # against the installed couchdbkit version.
                if isinstance(field, schema.SchemaProperty):
                    self.correlate_with_couch_schema(field.item_type, prefix=name)
                elif isinstance(field, schema.SchemaDictProperty):
                    self.correlate_with_couch_schema(field._type, prefix=name)

    def couch_type_to_sql_type(self, couch_property):
        """Map a couchdbkit/jsonobject property instance to a FIELD_TYPE_* constant."""
        type_map = {
            schema.StringProperty: self.FIELD_TYPE_STRING,
            schema.BooleanProperty: self.FIELD_TYPE_BOOL,
            schema.DateTimeProperty: self.FIELD_TYPE_DATETIME,
            jsonobject.DateTimeProperty: self.FIELD_TYPE_DATETIME,
            schema.DateProperty: self.FIELD_TYPE_DATE,
            schema.IntegerProperty: self.FIELD_TYPE_INTEGER,
            schema.DecimalProperty: self.FIELD_TYPE_DECIMAL,
            schema.SchemaProperty: self.FIELD_TYPE_SUBMODEL_DICT,
            schema.DictProperty: self.FIELD_TYPE_SUBMODEL_DICT,
            schema.SchemaDictProperty: self.FIELD_TYPE_SUBMODEL_DICT,
            schema.ListProperty: self.FIELD_TYPE_SUBMODEL_LIST,
            schema.SchemaListProperty: self.FIELD_TYPE_SUBMODEL_LIST,
        }
        exact_match = type_map.get(couch_property.__class__, None)
        if exact_match:
            return exact_match
        # Fall back to isinstance checks for subclasses of the mapped types.
        for schema_class, type_ in type_map.items():
            if isinstance(couch_property, schema_class):
                return type_
        return self.FIELD_TYPE_UNKNOWN

    def standardize_nulls(self):
        # NOTE(review): this helper is never called from handle(); nulls are
        # currently emitted as explicit null=False -- confirm whether it
        # should run before generate_models_changes().
        # null defaults to False
        for key, params in self.field_params.items():
            if 'null' in params and not params['null']:
                del self.field_params[key]['null']

    def generate_models_changes(self):
        """Render the suggested SQL model and the Couch model additions."""
        suggested_fields = []
        migration_field_names = []
        submodels = []
        for key, params in self.field_params.items():
            if self.is_field_type_submodel(key):
                submodels.append(key)
            if self.is_submodel_key(key):
                continue
            arg_list = ", ".join([f"{k}={v}" for k, v, in params.items()])
            suggested_fields.append(f"{key} = {self.field_types[key]}({arg_list})")
            migration_field_names.append(key)
        suggested_fields.append("couch_id = models.CharField(max_length=126, null=True)")
        self.add_index('couch_id')
        index_list = ['models.Index(fields={}),'.format(fields) for fields in self.index_fields]
        db_table = self.django_app.lower() + "_" + self.class_name.replace("_", "").lower()
        sql_model = render_tempate(
            "sql_model.j2",
            class_name=self.class_name,
            migration_field_names=migration_field_names,
            suggested_fields=suggested_fields,
            index_list=index_list,
            submodels=submodels,
            db_table=db_table
        )
        couch_model_additions = render_tempate(
            "couch_model_additions.j2",
            migration_field_names=migration_field_names,
            class_name=self.class_name
        )
        return sql_model, couch_model_additions

    def generate_management_command(self):
        """Render the populate_<model> management command skeleton."""
        suggested_updates = []
        submodels = []
        for key, field_type in self.field_types.items():
            if self.is_field_type_submodel(key):
                submodels.append(key)
            if self.is_submodel_key(key):
                continue
            if field_type == self.FIELD_TYPE_DATE:
                suggested_updates.append(f'"{key}": force_to_date(doc.get("{key}")),')
            elif field_type == self.FIELD_TYPE_DATETIME:
                suggested_updates.append(f'"{key}": force_to_datetime(doc.get("{key}")),')
            else:
                suggested_updates.append(f'"{key}": doc.get("{key}"),')
        uri = couch_config.get_db_uri_for_class(self.couch_class)
        db_slug = {uri: slug for slug, uri in couch_config.all_db_uris_by_slug.items()}[uri]
        date_conversions = []
        if self.FIELD_TYPE_DATE in self.field_types.values():
            date_conversions.append("force_to_date")
        if self.FIELD_TYPE_DATETIME in self.field_types.values():
            date_conversions.append("force_to_datetime")
        dates_import = ""
        if date_conversions:
            dates_import = f"from dimagi.utils.dates import {','.join(date_conversions)}"
        return render_tempate(
            "populate_command.j2",
            class_name=self.class_name,
            models_path=self.models_path,
            db_slug=db_slug,
            dates_import=dates_import,
            suggested_updates=suggested_updates,
            submodels=submodels
        )

    def generate_test_file(self):
        """Render the Couch-vs-SQL attribute comparison test skeleton."""
        return render_tempate(
            'migration_attr_equality_test.j2',
            class_name=self.class_name,
            models_path=self.models_path
        )

    def is_submodel_key(self, key):
        """True for dotted (nested) keys or keys typed as submodels."""
        return "." in key or self.is_field_type_submodel(key)

    def is_field_type_submodel(self, key):
        return self.field_types[key] in (self.FIELD_TYPE_SUBMODEL_LIST, self.FIELD_TYPE_SUBMODEL_DICT)
def render_tempate(template_filename, **kwargs):
    """Render a Jinja2 template from this command's local ``templates`` dir.

    (The historical misspelling of the function name is kept; callers
    depend on it.)
    """
    templates_dir = Path(__file__).parent / "templates"
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=templates_dir))
    return env.get_template(template_filename).render(**kwargs)
| {
"content_hash": "be82105a84422919c9b37ba7501a5711",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 110,
"avg_line_length": 41.38918918918919,
"alnum_prop": 0.5731356928300901,
"repo_name": "dimagi/commcare-hq",
"id": "ee9732a4fa1b55e351380bbfe873de4c42dc545c",
"size": "15314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/cleanup/management/commands/evaluate_couch_model_for_sql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
import math
import numpy as np
import tensorflow as tf
import util
class Network(object):
    """Shared conv torso feeding one or more bootstrap heads.

    Depending on ``config.actor_critic`` the heads are either action-value
    (DQN-style) heads or actor-critic heads. With several bootstrap heads and
    ``config.bootstrap_use_ensemble`` set, greedy actions can be chosen by a
    majority vote across heads; otherwise a randomly sampled "active" head
    is used.
    """

    def __init__(self, variable_scope, inputs, reward_scaling, config,
                 write_summaries):
        self.scope = variable_scope
        self.inputs = inputs
        self.config = config
        self.write_summaries = write_summaries
        self.num_heads = config.num_bootstrap_heads
        self.using_ensemble = config.bootstrap_use_ensemble
        conv_output = self.build_conv_layers(inputs)
        if config.actor_critic:
            self.build_actor_critic_heads(inputs, conv_output, reward_scaling)
        else:
            self.build_action_value_heads(inputs, conv_output, reward_scaling)
        if self.using_ensemble:
            self.build_ensemble()
        # Start with a randomly chosen active head.
        self.sample_head()

    def build_conv_layers(self, inputs):
        """Build the three conv layers and return flattened features.

        Gradients flowing back into the torso are scaled down so it receives
        a balanced signal regardless of head count (and the dueling split).
        """
        # Permute (N, C, H, W) -> (N, H, W, C) for tf.layers.conv2d's default
        # data format. (Assumes inputs.frames is channels-first -- implied by
        # the [0, 2, 3, 1] permutation and the name "nhwc"; confirm upstream.)
        nhwc = tf.transpose(inputs.frames, [0, 2, 3, 1])
        self.activation_summary(nhwc)
        conv1 = tf.layers.conv2d(
            nhwc,
            filters=32,
            kernel_size=[8, 8],
            strides=[4, 4],
            activation=tf.nn.relu,
            name='conv1')
        self.activation_summary(conv1)
        conv2 = tf.layers.conv2d(
            conv1,
            filters=64,
            kernel_size=[4, 4],
            strides=[2, 2],
            activation=tf.nn.relu,
            name='conv2')
        self.activation_summary(conv2)
        conv3 = tf.layers.conv2d(
            conv2,
            filters=64,
            kernel_size=[3, 3],
            strides=[1, 1],
            activation=tf.nn.relu,
            name='conv3')
        self.activation_summary(conv3)
        # Flatten to (batch, 64 * 7 * 7) for the dense heads.
        conv_output = tf.reshape(conv3, [-1, 64 * 7 * 7])
        # Rescale gradients entering the last convolution layer
        dueling_scale = 1.0 / math.sqrt(2) if self.config.dueling else 1.0
        scale = dueling_scale / self.num_heads
        if scale < 1:
            conv_output = util.scale_gradient(conv_output, scale)
        return conv_output

    def build_action_value_heads(self, inputs, conv_output, reward_scaling):
        """Create one ActionValueHead per bootstrap head and stack outputs."""
        self.heads = [
            ActionValueHead('head%d' % i, inputs, conv_output, reward_scaling,
                            self.config) for i in range(self.num_heads)
        ]
        # Stacked along axis 1: one slice per head.
        self.action_values = tf.stack(
            [head.action_values for head in self.heads],
            axis=1,
            name='action_values')
        self.activation_summary(self.action_values)
        self.taken_action_value = self.action_value(
            inputs.action, name='taken_action_value')
        # Per-head max value and argmax action.
        value, greedy_action = tf.nn.top_k(self.action_values, k=1)
        self.value = tf.squeeze(value, axis=2, name='value')
        self.greedy_action = tf.squeeze(
            greedy_action, axis=2, name='greedy_action')

    def action_value(self, action, name='action_value'):
        """Q-value of `action` for every head."""
        with tf.name_scope(name):
            return self.choose_from_actions(self.action_values, action)

    def build_actor_critic_heads(self, inputs, conv_output, reward_scaling):
        """Create one ActorCriticHead per bootstrap head and stack outputs."""
        self.heads = [
            ActorCriticHead('head%d' % i, inputs, conv_output, reward_scaling,
                            self.config) for i in range(self.num_heads)
        ]
        self.value = tf.stack(
            [head.value for head in self.heads], axis=1, name='value')
        self.greedy_action = tf.stack(
            [head.greedy_action for head in self.heads],
            axis=1,
            name='greedy_action')
        self.policy = tf.stack(
            [head.policy for head in self.heads], axis=1, name='policy')
        self._log_policy = tf.stack(
            [head.log_policy for head in self.heads], axis=1, name='log_policy')
        # Policy entropy per head: -sum(p * log p) over the action axis.
        self.entropy = tf.reduce_sum(
            -self.policy * self._log_policy, axis=2, name='entropy')

    def log_policy(self, action, name='log_policy'):
        """Log-probability of `action` under every head's policy."""
        with tf.name_scope(name):
            return self.choose_from_actions(self._log_policy, action)

    def choose_from_actions(self, actions, action):
        # Select the entry for `action` along the action axis via a one-hot mask.
        return tf.reduce_sum(
            actions * tf.one_hot(action, self.config.num_actions), axis=2)

    def build_ensemble(self):
        """Majority vote over per-head greedy actions, random tie-breaking."""
        ensemble_votes = tf.reduce_sum(
            tf.one_hot(self.greedy_action, self.config.num_actions), axis=1)
        # Add some noise to break ties
        # (uniform [0, 1) noise cannot change the winner between different
        # integer vote counts, only between equal ones).
        noise = tf.random_uniform([self.config.num_actions])
        _, ensemble_greedy_action = tf.nn.top_k(ensemble_votes + noise, k=1)
        self.ensemble_greedy_action = tf.squeeze(
            ensemble_greedy_action, axis=1, name='ensemble_greedy_action')

    def sample_head(self):
        """Pick a new uniformly random active head."""
        self.active_head = self.heads[np.random.randint(self.num_heads)]

    @property
    def choose_action(self):
        # Use the ensemble vote only when there are multiple heads and the
        # ensemble is enabled; otherwise fall back to the active head.
        if self.num_heads == 1 or not self.using_ensemble:
            return self.active_head.greedy_action
        else:
            return self.ensemble_greedy_action

    @property
    def variables(self):
        """Trainable variables created under this network's variable scope."""
        return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope.name)

    def activation_summary(self, tensor):
        # Histogram/sparsity summaries, emitted only when summaries are enabled.
        if self.write_summaries:
            tensor_name = tensor.op.name
            tf.summary.histogram(tensor_name + '/activations', tensor)
            tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(tensor))
class ActionValueHead(object):
    """One Q-value head: per-action values, best value and greedy action."""

    def __init__(self, name, inputs, conv_outputs, reward_scaling, config):
        with tf.variable_scope(name):
            action_values = self.action_value_layer(conv_outputs, config)
            # Convert normalized network outputs back to reward scale.
            action_values = reward_scaling.unnormalize_output(action_values)
            value, greedy_action = tf.nn.top_k(action_values, k=1)
            # Masked by inputs.alive (presumably zeroing values for terminal
            # states -- confirm against the input pipeline).
            self.action_values = tf.multiply(
                inputs.alive, action_values, name='action_values')
            self.value = tf.squeeze(inputs.alive * value, axis=1, name='value')
            self.greedy_action = tf.squeeze(
                greedy_action, axis=1, name='greedy_action')

    def action_value_layer(self, conv_outputs, config):
        """Dense layer(s) mapping conv features to one value per action.

        With config.dueling, uses the dueling decomposition
        V + (A - mean(A)); otherwise a single hidden layer feeding the
        per-action output layer.
        """
        if config.dueling:
            hidden_value = tf.layers.dense(
                conv_outputs, 256, tf.nn.relu, name='hidden_value')
            value = tf.layers.dense(hidden_value, 1, name='value')
            hidden_actions = tf.layers.dense(
                conv_outputs, 256, tf.nn.relu, name='hidden_actions')
            actions = tf.layers.dense(
                hidden_actions, config.num_actions, name='actions')
            # Broadcast the value over actions; center advantages to zero mean.
            return value + actions - tf.reduce_mean(actions, axis=1, keep_dims=True)
        else:
            hidden = tf.layers.dense(conv_outputs, 256, tf.nn.relu, name='hidden')
            return tf.layers.dense(hidden, config.num_actions, name='action_value')
class ActorCriticHead(object):
    """One actor-critic head: a state value (critic) and a softmax policy (actor)."""

    def __init__(self, name, inputs, conv_outputs, reward_scaling, config):
        with tf.variable_scope(name):
            hidden = tf.layers.dense(conv_outputs, 256, tf.nn.relu, name='hidden')
            value = tf.layers.dense(hidden, 1)
            # Critic output: unnormalized value, masked by inputs.alive.
            self.value = tf.squeeze(
                inputs.alive * reward_scaling.unnormalize_output(value),
                axis=1,
                name='value')
            actions = tf.layers.dense(hidden, config.num_actions, name='actions')
            self.policy = tf.nn.softmax(actions, name='policy')
            self.log_policy = tf.nn.log_softmax(actions, name='log_policy')
            # Sample action from policy
            # (despite the attribute name, this draws from the policy
            # distribution via tf.multinomial rather than taking an argmax).
            self.greedy_action = tf.squeeze(
                tf.multinomial(self.log_policy, num_samples=1),
                axis=1,
                name='greedy_action')
| {
"content_hash": "85a0c20dcfa531e4a6be78d03aa939a1",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 79,
"avg_line_length": 34.492537313432834,
"alnum_prop": 0.6470503389586038,
"repo_name": "brendanator/atari-rl",
"id": "a826a85bede5918f69131a30ed14264dce1dd45f",
"size": "6933",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "networks/dqn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62671"
}
],
"symlink_target": ""
} |
"""Tests for the XRT client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.xla.python import xla_client
from tensorflow.compiler.xla.python import xrt
from tensorflow.python.platform import test
def BuildAddAndScaleComputation(shape1, shape2):
    """Builds the computation (a + b) * 3."""
    builder = xla_client.ComputationBuilder("add-and-scale")
    lhs = builder.ParameterWithShape(shape1)
    rhs = builder.ParameterWithShape(shape2)
    # Construct the scale constant in the dtype of the first parameter.
    dtype = shape1.numpy_dtype().type
    scale = builder.Constant(dtype(3))
    builder.Mul(builder.Add(lhs, rhs), scale)
    return builder.Build()
# TODO(phawkins): add more tests, beyond a simple "hello world" example.
class XrtBackendTest(test.TestCase):
    """Smoke test: run a simple XLA computation through the XRT backend."""

    def testBasics(self):
        (worker,), _ = test.create_local_cluster(num_workers=1, num_ps=0)
        self.assertTrue(worker.target.startswith("grpc://"))
        # Strip the scheme; get_tf_context expects a bare host:port address.
        tf_context = xrt.get_tf_context(worker.target[len("grpc://"):], "worker")
        backend = xrt.XrtBackend(tf_context, "XLA_CPU")
        a = np.arange(10)
        b = np.arange(10)
        c = BuildAddAndScaleComputation(
            xla_client.Shape.from_pyval(a), xla_client.Shape.from_pyval(b))
        executable = c.Compile(backend=backend)
        output = executable.ExecuteWithPythonValues((a, b))
        # Expect the computation (a + b) * 3 evaluated elementwise.
        self.assertAllEqual(output, (a + b) * 3)
# Allow running this test module directly.
if __name__ == "__main__":
    test.main()
| {
"content_hash": "02809604fd56fa111dd4e1876dd98b9b",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 77,
"avg_line_length": 30.266666666666666,
"alnum_prop": 0.6953010279001468,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "c53ac1071e342c36abb1c513bbf461be491bdf88",
"size": "2051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/compiler/xla/python/xrt_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
} |
import unittest
import numpy
import pytest
import cupy
from cupy.cuda import runtime
from cupy import testing
@testing.parameterize(
    {'shape': ()},
    {'shape': (1,)},
    {'shape': (1, 1, 1)},
)
class TestNdarrayItem(unittest.TestCase):
    """ndarray.item() on size-1 arrays must match numpy's behavior."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_equal()
    def test_item(self, xp, dtype):
        # xp is numpy or cupy; the decorator compares both results for equality.
        a = xp.full(self.shape, 3, dtype)
        return a.item()
@testing.parameterize(
    {'shape': (0,)},
    {'shape': (2, 3)},
    {'shape': (1, 0, 1)},
)
class TestNdarrayItemRaise(unittest.TestCase):
    """ndarray.item() must raise ValueError when the array size is not 1."""

    def test_item(self):
        # Both numpy and cupy must raise for these shapes.
        for xp in (numpy, cupy):
            a = testing.shaped_arange(self.shape, xp, xp.float32)
            with pytest.raises(ValueError):
                a.item()
@testing.parameterize(
    {'shape': ()},
    {'shape': (1,)},
    {'shape': (2, 3)},
    {'shape': (2, 3), 'order': 'C'},
    {'shape': (2, 3), 'order': 'F'},
)
class TestNdarrayToBytes(unittest.TestCase):
    """ndarray.tobytes() must produce identical bytes on numpy and cupy."""

    @testing.for_all_dtypes()
    @testing.numpy_cupy_equal()
    def test_item(self, xp, dtype):
        # Known-bad combinations on ROCm/HIP are expected failures.
        hip_affected = self.shape == (1,) or (
            self.shape == (2, 3) and not hasattr(self, 'order'))
        if runtime.is_hip and hip_affected:
            pytest.xfail('ROCm/HIP may have a bug')
        a = testing.shaped_arange(self.shape, xp, dtype)
        order = getattr(self, 'order', None)
        if order is None:
            return a.tobytes()
        return a.tobytes(order)
| {
"content_hash": "1963455add90808bee7352ff2efe829f",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 69,
"avg_line_length": 24.135593220338983,
"alnum_prop": 0.5582865168539326,
"repo_name": "cupy/cupy",
"id": "806e6cc1382cb6dcc8c590bd0541e6f983e57650",
"size": "1424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cupy_tests/core_tests/test_ndarray_conversion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "38"
},
{
"name": "C",
"bytes": "712019"
},
{
"name": "C++",
"bytes": "895316"
},
{
"name": "Cuda",
"bytes": "151799"
},
{
"name": "Cython",
"bytes": "1996454"
},
{
"name": "Dockerfile",
"bytes": "40251"
},
{
"name": "PowerShell",
"bytes": "7361"
},
{
"name": "Python",
"bytes": "4841354"
},
{
"name": "Shell",
"bytes": "24521"
}
],
"symlink_target": ""
} |
import arrow
from django import template
from django.core.urlresolvers import reverse
register = template.Library()
@register.filter
def get_total_subject_posts(subject):
    """Return the total number of posts across all threads in ``subject``."""
    return sum(thread.posts.count() for thread in subject.threads.all())
@register.filter
def started_time(created_at):
    """Humanize ``created_at`` (e.g. "2 hours ago") using arrow."""
    moment = arrow.get(created_at)
    return moment.humanize()
@register.simple_tag
def last_posted_user_name(thread):
    """Return the username of the most recent poster in ``thread``.

    Returns an empty string when the thread has no posts yet;
    ``QuerySet.last()`` returns ``None`` for an empty queryset, and the
    previous implementation then raised ``AttributeError`` on
    ``None.user``.
    """
    last_post = thread.posts.all().order_by('created_at').last()
    if last_post is None:
        return ''
    return last_post.user.username
| {
"content_hash": "e6cd93ff899854b9b7e0ffc4ff6b6863",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 64,
"avg_line_length": 20.636363636363637,
"alnum_prop": 0.6622613803230544,
"repo_name": "GunnerJnr/_CodeInstitute",
"id": "bb0e2f13aa5d9d87de00d66c7be5666a49c7b8c9",
"size": "681",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Stream-3/Full-Stack-Development/17.Create-A-Django-Based-Forum/6.Enable-Or-Disable-Site-Functionality/we_are_social/threads/templatetags/thread_extras.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "336"
},
{
"name": "CSS",
"bytes": "2545480"
},
{
"name": "HTML",
"bytes": "708226"
},
{
"name": "JavaScript",
"bytes": "1984479"
},
{
"name": "Python",
"bytes": "1727585"
},
{
"name": "Shell",
"bytes": "75780"
},
{
"name": "TSQL",
"bytes": "642"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import re
from collections import namedtuple
from pants.base.workunit import WorkUnit, WorkUnitLabel
from pants.task.task import Task
from pants.util.memo import memoized_method, memoized_property
from pants.util.process_handler import subprocess
from twitter.common.collections.orderedset import OrderedSet
from pants.contrib.go.subsystems.go_distribution import GoDistribution
from pants.contrib.go.targets.go_binary import GoBinary
from pants.contrib.go.targets.go_library import GoLibrary
from pants.contrib.go.targets.go_local_source import GoLocalSource
from pants.contrib.go.targets.go_remote_library import GoRemoteLibrary
from pants.contrib.go.targets.go_target import GoTarget
class GoTask(Task):
    """Base task for Go support: target predicates plus Go distribution access."""

    @classmethod
    def subsystem_dependencies(cls):
        deps = super(GoTask, cls).subsystem_dependencies()
        return deps + (GoDistribution.Factory,)

    @staticmethod
    def is_binary(target):
        """Whether ``target`` is a GoBinary."""
        return isinstance(target, GoBinary)

    @staticmethod
    def is_local_lib(target):
        """Whether ``target`` is a GoLibrary."""
        return isinstance(target, GoLibrary)

    @staticmethod
    def is_remote_lib(target):
        """Whether ``target`` is a GoRemoteLibrary."""
        return isinstance(target, GoRemoteLibrary)

    @staticmethod
    def is_local_src(target):
        """Whether ``target`` is a GoLocalSource."""
        return isinstance(target, GoLocalSource)

    @staticmethod
    def is_go(target):
        """Whether ``target`` is any kind of GoTarget."""
        return isinstance(target, GoTarget)

    @memoized_property
    def go_dist(self):
        """The configured Go distribution (created once and cached)."""
        return GoDistribution.Factory.global_instance().create()

    @memoized_property
    def import_oracle(self):
        """Return an import oracle that can help look up and categorize imports.

        :rtype: :class:`ImportOracle`
        """
        return ImportOracle(go_dist=self.go_dist, workunit_factory=self.context.new_workunit)

    @memoized_property
    def goos_goarch(self):
        """Return concatenated $GOOS and $GOARCH environment variables, separated by an underscore.

        Useful for locating where the Go compiler is placing binaries ("$GOPATH/pkg/$GOOS_$GOARCH").

        :rtype: string
        """
        parts = (self._lookup_go_env_var('GOOS'), self._lookup_go_env_var('GOARCH'))
        return '_'.join(parts)

    def _lookup_go_env_var(self, var):
        # Shell out to `go env <var>` and strip the trailing newline.
        go_env_cmd = self.go_dist.create_go_cmd('env', args=[var])
        return go_env_cmd.check_output().strip()
class ImportOracle(object):
    """Answers questions about Go imports."""

    class ListDepsError(Exception):
        """Indicates a problem listing import paths for one or more packages."""

    def __init__(self, go_dist, workunit_factory):
        """
        :param go_dist: The Go distribution used to run `go list`.
        :param workunit_factory: A factory for workunits used to report tool invocations.
        """
        self._go_dist = go_dist
        self._workunit_factory = workunit_factory

    @memoized_property
    def go_stdlib(self):
        """Return the set of all Go standard library import paths.

        :rtype: frozenset of string
        """
        out = self._go_dist.create_go_cmd('list', args=['std']).check_output()
        return frozenset(out.strip().split())

    # This simple regex mirrors the behavior of the relevant go code in practice (see
    # repoRootForImportDynamic and surrounding code in
    # https://github.com/golang/go/blob/7bc40ffb05d8813bf9b41a331b45d37216f9e747/src/cmd/go/vcs.go).
    # NB: A raw string is used so `\.` is a regex escape rather than an invalid python
    # string-literal escape; the needless `\/` escape is written as a plain `/`.
    # The matched pattern is unchanged.
    _remote_import_re = re.compile(r'[^.]+(?:\.[^.]+)+/')

    def is_remote_import(self, import_path):
        """Whether the specified import_path denotes a remote import."""
        return self._remote_import_re.match(import_path) is not None

    def is_go_internal_import(self, import_path):
        """Return `True` if the given import path will be satisfied directly by the Go distribution.

        For example, both the go standard library ("archive/tar", "bufio", "fmt", etc.) and "C" imports
        are satisfiable by a Go distribution via linking of internal Go code and external c standard
        library code respectively.

        :rtype: bool
        """
        # The "C" package is a psuedo-package that links through to the c stdlib, see:
        # http://blog.golang.org/c-go-cgo
        return import_path == 'C' or import_path in self.go_stdlib

    class ImportListing(namedtuple('ImportListing', ['pkg_name',
                                                     'imports',
                                                     'test_imports',
                                                     'x_test_imports'])):
        """Represents all the imports of a given package."""

        @property
        def all_imports(self):
            """Return all imports for this package, including any test imports.

            :rtype: list of string
            """
            return list(OrderedSet(self.imports + self.test_imports + self.x_test_imports))

    @memoized_method
    def list_imports(self, pkg, gopath=None):
        """Return a listing of the dependencies of the given package.

        :param string pkg: The package whose files to list all dependencies of.
        :param string gopath: An optional $GOPATH which points to a Go workspace containing `pkg`.
        :returns: The import listing for `pkg` that represents all its dependencies.
        :rtype: :class:`ImportOracle.ImportListing`
        :raises: :class:`ImportOracle.ListDepsError` if there was a problem listing the dependencies
          of `pkg`.
        """
        go_cmd = self._go_dist.create_go_cmd('list', args=['-json', pkg], gopath=gopath)
        with self._workunit_factory('list {}'.format(pkg), cmd=str(go_cmd),
                                    labels=[WorkUnitLabel.TOOL]) as workunit:
            # TODO(John Sirois): It would be nice to be able to tee the stdout to the workunit to we have
            # a capture of the json available for inspection in the server console.
            process = go_cmd.spawn(stdout=subprocess.PIPE, stderr=workunit.output('stderr'))
            out, _ = process.communicate()
            returncode = process.returncode
            workunit.set_outcome(WorkUnit.SUCCESS if returncode == 0 else WorkUnit.FAILURE)
            if returncode != 0:
                raise self.ListDepsError('Problem listing imports for {}: {} failed with exit code {}'
                                         .format(pkg, go_cmd, returncode))
            data = json.loads(out)
            # XTestImports are for black box tests. These test files live inside the package dir but
            # declare a different package and thus can only access the public members of the package's
            # production code. This style of test necessarily means the test file will import the main
            # package. For pants, this would lead to a cyclic self-dependency, so we omit the main
            # package as implicitly included as its own dependency.
            x_test_imports = [i for i in data.get('XTestImports', []) if i != pkg]
            return self.ImportListing(pkg_name=data.get('Name'),
                                      imports=data.get('Imports', []),
                                      test_imports=data.get('TestImports', []),
                                      x_test_imports=x_test_imports)
| {
"content_hash": "6990f5599b6cde7e1bea3406d88eea53",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 99,
"avg_line_length": 40.96363636363636,
"alnum_prop": 0.6767273265275928,
"repo_name": "fkorotkov/pants",
"id": "6def91c702efd97472f55e7d5eb8f5d75add5384",
"size": "6906",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/go/src/python/pants/contrib/go/tasks/go_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "481460"
},
{
"name": "JavaScript",
"bytes": "35417"
},
{
"name": "Python",
"bytes": "5931594"
},
{
"name": "Rust",
"bytes": "271643"
},
{
"name": "Scala",
"bytes": "76239"
},
{
"name": "Shell",
"bytes": "74734"
},
{
"name": "Thrift",
"bytes": "2795"
}
],
"symlink_target": ""
} |
"""Django settings for a djangoappengine (Google App Engine) deployment."""
from djangoappengine.settings_base import *
import os
# Activate django-dbindexer for the default database
# (queries hit the 'dbindexer' wrapper, which delegates to the native GAE db).
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
AUTOLOAD_SITECONF = 'indexes'
SITE_ID = 1
ADMINS = (
    ('Thiago Pagonha', 'thi.pag@gmail.com'),
)
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Sao_Paulo'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'pt-BR'
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.comments',
    'django.contrib.auth',
    'django.contrib.sessions',
    'django.contrib.syndication',
    #'django.contrib.sites',
    'djangotoolbox',
    'autoload',
    'dbindexer',
    'blog',
    'psn',
    'wiki',
    # djangoappengine should come last, so it can override a few manage.py commands
    'djangoappengine',
)
#TEMPLATE_LOADERS = (
#    'django.template.loaders.filesystem.load_template_source',
#    'django.template.loaders.app_directories.load_template_source',
#    'django.template.loaders.eggs.load_template_source',
#)
MIDDLEWARE_CLASSES = (
    # This loads the index definitions, so it has to come first
    'autoload.middleware.AutoloadMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.core.context_processors.request',
    'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
# Template lookup paths: the project-wide directory plus each app's templates.
TEMPLATE_DIRS = (
    os.path.join(os.path.dirname(__file__), 'templates'),
    os.path.join(os.path.dirname(__file__), 'blog/templates'),
    os.path.join(os.path.dirname(__file__), 'psn/templates'),
    os.path.join(os.path.dirname(__file__), 'wiki/templates'),
)
ROOT_URLCONF = 'urls'
| {
"content_hash": "763e16c5da7ae038da4f6dff7a0ad2b1",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 83,
"avg_line_length": 30.322916666666668,
"alnum_prop": 0.7069735486087255,
"repo_name": "thiagopa/thiagopagonha",
"id": "d9bcdc70f9078b0778b7f49978b3682f91b8e26f",
"size": "3124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24184"
},
{
"name": "JavaScript",
"bytes": "1446"
},
{
"name": "Python",
"bytes": "21235"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import importlib
import inspect
import os
import re
import sys
import tempfile
from unittest import skipIf
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db import DatabaseError, connection
from django.template import TemplateDoesNotExist
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin
from django.urls import reverse
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import SimpleLazyObject
from django.views.debug import (
CLEANSED_SUBSTITUTE, CallableSettingWrapper, ExceptionReporter,
cleanse_setting, technical_500_response,
)
from .. import BrokenException, except_args
from ..views import (
custom_exception_reporter_filter_view, multivalue_dict_key_error,
non_sensitive_view, paranoid_view, sensitive_args_function_caller,
sensitive_kwargs_function_caller, sensitive_method_view, sensitive_view,
)
if six.PY3:
from .py3_test_debug import Py3ExceptionReporterTests # NOQA
class User(object):
    """Minimal stand-in user whose string form is a fixed username."""

    def __str__(self):
        return 'jacob'
class CallableSettingWrapperTests(SimpleTestCase):
    """Unit tests for CallableSettingWrapper."""

    def test_repr(self):
        # The wrapper must delegate repr() to the wrapped callable.
        class WrappedCallable(object):
            def __repr__(self):
                return "repr from the wrapped callable"

            def __call__(self):
                pass

        wrapped_repr = repr(CallableSettingWrapper(WrappedCallable()))
        self.assertEqual(wrapped_repr, "repr from the wrapped callable")
@override_settings(DEBUG=True, ROOT_URLCONF='view_tests.urls')
class DebugViewTests(LoggingCaptureMixin, SimpleTestCase):
    """Exercise the technical 4xx/500 debug responses with DEBUG=True."""

    def test_files(self):
        response = self.client.get('/raises/')
        self.assertEqual(response.status_code, 500)
        data = {
            'file_data.txt': SimpleUploadedFile('file_data.txt', b'haha'),
        }
        response = self.client.post('/raises/', data)
        # Uploaded file names may appear in the report; contents must not.
        self.assertContains(response, 'file_data.txt', status_code=500)
        self.assertNotContains(response, 'haha', status_code=500)

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    # Ensure no 403.html template exists to test the default case.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
    }])
    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    # Set up a test 403.html template.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'OPTIONS': {
            'loaders': [
                ('django.template.loaders.locmem.Loader', {
                    '403.html': 'This is a test template for a 403 error ({{ exception }}).',
                }),
            ],
        },
    }])
    def test_403_template(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, 'test template', status_code=403)
        self.assertContains(response, '(Insufficient Permissions).', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_raised_404(self):
        response = self.client.get('/views/raises404/')
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_404_not_in_urls(self):
        response = self.client.get('/not-in-urls')
        self.assertNotContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "<code>not-in-urls</code>, didn't match", status_code=404)

    def test_technical_404(self):
        response = self.client.get('/views/technical404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.technical404", status_code=404)

    def test_classbased_technical_404(self):
        response = self.client.get('/views/classbased404/')
        self.assertContains(response, "Raised by:", status_code=404)
        self.assertContains(response, "view_tests.views.Http404View", status_code=404)

    def test_view_exceptions(self):
        # Every flavor of BrokenException must propagate out of the test client.
        for n in range(len(except_args)):
            with self.assertRaises(BrokenException):
                self.client.get(reverse('view_exception', args=(n,)))

    def test_non_l10ned_numeric_ids(self):
        """
        Numeric IDs and fancy traceback context blocks line numbers shouldn't be localized.
        """
        with self.settings(DEBUG=True, USE_L10N=True):
            response = self.client.get('/raises500/')
            # We look for a HTML fragment of the form
            # '<div class="context" id="c38123208">', not '<div class="context" id="c38,123,208"'
            self.assertContains(response, '<div class="context" id="', status_code=500)
            match = re.search(b'<div class="context" id="(?P<id>[^"]+)">', response.content)
            self.assertIsNotNone(match)
            id_repr = match.group('id')
            self.assertFalse(
                re.search(b'[^c0-9]', id_repr),
                "Numeric IDs in debug response HTML page shouldn't be localized (value: %s)." % id_repr
            )

    def test_template_exceptions(self):
        for n in range(len(except_args)):
            try:
                self.client.get(reverse('template_exception', args=(n,)))
            except Exception:
                # The last traceback frame must be the `raise BrokenException` line.
                raising_loc = inspect.trace()[-1][-2][0].strip()
                self.assertNotEqual(
                    raising_loc.find('raise BrokenException'), -1,
                    "Failed to find 'raise BrokenException' in last frame of "
                    "traceback, instead found: %s" % raising_loc
                )

    def test_template_loader_postmortem(self):
        """Tests for not existing file"""
        template_name = "notfound.html"
        with tempfile.NamedTemporaryFile(prefix=template_name) as tmpfile:
            tempdir = os.path.dirname(tmpfile.name)
            template_path = os.path.join(tempdir, template_name)
            with override_settings(TEMPLATES=[{
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [tempdir],
            }]):
                response = self.client.get(reverse('raises_template_does_not_exist', kwargs={"path": template_name}))
            self.assertContains(response, "%s (Source does not exist)" % template_path, status_code=500, count=2)

    def test_no_template_source_loaders(self):
        """
        Make sure if you don't specify a template, the debug view doesn't blow up.
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.client.get('/render_no_template/')

    @override_settings(ROOT_URLCONF='view_tests.default_urls')
    def test_default_urlconf_template(self):
        """
        Make sure that the default URLconf template is shown instead
        of the technical 404 page, if the user has not altered their
        URLconf yet.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "<h2>Congratulations on your first Django-powered page.</h2>"
        )

    @override_settings(ROOT_URLCONF='view_tests.regression_21530_urls')
    def test_regression_21530(self):
        """
        Regression test for bug #21530.

        If the admin app include is replaced with exactly one url
        pattern, then the technical 404 template should be displayed.
        The bug here was that an AttributeError caused a 500 response.
        """
        response = self.client.get('/')
        self.assertContains(
            response,
            "Page not found <span>(404)</span>",
            status_code=404
        )
class DebugViewQueriesAllowedTests(SimpleTestCase):
    """Debug-view tests that need real database access."""

    # May need a query to initialize MySQL connection
    allow_database_queries = True

    def test_handle_db_exception(self):
        """
        Ensure the debug view works when a database exception is raised by
        performing an invalid query and passing the exception to the debug view.
        """
        with connection.cursor() as cursor:
            try:
                cursor.execute('INVALID SQL')
            except DatabaseError:
                # Capture the live exception info for the debug view below.
                exc_info = sys.exc_info()
        rf = RequestFactory()
        response = technical_500_response(rf.get('/'), *exc_info)
        self.assertContains(response, 'OperationalError at /', status_code=500)
@override_settings(
    DEBUG=True,
    ROOT_URLCONF='view_tests.urls',
    # No template directories are configured, so no templates will be found.
    TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }],
)
class NonDjangoTemplatesDebugViewTests(SimpleTestCase):
    """Debug views must also work with a non-Django template backend."""

    def test_400(self):
        # Ensure that when DEBUG=True, technical_500_template() is called.
        response = self.client.get('/raises400/')
        self.assertContains(response, '<div class="context" id="', status_code=400)

    def test_403(self):
        response = self.client.get('/raises403/')
        self.assertContains(response, '<h1>403 Forbidden</h1>', status_code=403)

    def test_404(self):
        response = self.client.get('/raises404/')
        self.assertEqual(response.status_code, 404)

    def test_template_not_found_error(self):
        # Raises a TemplateDoesNotExist exception and shows the debug view.
        url = reverse('raises_template_does_not_exist', kwargs={"path": "notfound.html"})
        response = self.client.get(url)
        self.assertContains(response, '<div class="context" id="', status_code=500)
class ExceptionReporterTests(SimpleTestCase):
    """
    Tests for the HTML output of django.views.debug.ExceptionReporter.

    NOTE: assertions against the HTML report must use HTML-escaped text
    (e.g. ``Can&#39;t``) because the reporter escapes exception messages.
    Several literals below had been corrupted by entity un-escaping
    (``'...Can't...'`` is a syntax error inside a single-quoted string);
    they are restored to the escaped form here.
    """
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError at /test_view/</h1>', html)
        # Exception messages are HTML-escaped in the report.
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<p>jacob</p>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ValueError</h1>', html)
        self.assertIn('<pre class="exception_value">Can&#39;t find my keys</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<h3 id="user-info">USER</h3>', html)
        self.assertIn('<th>Exception Type:</th>', html)
        self.assertIn('<th>Exception Value:</th>', html)
        self.assertIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_eol_support(self):
        """Test that the ExceptionReporter supports Unix, Windows and Macintosh EOL markers"""
        LINES = list('print %d' % i for i in range(1, 6))
        reporter = ExceptionReporter(None, None, None, None)
        for newline in ['\n', '\r\n', '\r']:
            fd, filename = tempfile.mkstemp(text=False)
            os.write(fd, force_bytes(newline.join(LINES) + newline))
            os.close(fd)
            try:
                self.assertEqual(
                    reporter._get_lines_from_file(filename, 3, 2),
                    (1, LINES[1:3], LINES[3], LINES[4:])
                )
            finally:
                os.unlink(filename)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">No exception message supplied</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report at /test_view/</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertIn('<th>Request Method:</th>', html)
        self.assertIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertNotIn('<p>Request data not supplied</p>', html)

    def test_message_only(self):
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>Report</h1>', html)
        self.assertIn('<pre class="exception_value">I&#39;m a little teapot</pre>', html)
        self.assertNotIn('<th>Request Method:</th>', html)
        self.assertNotIn('<th>Request URL:</th>', html)
        self.assertNotIn('<th>Exception Type:</th>', html)
        self.assertNotIn('<th>Exception Value:</th>', html)
        self.assertNotIn('<h2>Traceback ', html)
        self.assertIn('<h2>Request information</h2>', html)
        self.assertIn('<p>Request data not supplied</p>', html)

    def test_non_utf8_values_handling(self):
        "Non-UTF-8 exceptions/values should not make the output generation choke."
        try:
            class NonUtf8Output(Exception):
                def __repr__(self):
                    return b'EXC\xe9EXC'
            somevar = b'VAL\xe9VAL'  # NOQA
            raise NonUtf8Output()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('VAL\\xe9VAL', html)
        self.assertIn('EXC\\xe9EXC', html)

    def test_unprintable_values_handling(self):
        "Unprintable values should not make the output generation choke."
        try:
            class OomOutput(object):
                def __repr__(self):
                    raise MemoryError('OOM')
            oomvalue = OomOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<td class="code"><pre>Error in formatting', html)

    def test_too_large_values_handling(self):
        "Large values should not create a large HTML."
        large = 256 * 1024
        repr_of_str_adds = len(repr(''))
        try:
            class LargeOutput(object):
                def __repr__(self):
                    return repr('A' * large)
            largevalue = LargeOutput()  # NOQA
            raise ValueError()
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertEqual(len(html) // 1024 // 128, 0)  # still fit in 128Kb
        self.assertIn('<trimmed %d bytes string>' % (large + repr_of_str_adds,), html)

    @skipIf(six.PY2, 'Bug manifests on PY3 only')
    def test_unfrozen_importlib(self):
        """
        importlib is not a frozen app, but its loader thinks it's frozen which
        results in an ImportError on Python 3. Refs #21443.
        """
        try:
            request = self.rf.get('/test_view/')
            importlib.import_module('abc.def.invalid.name')
        except Exception:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        html = reporter.get_traceback_html()
        self.assertIn('<h1>ImportError at /test_view/</h1>', html)

    def test_ignore_traceback_evaluation_exceptions(self):
        """
        Don't trip over exceptions generated by crafted objects when
        evaluating them while cleansing (#24455).
        """
        class BrokenEvaluation(Exception):
            pass

        def broken_setup():
            raise BrokenEvaluation

        request = self.rf.get('/test_view/')
        broken_lazy = SimpleLazyObject(broken_setup)
        try:
            bool(broken_lazy)
        except BrokenEvaluation:
            exc_type, exc_value, tb = sys.exc_info()
        self.assertIn(
            "BrokenEvaluation",
            ExceptionReporter(request, exc_type, exc_value, tb).get_traceback_html(),
            "Evaluation exception reason not mentioned in traceback"
        )

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        html = reporter.get_traceback_html()
        self.assertIn("http://evil.com/", html)
class PlainTextReportTests(SimpleTestCase):
    """
    Tests for the plain-text output of
    django.views.debug.ExceptionReporter.get_traceback_text().
    Unlike the HTML report, the text report is not escaped.
    """
    rf = RequestFactory()

    def test_request_and_exception(self):
        "A simple exception report can be generated"
        try:
            request = self.rf.get('/test_view/')
            request.user = User()
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(request, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError at /test_view/', text)
        self.assertIn("Can't find my keys", text)
        self.assertIn('Request Method:', text)
        self.assertIn('Request URL:', text)
        self.assertIn('USER: jacob', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request information:', text)
        self.assertNotIn('Request data not supplied', text)

    def test_no_request(self):
        "An exception report can be generated without request"
        try:
            raise ValueError("Can't find my keys")
        except ValueError:
            exc_type, exc_value, tb = sys.exc_info()
        reporter = ExceptionReporter(None, exc_type, exc_value, tb)
        text = reporter.get_traceback_text()
        self.assertIn('ValueError', text)
        self.assertIn("Can't find my keys", text)
        self.assertNotIn('Request Method:', text)
        self.assertNotIn('Request URL:', text)
        self.assertNotIn('USER:', text)
        self.assertIn('Exception Type:', text)
        self.assertIn('Exception Value:', text)
        self.assertIn('Traceback:', text)
        self.assertIn('Request data not supplied', text)

    def test_no_exception(self):
        "An exception report can be generated for just a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, None, None)
        # Only checks that report generation does not raise.
        reporter.get_traceback_text()

    def test_request_and_message(self):
        "A message can be provided in addition to a request"
        request = self.rf.get('/test_view/')
        reporter = ExceptionReporter(request, None, "I'm a little teapot", None)
        # Only checks that report generation does not raise.
        reporter.get_traceback_text()

    def test_message_only(self):
        # Only checks that report generation does not raise.
        reporter = ExceptionReporter(None, None, "I'm a little teapot", None)
        reporter.get_traceback_text()

    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host(self):
        "An exception report can be generated even for a disallowed host."
        request = self.rf.get('/', HTTP_HOST='evil.com')
        reporter = ExceptionReporter(request, None, None, None)
        text = reporter.get_traceback_text()
        self.assertIn("http://evil.com/", text)
class ExceptionReportTestMixin(object):
    # Mixin used in the ExceptionReporterFilterTests and
    # AjaxResponseExceptionReporterFilter tests below

    # POST payload shared by all verify_* helpers; 'sausage' and 'bacon'
    # keys are the ones the views under test mark as sensitive.
    breakfast_data = {'sausage-key': 'sausage-value',
                      'baked-beans-key': 'baked-beans-value',
                      'hash-brown-key': 'hash-brown-value',
                      'bacon-key': 'bacon-value'}

    def verify_unsafe_response(self, view, check_for_vars=True,
                               check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # All variables are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters are shown.
                self.assertContains(response, k, status_code=500)
                self.assertContains(response, v, status_code=500)

    def verify_safe_response(self, view, check_for_vars=True,
                             check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Non-sensitive variable's name and value are shown.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertContains(response, 'scrambled', status_code=500)
            # Sensitive variable's name is shown but not its value.
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
            # Non-sensitive POST parameters' values are shown.
            self.assertContains(response, 'baked-beans-value', status_code=500)
            self.assertContains(response, 'hash-brown-value', status_code=500)
            # Sensitive POST parameters' values are not shown.
            self.assertNotContains(response, 'sausage-value', status_code=500)
            self.assertNotContains(response, 'bacon-value', status_code=500)

    def verify_paranoid_response(self, view, check_for_vars=True,
                                 check_for_POST_params=True):
        """
        Asserts that no variables or POST parameters are displayed in the response.
        """
        request = self.rf.post('/some_url/', self.breakfast_data)
        response = view(request)
        if check_for_vars:
            # Show variable names but not their values.
            self.assertContains(response, 'cooked_eggs', status_code=500)
            self.assertNotContains(response, 'scrambled', status_code=500)
            self.assertContains(response, 'sauce', status_code=500)
            self.assertNotContains(response, 'worcestershire', status_code=500)
        if check_for_POST_params:
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertContains(response, k, status_code=500)
                # No POST parameters' values are shown.
                self.assertNotContains(response, v, status_code=500)

    def verify_unsafe_email(self, view, check_for_POST_params=True):
        """
        Asserts that potentially sensitive info are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters are shown.
                    self.assertIn(k, body_plain)
                    self.assertIn(v, body_plain)
                    self.assertIn(k, body_html)
                    self.assertIn(v, body_html)

    def verify_safe_email(self, view, check_for_POST_params=True):
        """
        Asserts that certain sensitive info are not displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body_plain = force_text(email.body)
            self.assertNotIn('cooked_eggs', body_plain)
            self.assertNotIn('scrambled', body_plain)
            self.assertNotIn('sauce', body_plain)
            self.assertNotIn('worcestershire', body_plain)
            # Frames vars are shown in html email reports.
            body_html = force_text(email.alternatives[0][0])
            self.assertIn('cooked_eggs', body_html)
            self.assertIn('scrambled', body_html)
            self.assertIn('sauce', body_html)
            self.assertNotIn('worcestershire', body_html)
            if check_for_POST_params:
                for k, v in self.breakfast_data.items():
                    # All POST parameters' names are shown.
                    self.assertIn(k, body_plain)
                # Non-sensitive POST parameters' values are shown.
                self.assertIn('baked-beans-value', body_plain)
                self.assertIn('hash-brown-value', body_plain)
                self.assertIn('baked-beans-value', body_html)
                self.assertIn('hash-brown-value', body_html)
                # Sensitive POST parameters' values are not shown.
                self.assertNotIn('sausage-value', body_plain)
                self.assertNotIn('bacon-value', body_plain)
                self.assertNotIn('sausage-value', body_html)
                self.assertNotIn('bacon-value', body_html)

    def verify_paranoid_email(self, view):
        """
        Asserts that no variables or POST parameters are displayed in the email report.
        """
        with self.settings(ADMINS=[('Admin', 'admin@fattie-breakie.com')]):
            mail.outbox = []  # Empty outbox
            request = self.rf.post('/some_url/', self.breakfast_data)
            view(request)
            self.assertEqual(len(mail.outbox), 1)
            email = mail.outbox[0]
            # Frames vars are never shown in plain text email reports.
            body = force_text(email.body)
            self.assertNotIn('cooked_eggs', body)
            self.assertNotIn('scrambled', body)
            self.assertNotIn('sauce', body)
            self.assertNotIn('worcestershire', body)
            for k, v in self.breakfast_data.items():
                # All POST parameters' names are shown.
                self.assertIn(k, body)
                # No POST parameters' values are shown.
                self.assertNotIn(v, body)
@override_settings(ROOT_URLCONF='view_tests.urls')
class ExceptionReporterFilterTests(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Ensure that sensitive information can be filtered out of error reports.
    Refs #14614.
    """
    rf = RequestFactory()

    def test_non_sensitive_request(self):
        """
        Ensure that everything (request info and frame variables) can be seen
        in the default error reports for non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view)
            self.verify_unsafe_email(non_sensitive_view)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters and frame variables cannot be
        seen in the default error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view)
            self.verify_unsafe_email(sensitive_view)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view)
            self.verify_safe_email(sensitive_view)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters and frame variables can be seen in the
        default error reports for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view)
            self.verify_unsafe_email(paranoid_view)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view)
            self.verify_paranoid_email(paranoid_view)

    def test_multivalue_dict_key_error(self):
        """
        #21098 -- Ensure that sensitive POST parameters cannot be seen in the
        error reports if request.POST['nonexistent_key'] throws an error.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(multivalue_dict_key_error)
            self.verify_unsafe_email(multivalue_dict_key_error)

        with self.settings(DEBUG=False):
            self.verify_safe_response(multivalue_dict_key_error)
            self.verify_safe_email(multivalue_dict_key_error)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view)
            self.verify_unsafe_email(custom_exception_reporter_filter_view)

    def test_sensitive_method(self):
        """
        Ensure that the sensitive_variables decorator works with object
        methods.
        Refs #18379.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_method_view,
                                        check_for_POST_params=False)
            self.verify_unsafe_email(sensitive_method_view,
                                     check_for_POST_params=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_method_view,
                                      check_for_POST_params=False)
            self.verify_safe_email(sensitive_method_view,
                                   check_for_POST_params=False)

    def test_sensitive_function_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as arguments to the
        decorated function.
        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_args_function_caller)
            self.verify_unsafe_email(sensitive_args_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_args_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_args_function_caller, check_for_POST_params=False)

    def test_sensitive_function_keyword_arguments(self):
        """
        Ensure that sensitive variables don't leak in the sensitive_variables
        decorator's frame, when those variables are passed as keyword arguments
        to the decorated function.
        Refs #19453.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_kwargs_function_caller)
            self.verify_unsafe_email(sensitive_kwargs_function_caller)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_kwargs_function_caller, check_for_POST_params=False)
            self.verify_safe_email(sensitive_kwargs_function_caller, check_for_POST_params=False)

    def test_callable_settings(self):
        """
        Callable settings should not be evaluated in the debug page (#21345).
        """
        def callable_setting():
            return "This should not be displayed"
        with self.settings(DEBUG=True, FOOBAR=callable_setting):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_callable_settings_forbidding_to_set_attributes(self):
        """
        Callable settings which forbid to set attributes should not break
        the debug page (#23070).
        """
        class CallableSettingWithSlots(object):
            __slots__ = []

            def __call__(self):
                return "This should not be displayed"

        with self.settings(DEBUG=True, WITH_SLOTS=CallableSettingWithSlots()):
            response = self.client.get('/raises500/')
            self.assertNotContains(response, "This should not be displayed", status_code=500)

    def test_dict_setting_with_non_str_key(self):
        """
        A dict setting containing a non-string key should not break the
        debug page (#12744).
        """
        with self.settings(DEBUG=True, FOOBAR={42: None}):
            response = self.client.get('/raises500/')
            self.assertContains(response, 'FOOBAR', status_code=500)

    def test_sensitive_settings(self):
        """
        The debug page should not show some sensitive settings
        (password, secret key, ...).
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            with self.settings(DEBUG=True, **{setting: "should not be displayed"}):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)

    def test_settings_with_sensitive_keys(self):
        """
        The debug page should filter out some sensitive information found in
        dict settings.
        """
        sensitive_settings = [
            'SECRET_KEY',
            'PASSWORD',
            'API_KEY',
            'AUTH_TOKEN',
        ]
        for setting in sensitive_settings:
            FOOBAR = {
                setting: "should not be displayed",
                'recursive': {setting: "should not be displayed"},
            }
            with self.settings(DEBUG=True, FOOBAR=FOOBAR):
                response = self.client.get('/raises500/')
                self.assertNotContains(response, 'should not be displayed', status_code=500)
class AjaxResponseExceptionReporterFilter(ExceptionReportTestMixin, LoggingCaptureMixin, SimpleTestCase):
    """
    Ensure that sensitive information can be filtered out of error reports.

    Here we specifically test the plain text 500 debug-only error page served
    when it has been detected the request was sent by JS code. We don't check
    for (non)existence of frames vars in the traceback information section of
    the response content because we don't include them in these error pages.
    Refs #14614.
    """
    # Every request made through this factory is flagged as AJAX.
    rf = RequestFactory(HTTP_X_REQUESTED_WITH='XMLHttpRequest')

    def test_non_sensitive_request(self):
        """
        Ensure that request info can be seen in the default error reports for
        non-sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(non_sensitive_view, check_for_vars=False)

    def test_sensitive_request(self):
        """
        Ensure that sensitive POST parameters cannot be seen in the default
        error reports for sensitive requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(sensitive_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_safe_response(sensitive_view, check_for_vars=False)

    def test_paranoid_request(self):
        """
        Ensure that no POST parameters can be seen in the default error reports
        for "paranoid" requests.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(paranoid_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_paranoid_response(paranoid_view, check_for_vars=False)

    def test_custom_exception_reporter_filter(self):
        """
        Ensure that it's possible to assign an exception reporter filter to
        the request to bypass the one set in DEFAULT_EXCEPTION_REPORTER_FILTER.
        """
        with self.settings(DEBUG=True):
            self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)

        with self.settings(DEBUG=False):
            self.verify_unsafe_response(custom_exception_reporter_filter_view, check_for_vars=False)
class HelperFunctionTests(SimpleTestCase):
    """Unit tests for the cleanse_setting() helper used by the debug views."""

    def test_cleanse_setting_basic(self):
        # Non-sensitive names pass through; sensitive ones are masked.
        self.assertEqual(cleanse_setting('TEST', 'TEST'), 'TEST')
        self.assertEqual(cleanse_setting('PASSWORD', 'super_secret'), CLEANSED_SUBSTITUTE)

    def test_cleanse_setting_ignore_case(self):
        # Matching of sensitive key names is case-insensitive.
        self.assertEqual(cleanse_setting('password', 'super_secret'), CLEANSED_SUBSTITUTE)

    def test_cleanse_setting_recurses_in_dictionary(self):
        # Sensitive keys nested inside dict settings are cleansed too.
        initial = {'login': 'cooper', 'password': 'secret'}
        expected = {'login': 'cooper', 'password': CLEANSED_SUBSTITUTE}
        self.assertEqual(cleanse_setting('SETTING_NAME', initial), expected)
| {
"content_hash": "d567bf92c8b92450be11cae349fa7aed",
"timestamp": "",
"source": "github",
"line_count": 955,
"max_line_length": 117,
"avg_line_length": 42.84502617801047,
"alnum_prop": 0.6189114548964978,
"repo_name": "sadaf2605/django",
"id": "11ecc4c8d43454ae76a72f091abc235485ce7ae3",
"size": "41087",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/view_tests/tests/test_debug.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52439"
},
{
"name": "HTML",
"bytes": "173525"
},
{
"name": "JavaScript",
"bytes": "451010"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11905278"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import requests
from bs4 import BeautifulSoup

# Scrape a GitHub profile page and save the avatar image to disk.
url = "https://github.com/jackfischer"
req = requests.get(url)
req.raise_for_status()  # fail loudly on HTTP errors instead of parsing an error page
soup = BeautifulSoup(req.content, "html.parser")

img = soup.find(class_="avatar")
imageurl = img.attrs["src"]

resp = requests.get(imageurl)
resp.raise_for_status()
# Context manager guarantees the file handle is closed; resp.content holds
# the full image bytes (clearer than iterating the Response in tiny chunks).
with open("imagewoo.png", "wb") as f:
    f.write(resp.content)
| {
"content_hash": "8e1e4209f48c351a10237ae13e7d5638",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 48,
"avg_line_length": 18.41176470588235,
"alnum_prop": 0.7060702875399361,
"repo_name": "HackBinghamton/club",
"id": "7045cedcd1a09759d25e45f64a2bf25922873a50",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/bs/gh.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24061"
},
{
"name": "JavaScript",
"bytes": "733"
},
{
"name": "Less",
"bytes": "10151"
},
{
"name": "Ruby",
"bytes": "49"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pytx'
copyright = u'2016, Mike Goffin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5.1'
# The full version, including alpha/beta/rc tags.
release = '0.5.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pytxdoc'
# -- Options for LaTeX output ---------------------------------------------
# Overrides passed to the LaTeX builder; every key is optional and the
# commented entries below show the sphinx-quickstart defaults.
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'pytx.tex', u'pytx Documentation',
   u'Mike Goffin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pytx', u'pytx Documentation',
     [u'Mike Goffin'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' is the sphinx-quickstart
# placeholder text — replace it with a real project description.
texinfo_documents = [
  ('index', 'pytx', u'pytx Documentation',
   u'Mike Goffin', 'pytx', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy *un-named* intersphinx form; newer Sphinx
# releases expect {'python': ('https://docs.python.org/', None)} — confirm
# the Sphinx version pinned for this project before changing it.
intersphinx_mapping = {'http://docs.python.org/': None}
| {
"content_hash": "4b2425c548e34b3d47f402ebc9e216a2",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 31.52173913043478,
"alnum_prop": 0.700564263322884,
"repo_name": "theCatWisel/ThreatExchange",
"id": "e48ddfbfbab99a795b82ba725775912ca3b09422",
"size": "8392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytx/docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6697"
},
{
"name": "CSS",
"bytes": "6802"
},
{
"name": "Go",
"bytes": "10196"
},
{
"name": "HTML",
"bytes": "14636"
},
{
"name": "JavaScript",
"bytes": "79881"
},
{
"name": "Jupyter Notebook",
"bytes": "25209"
},
{
"name": "Makefile",
"bytes": "6983"
},
{
"name": "PHP",
"bytes": "21312"
},
{
"name": "Python",
"bytes": "147779"
},
{
"name": "Ruby",
"bytes": "8629"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from ..utils import GCOR
def test_GCOR_inputs():
    """Every trait on GCOR's input spec must carry the expected metadata."""
    expected_metadata = {
        'args': {'argstr': '%s'},
        'environ': {'nohash': True, 'usedefault': True},
        'ignore_exception': {'nohash': True, 'usedefault': True},
        'in_file': {'argstr': '-input %s', 'copyfile': False,
                    'mandatory': True, 'position': -1},
        'mask': {'argstr': '-mask %s', 'copyfile': False},
        'nfirst': {'argstr': '-nfirst %d'},
        'no_demean': {'argstr': '-no_demean'},
        'terminal_output': {'deprecated': '1.0.0', 'nohash': True},
    }
    input_spec = GCOR.input_spec()
    for trait_name, metadata in expected_metadata.items():
        for meta_name, expected_value in metadata.items():
            # Same lookup pattern as the generated tests: fetch the trait
            # fresh for each metadata key.
            assert getattr(input_spec.traits()[trait_name],
                           meta_name) == expected_value
def test_GCOR_outputs():
    """GCOR's output spec must expose a single 'out' trait (no extra metadata)."""
    expected_metadata = {
        'out': {},
    }
    output_spec = GCOR.output_spec()
    for trait_name, metadata in expected_metadata.items():
        for meta_name, expected_value in metadata.items():
            assert getattr(output_spec.traits()[trait_name],
                           meta_name) == expected_value
| {
"content_hash": "3779793fc5005dbcb78dc974d488b47e",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 67,
"avg_line_length": 24.34090909090909,
"alnum_prop": 0.6022408963585434,
"repo_name": "mick-d/nipype",
"id": "9f307dda3497615b4d28e1833159cc82b51e272f",
"size": "1125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/interfaces/afni/tests/test_auto_GCOR.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
} |
import pytest
from .config import *
@pytest.fixture
def app(request):
    """The Flask application configured for testing.

    Switches on TESTING, points SQLAlchemy at the test database, and pushes
    an application context that is popped again when the test finishes.
    """
    main.app.config["TESTING"] = True
    main.app.config["SQLALCHEMY_DATABASE_URI"] = TEST_DATABASE_URI
    ctx = main.app.app_context()
    ctx.push()
    # Pop the context on teardown; the bound method is the whole finalizer.
    request.addfinalizer(ctx.pop)
    return main.app
@pytest.fixture
def db(request):
    """A freshly created database schema, dropped again after the test."""
    main.db.create_all()
    # drop_all is the complete teardown, so register it directly.
    request.addfinalizer(main.db.drop_all)
    return main.db
@pytest.fixture
def session(db, request):
    """A scoped SQLAlchemy session bound to an outer transaction.

    The transaction is rolled back on teardown, so nothing a test commits
    survives into the next test.
    """
    connection = db.engine.connect()
    outer_transaction = connection.begin()
    scoped = db.create_scoped_session(
        options=dict(bind=connection, binds={}))
    db.session = scoped

    def cleanup():
        # Undo everything the test did, then release the connection/session.
        outer_transaction.rollback()
        connection.close()
        scoped.remove()

    request.addfinalizer(cleanup)
    return scoped
@pytest.fixture
def test_client(app):
    """Return a Flask/Werkzeug test client built from the ``app`` fixture."""
    return app.test_client()
def test_hello(test_client, session):
    # Smoke test: the fixture chain builds and yields a real Flask test client.
    assert isinstance(test_client, FlaskClient)
def test_add_user(session):
    """A User can be created, persisted, and located again by its key."""
    new_user = model.User(user_key="test_id")
    # repr() must embed the user_key before the row is even persisted.
    assert repr(new_user) == "<User %r>" % ("test_id")
    session.add(new_user)
    session.commit()
    # A committed row gets a positive primary key.
    assert new_user.id > 0
    assert search_user("test_id") is not None
def test_delete_user(session):
    """Deleting a persisted User makes it unfindable afterwards."""
    session.add(model.User(user_key="test_id"))
    session.commit()
    # Re-fetch through the query path so we delete what a caller would see.
    persisted = search_user("test_id")
    session.delete(persisted)
    session.commit()
    assert search_user("test_id") is None
def search_user(user_key):
    """Return the first User whose user_key matches, or None if absent."""
    return model.User.query.filter_by(user_key=user_key).first()
| {
"content_hash": "5ec3248906b0c168ffe6fbb17b23a23b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 66,
"avg_line_length": 21.025316455696203,
"alnum_prop": 0.6538229981938591,
"repo_name": "JungWinter/yellowid-flask",
"id": "93d35f24bc1d732ada89b430c5405f9303bca146",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20052"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from jaspyx.context import Context
from jaspyx.scope import Scope
class ClassContext(Context):
    """Context for the body of a class definition.

    Wraps the enclosing scope in a fresh, non-inheriting scope whose prefix
    is ``<parent prefix> + [name, 'prototype']`` — presumably so that class
    members are emitted onto the JavaScript prototype (TODO confirm against
    the code generator).
    """

    def __init__(self, parent, name):
        # py2-compatible super() call, matching the file's __future__ imports.
        super(ClassContext, self).__init__(parent)
        enclosing_scope = self.scope
        self.scope = Scope(enclosing_scope)
        self.scope.prefix = parent.scope.prefix + [name, 'prototype']
        self.scope.inherited = False
| {
"content_hash": "1178a7d851337650f702b82ad8e540ce",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 69,
"avg_line_length": 36.09090909090909,
"alnum_prop": 0.690176322418136,
"repo_name": "ztane/jaspyx",
"id": "47396a1cd7b48026b5dc4140d026e4c7a66e8c3f",
"size": "397",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jaspyx/context/class_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103192"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration altering two fields on the 'item' model.
    # Kept byte-identical to the generated form: Django compares migration
    # content, so cosmetic edits here are not worth the churn.
    dependencies = [
        ('todo', '0003_auto_20170621_1759'),
    ]
    operations = [
        # 'complete' becomes a non-null boolean defaulting to False.
        migrations.AlterField(
            model_name='item',
            name='complete',
            field=models.BooleanField(default=False),
        ),
        # 'priority' stored as a float; -1 appears to be the "unset"
        # sentinel — TODO confirm against the model/consumers.
        migrations.AlterField(
            model_name='item',
            name='priority',
            field=models.FloatField(default=-1),
        ),
    ]
| {
"content_hash": "ee4bcf4830fb08d9c7676875f6800bcd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 53,
"avg_line_length": 23,
"alnum_prop": 0.5633270321361059,
"repo_name": "noah-dev/todo_django",
"id": "fa3cbaebe183c2d020bb2ea4219131b7214e1915",
"size": "602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo/migrations/0004_auto_20170625_1728.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "138847"
},
{
"name": "HTML",
"bytes": "120293"
},
{
"name": "JavaScript",
"bytes": "366216"
},
{
"name": "Python",
"bytes": "20795"
}
],
"symlink_target": ""
} |
from st2tests.base import BaseActionAliasTestCase
class StartDiscovery(BaseActionAliasTestCase):
    """Tests for the 'start_discovery' action alias."""

    action_alias_name = "start_discovery"

    def _assert_discovery(self, command, expected_parameters):
        """Check *command* parses against the alias's first format string
        with *expected_parameters*, and matches exactly one format overall."""
        format_string = self.action_alias_db.formats[0]['representation'][0]
        self.assertExtractedParametersMatch(format_string=format_string,
                                            command=command,
                                            parameters=expected_parameters)
        self.assertCommandMatchesExactlyOneFormatString(
            format_strings=self.action_alias_db.get_format_strings(),
            command=command)

    def test_start_discovery(self):
        # No poller given -> falls back to 'primary'.
        self._assert_discovery(
            "orion start discovery name run-import nodes 192.168.1.1 snmp public,private",  # NOQA
            {'name': "run-import",
             'nodes': "192.168.1.1",
             'poller': 'primary',
             'snmp_communities': "public,private"})

    def test_start_discovery_poller(self):
        # Trailing token selects an explicit poller.
        self._assert_discovery(
            "orion start discovery name run-import nodes 192.168.1.1 snmp public,private poller2",  # NOQA
            {'name': "run-import",
             'nodes': "192.168.1.1",
             'poller': 'poller2',
             'snmp_communities': "public,private"})
class NcmConfigDownloadActionAliasTestCase(BaseActionAliasTestCase):
    """Tests for the 'ncm_config_download' action alias."""

    action_alias_name = 'ncm_config_download'

    def test_ncm_config_download_alias(self):
        """The node name is extracted as the only parameter."""
        command = "orion ncm config-download router1"
        self.assertExtractedParametersMatch(
            format_string=self.action_alias_db.formats[0]['representation'][0],
            command=command,
            parameters={'node': 'router1'})
        self.assertCommandMatchesExactlyOneFormatString(
            format_strings=self.action_alias_db.get_format_strings(),
            command=command)
class NodeStatusActionAliasTestCase(BaseActionAliasTestCase):
    """Tests for the 'node_status' action alias."""

    action_alias_name = 'node_status'

    def test_node_status_alias(self):
        """The node name is extracted as the only parameter."""
        command = "orion node status router1"
        self.assertExtractedParametersMatch(
            format_string=self.action_alias_db.formats[0]['representation'][0],
            command=command,
            parameters={'node': 'router1'})
        self.assertCommandMatchesExactlyOneFormatString(
            format_strings=self.action_alias_db.get_format_strings(),
            command=command)
class NodeCreateActionAliasTestCase(BaseActionAliasTestCase):
    """Tests for the 'node_create' action alias.

    The alias has two representations — ``orion node create ...`` and
    ``create orion node ...`` — each exercised with and without the
    optional SNMP community and poller tokens.

    The original auto-expanded version contained two byte-for-byte
    duplicated command/expectation blocks; these have been removed and the
    repeated assertion pair factored into :meth:`_assert_alias_match`.
    """

    action_alias_name = 'node_create'

    def _assert_alias_match(self, format_string, command, expected_parameters):
        """Assert *command* parses against *format_string* with
        *expected_parameters*, and matches exactly one alias format."""
        self.assertExtractedParametersMatch(format_string=format_string,
                                            command=command,
                                            parameters=expected_parameters)
        self.assertCommandMatchesExactlyOneFormatString(
            format_strings=self.action_alias_db.get_format_strings(),
            command=command)

    def test_node_create_alias(self):
        # First representation: 'orion node create ...'
        format_string = self.action_alias_db.formats[0]['representation'][0]
        cases = [
            # Explicit community, default poller.
            ("orion node create router1 ip 192.168.0.1 snmp read",
             {'ip_address': "192.168.0.1", 'node': 'router1',
              'poller': 'primary', 'community': 'read'}),
            # Explicit poller, no community.
            ("orion node create router1 ip 192.168.0.1 poller1",
             {'ip_address': "192.168.0.1", 'node': 'router1',
              'poller': 'poller1', 'community': None}),
            # Bare form: all optionals defaulted.
            ("orion node create router1 ip 192.168.0.1",
             {'ip_address': "192.168.0.1", 'node': 'router1',
              'poller': 'primary', 'community': None}),
        ]
        for command, expected_parameters in cases:
            self._assert_alias_match(format_string, command,
                                     expected_parameters)

        # Second representation: 'create orion node ...'
        format_string = self.action_alias_db.formats[1]['representation'][0]
        cases = [
            # Both community and poller given.
            ("create orion node router1 at 192.168.0.1 with read on poller1",
             {'ip_address': "192.168.0.1", 'node': 'router1',
              'poller': 'poller1', 'community': 'read'}),
            # Community only; poller defaults.
            ("create orion node router1 at 192.168.0.1 with read",
             {'ip_address': "192.168.0.1", 'node': 'router1',
              'poller': 'primary', 'community': 'read'}),
            # Bare form: all optionals defaulted.
            ("create orion node router1 at 192.168.0.1",
             {'ip_address': "192.168.0.1", 'node': 'router1',
              'poller': 'primary', 'community': None}),
        ]
        for command, expected_parameters in cases:
            self._assert_alias_match(format_string, command,
                                     expected_parameters)
| {
"content_hash": "0730627f4198dc0b2a7881649943def9",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 111,
"avg_line_length": 41.645320197044335,
"alnum_prop": 0.5590253134610835,
"repo_name": "tonybaloney/st2contrib",
"id": "07145590c5ff00a9a859426e993fc258a80f6198",
"size": "9201",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "packs/orion/tests/test_action_aliases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5392"
},
{
"name": "Python",
"bytes": "1285946"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7547"
}
],
"symlink_target": ""
} |
"""Module définissant une classe statique pour tester les commandes."""
from primaires.joueur.contextes.connexion.mode_connecte import ModeConnecte
class TestCommande:
    """Static helper for executing commands inside tests."""

    @staticmethod
    def entrer_commande(joueur, message):
        """Run *message* as a command issued by *joueur*.

        Clears the player's pending connection messages, interprets the
        command in the player's current context, and returns whatever output
        the command produced, joined with newlines.
        """
        # Clear in place: other references to the messages list stay valid.
        del joueur.instance_connexion.messages[:]
        ModeConnecte.interpreter(joueur.contexte_actuel, message)
        return "\n".join(joueur.instance_connexion.messages)
| {
"content_hash": "8d3aa210d6b3a9812945e13625bbc603",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 75,
"avg_line_length": 36.214285714285715,
"alnum_prop": 0.7218934911242604,
"repo_name": "stormi/tsunami",
"id": "24532fcf1ce37e724c2f7b9c2ee63a81602ba657",
"size": "2073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/primaires/connex/static/commande.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.