text stringlengths 4 1.02M | meta dict |
|---|---|
import os
from util import run
from deepy.utils import GaussianInitializer

# Saved-model path for this initialization-scheme experiment.
model_path = os.path.join(os.path.dirname(__file__), "models", "gaussian1.gz")

if __name__ == '__main__':
    # std must be 0.1 in this case, or training will not converge
    run(GaussianInitializer(deviation=0.1), model_path) | {
"content_hash": "6081f72fd61f3339c9a69f80d98fad10",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 78,
"avg_line_length": 34.77777777777778,
"alnum_prop": 0.7028753993610224,
"repo_name": "dlacombejr/deepy",
"id": "fbf7d7bbb0028b7e6fcb0959c8a77a77e17b3793",
"size": "360",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "experiments/initialization_schemes/gaussian.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "15998"
},
{
"name": "Python",
"bytes": "239618"
},
{
"name": "Shell",
"bytes": "504"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import io
import os
import re
import types
from datetime import datetime, timedelta
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.core.validators import (
BaseValidator, EmailValidator, MaxLengthValidator, MaxValueValidator,
MinLengthValidator, MinValueValidator, RegexValidator, URLValidator,
int_list_validator, validate_comma_separated_integer_list, validate_email,
validate_integer, validate_ipv4_address, validate_ipv6_address,
validate_ipv46_address, validate_slug,
)
from django.test import SimpleTestCase
from django.test.utils import str_prefix
from django.utils._os import upath
NOW = datetime.now()
EXTENDED_SCHEMES = ['http', 'https', 'ftp', 'ftps', 'git', 'file']
TEST_DATA = [
# (validator, value, expected),
(validate_integer, '42', None),
(validate_integer, '-42', None),
(validate_integer, -42, None),
(validate_integer, -42.5, None),
(validate_integer, None, ValidationError),
(validate_integer, 'a', ValidationError),
(validate_email, 'email@here.com', None),
(validate_email, 'weirder-email@here.and.there.com', None),
(validate_email, 'email@[127.0.0.1]', None),
(validate_email, 'email@[2001:dB8::1]', None),
(validate_email, 'email@[2001:dB8:0:0:0:0:0:1]', None),
(validate_email, 'email@[::fffF:127.0.0.1]', None),
(validate_email, 'example@valid-----hyphens.com', None),
(validate_email, 'example@valid-with-hyphens.com', None),
(validate_email, 'test@domain.with.idn.tld.उदाहरण.परीक्षा', None),
(validate_email, 'email@localhost', None),
(EmailValidator(whitelist=['localdomain']), 'email@localdomain', None),
(validate_email, '"test@test"@example.com', None),
(validate_email, None, ValidationError),
(validate_email, '', ValidationError),
(validate_email, 'abc', ValidationError),
(validate_email, 'abc@', ValidationError),
(validate_email, 'abc@bar', ValidationError),
(validate_email, 'a @x.cz', ValidationError),
(validate_email, 'abc@.com', ValidationError),
(validate_email, 'something@@somewhere.com', ValidationError),
(validate_email, 'email@127.0.0.1', ValidationError),
(validate_email, 'email@[127.0.0.256]', ValidationError),
(validate_email, 'email@[2001:db8::12345]', ValidationError),
(validate_email, 'email@[2001:db8:0:0:0:0:1]', ValidationError),
(validate_email, 'email@[::ffff:127.0.0.256]', ValidationError),
(validate_email, 'example@invalid-.com', ValidationError),
(validate_email, 'example@-invalid.com', ValidationError),
(validate_email, 'example@invalid.com-', ValidationError),
(validate_email, 'example@inv-.alid-.com', ValidationError),
(validate_email, 'example@inv-.-alid.com', ValidationError),
(validate_email, 'test@example.com\n\n<script src="x.js">', ValidationError),
# Quoted-string format (CR not allowed)
(validate_email, '"\\\011"@here.com', None),
(validate_email, '"\\\012"@here.com', ValidationError),
(validate_email, 'trailingdot@shouldfail.com.', ValidationError),
# Max length of domain name in email is 249 (see validator for calculation)
(validate_email, 'a@%s.us' % ('a' * 249), None),
(validate_email, 'a@%s.us' % ('a' * 250), ValidationError),
(validate_slug, 'slug-ok', None),
(validate_slug, 'longer-slug-still-ok', None),
(validate_slug, '--------', None),
(validate_slug, 'nohyphensoranything', None),
(validate_slug, '', ValidationError),
(validate_slug, ' text ', ValidationError),
(validate_slug, ' ', ValidationError),
(validate_slug, 'some@mail.com', ValidationError),
(validate_slug, '你好', ValidationError),
(validate_slug, '\n', ValidationError),
(validate_ipv4_address, '1.1.1.1', None),
(validate_ipv4_address, '255.0.0.0', None),
(validate_ipv4_address, '0.0.0.0', None),
(validate_ipv4_address, '256.1.1.1', ValidationError),
(validate_ipv4_address, '25.1.1.', ValidationError),
(validate_ipv4_address, '25,1,1,1', ValidationError),
(validate_ipv4_address, '25.1 .1.1', ValidationError),
# validate_ipv6_address uses django.utils.ipv6, which
# is tested in much greater detail in its own testcase
(validate_ipv6_address, 'fe80::1', None),
(validate_ipv6_address, '::1', None),
(validate_ipv6_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv6_address, '1:2', ValidationError),
(validate_ipv6_address, '::zzz', ValidationError),
(validate_ipv6_address, '12345::', ValidationError),
(validate_ipv46_address, '1.1.1.1', None),
(validate_ipv46_address, '255.0.0.0', None),
(validate_ipv46_address, '0.0.0.0', None),
(validate_ipv46_address, 'fe80::1', None),
(validate_ipv46_address, '::1', None),
(validate_ipv46_address, '1:2:3:4:5:6:7:8', None),
(validate_ipv46_address, '256.1.1.1', ValidationError),
(validate_ipv46_address, '25.1.1.', ValidationError),
(validate_ipv46_address, '25,1,1,1', ValidationError),
(validate_ipv46_address, '25.1 .1.1', ValidationError),
(validate_ipv46_address, '1:2', ValidationError),
(validate_ipv46_address, '::zzz', ValidationError),
(validate_ipv46_address, '12345::', ValidationError),
(validate_comma_separated_integer_list, '1', None),
(validate_comma_separated_integer_list, '12', None),
(validate_comma_separated_integer_list, '1,2', None),
(validate_comma_separated_integer_list, '1,2,3', None),
(validate_comma_separated_integer_list, '10,32', None),
(validate_comma_separated_integer_list, '', ValidationError),
(validate_comma_separated_integer_list, 'a', ValidationError),
(validate_comma_separated_integer_list, 'a,b,c', ValidationError),
(validate_comma_separated_integer_list, '1, 2, 3', ValidationError),
(validate_comma_separated_integer_list, ',', ValidationError),
(validate_comma_separated_integer_list, '1,2,3,', ValidationError),
(validate_comma_separated_integer_list, '1,2,', ValidationError),
(validate_comma_separated_integer_list, ',1', ValidationError),
(validate_comma_separated_integer_list, '1,,2', ValidationError),
(int_list_validator(sep='.'), '1.2.3', None),
(int_list_validator(sep='.'), '1,2,3', ValidationError),
(MaxValueValidator(10), 10, None),
(MaxValueValidator(10), -10, None),
(MaxValueValidator(10), 0, None),
(MaxValueValidator(NOW), NOW, None),
(MaxValueValidator(NOW), NOW - timedelta(days=1), None),
(MaxValueValidator(0), 1, ValidationError),
(MaxValueValidator(NOW), NOW + timedelta(days=1), ValidationError),
(MinValueValidator(-10), -10, None),
(MinValueValidator(-10), 10, None),
(MinValueValidator(-10), 0, None),
(MinValueValidator(NOW), NOW, None),
(MinValueValidator(NOW), NOW + timedelta(days=1), None),
(MinValueValidator(0), -1, ValidationError),
(MinValueValidator(NOW), NOW - timedelta(days=1), ValidationError),
(MaxLengthValidator(10), '', None),
(MaxLengthValidator(10), 10 * 'x', None),
(MaxLengthValidator(10), 15 * 'x', ValidationError),
(MinLengthValidator(10), 15 * 'x', None),
(MinLengthValidator(10), 10 * 'x', None),
(MinLengthValidator(10), '', ValidationError),
(URLValidator(EXTENDED_SCHEMES), 'file://localhost/path', None),
(URLValidator(EXTENDED_SCHEMES), 'git://example.com/', None),
(URLValidator(EXTENDED_SCHEMES), 'git://-invalid.com', ValidationError),
(BaseValidator(True), True, None),
(BaseValidator(True), False, ValidationError),
(RegexValidator(), '', None),
(RegexValidator(), 'x1x2', None),
(RegexValidator('[0-9]+'), 'xxxxxx', ValidationError),
(RegexValidator('[0-9]+'), '1234', None),
(RegexValidator(re.compile('[0-9]+')), '1234', None),
(RegexValidator('.*'), '', None),
(RegexValidator(re.compile('.*')), '', None),
(RegexValidator('.*'), 'xxxxx', None),
(RegexValidator('x'), 'y', ValidationError),
(RegexValidator(re.compile('x')), 'y', ValidationError),
(RegexValidator('x', inverse_match=True), 'y', None),
(RegexValidator(re.compile('x'), inverse_match=True), 'y', None),
(RegexValidator('x', inverse_match=True), 'x', ValidationError),
(RegexValidator(re.compile('x'), inverse_match=True), 'x', ValidationError),
(RegexValidator('x', flags=re.IGNORECASE), 'y', ValidationError),
(RegexValidator('a'), 'A', ValidationError),
(RegexValidator('a', flags=re.IGNORECASE), 'A', None),
]
def create_path(filename):
    """Return the absolute path of *filename*, resolved relative to the
    directory containing this test module."""
    base_dir = os.path.dirname(upath(__file__))
    return os.path.abspath(os.path.join(base_dir, filename))
# Add valid and invalid URL tests.
# This only tests the validator without extended schemes.
with io.open(create_path('valid_urls.txt'), encoding='utf8') as f:
for url in f:
TEST_DATA.append((URLValidator(), url.strip(), None))
with io.open(create_path('invalid_urls.txt'), encoding='utf8') as f:
for url in f:
TEST_DATA.append((URLValidator(), url.strip(), ValidationError))
def create_simple_test_method(validator, expected, value, num):
    """Build one (test_name, test_function) pair for a TEST_DATA row.

    When *expected* is an exception class, the generated test asserts that
    validating *value* raises it; otherwise it asserts the validator
    returns *expected* without raising ValidationError. *num* keeps the
    generated method names unique.
    """
    expects_error = expected is not None and issubclass(expected, Exception)
    if expects_error:
        test_mask = 'test_%s_raises_error_%d'

        def test_func(self):
            # A manual try/except (instead of assertRaises) lets the
            # failure message include the value that was being validated.
            try:
                validator(value)
            except expected:
                pass
            else:
                self.fail("%s not raised when validating '%s'" % (
                    expected.__name__, value))
    else:
        test_mask = 'test_%s_%d'

        def test_func(self):
            try:
                self.assertEqual(expected, validator(value))
            except ValidationError as e:
                self.fail("Validation of '%s' failed. Error message was: %s" % (
                    value, str(e)))

    # Plain functions are named by themselves; validator instances by class.
    if isinstance(validator, types.FunctionType):
        val_name = validator.__name__
    else:
        val_name = validator.__class__.__name__
    return test_mask % (val_name, num), test_func
# Dynamically assemble a test class with the contents of TEST_DATA
class TestSimpleValidators(SimpleTestCase):
    """Hand-written checks for ValidationError string rendering plus a few
    validator behaviours that do not fit the TEST_DATA table."""

    def test_single_message(self):
        v = ValidationError('Not Valid')
        self.assertEqual(str(v), str_prefix("[%(_)s'Not Valid']"))
        self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'Not Valid'])"))

    def test_message_list(self):
        v = ValidationError(['First Problem', 'Second Problem'])
        self.assertEqual(str(v), str_prefix("[%(_)s'First Problem', %(_)s'Second Problem']"))
        self.assertEqual(repr(v), str_prefix("ValidationError([%(_)s'First Problem', %(_)s'Second Problem'])"))

    def test_message_dict(self):
        v = ValidationError({'first': ['First Problem']})
        self.assertEqual(str(v), str_prefix("{%(_)s'first': [%(_)s'First Problem']}"))
        self.assertEqual(repr(v), str_prefix("ValidationError({%(_)s'first': [%(_)s'First Problem']})"))

    def test_regex_validator_flags(self):
        # flags are only valid together with a string pattern, not a
        # pre-compiled regex; constructing with both must raise TypeError.
        try:
            RegexValidator(re.compile('a'), flags=re.IGNORECASE)
        except TypeError:
            pass
        else:
            self.fail("TypeError not raised when flags and pre-compiled regex in RegexValidator")

    def test_max_length_validator_message(self):
        # Custom message with %(value)s / %(limit_value)d interpolation.
        v = MaxLengthValidator(16, message='"%(value)s" has more than %(limit_value)d characters.')
        with self.assertRaisesMessage(ValidationError, '"djangoproject.com" has more than 16 characters.'):
            v('djangoproject.com')
# Attach one generated test method per TEST_DATA row to the class above;
# the counter keeps the generated method names unique.
test_counter = 0
for validator, value, expected in TEST_DATA:
    name, method = create_simple_test_method(validator, expected, value, test_counter)
    setattr(TestSimpleValidators, name, method)
    test_counter += 1
class TestValidatorEquality(TestCase):
    """
    Tests that validators have valid equality operators (#21638)
    """

    def test_regex_equality(self):
        self.assertEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
        )
        self.assertNotEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
            RegexValidator(r'^(?:[0-9\.\-]*)://'),
        )
        # message and code participate in equality as well as the pattern
        self.assertEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
        )
        self.assertNotEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh", "invalid"),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
        )
        self.assertNotEqual(
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://', "oh noes", "invalid"),
            RegexValidator(r'^(?:[a-z0-9\.\-]*)://'),
        )
        # flags and inverse_match also differentiate validators
        self.assertNotEqual(
            RegexValidator('', flags=re.IGNORECASE),
            RegexValidator(''),
        )
        self.assertNotEqual(
            RegexValidator(''),
            RegexValidator('', inverse_match=True),
        )

    def test_regex_equality_nocache(self):
        # Equality must survive re.purge() clearing the compiled-pattern cache.
        pattern = r'^(?:[a-z0-9\.\-]*)://'
        left = RegexValidator(pattern)
        re.purge()
        right = RegexValidator(pattern)
        self.assertEqual(
            left,
            right,
        )

    def test_regex_equality_blank(self):
        self.assertEqual(
            RegexValidator(),
            RegexValidator(),
        )

    def test_email_equality(self):
        self.assertEqual(
            EmailValidator(),
            EmailValidator(),
        )
        self.assertNotEqual(
            EmailValidator(message="BAD EMAIL"),
            EmailValidator(),
        )
        self.assertEqual(
            EmailValidator(message="BAD EMAIL", code="bad"),
            EmailValidator(message="BAD EMAIL", code="bad"),
        )

    def test_basic_equality(self):
        self.assertEqual(
            MaxValueValidator(44),
            MaxValueValidator(44),
        )
        # Different validator classes with the same limit are not equal.
        self.assertNotEqual(
            MaxValueValidator(44),
            MinValueValidator(44),
        )
        self.assertNotEqual(
            MinValueValidator(45),
            MinValueValidator(11),
        )
| {
"content_hash": "3b19ea7dcc37fda461a54b6b047174f0",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 111,
"avg_line_length": 39.58888888888889,
"alnum_prop": 0.6230704462531574,
"repo_name": "marqueedev/django",
"id": "4be4b9de4f6577d4dbb7206e5e67eed36f072547",
"size": "14306",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/validators/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43000"
},
{
"name": "HTML",
"bytes": "171155"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "10710867"
},
{
"name": "Shell",
"bytes": "3056"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
'''
@author: rmahfoud
'''
import UrbanOperationsResearchBook.settings as settings
import UrbanOperationsResearchBook as uor
import os, re, shutil
import logging
logger = logging.getLogger(__name__)
class UORProblemsGenerator():
    """Collates the individual problem files of each book chapter into a
    single ``problems.html`` content file per chapter."""

    # Placeholder used to mark sections whose content file is missing.
    MISSING_CONTENT_FILE = "#"
    current_indent = 0

    def __init__(self, book, *args, **kwargs):
        self.book = book

    def relative_url(self, path):
        """Return *path* expressed relative to the book's base directory."""
        return uor.relative_url(self.book['base_dir'], path)

    def generateProblemsContent(self):
        """Generate the collated problems content file for every chapter."""
        for chapter_no in self.book['chapters'].keys():
            self.generateChapterProblemsContent(self.book['chapters'][chapter_no])

    def generateChapterProblemsContent(self, chapter):
        """Collate all problems of *chapter* into one HTML content file.

        Does nothing when the chapter has no pending problems section.

        Raises:
            ValueError: if the chapter has more than one problems section,
                or its problems section contains no problems.
        """
        sections = [section for section in chapter['sections']
                    if section['section_type'] == 'problems' and 'content_file' not in section]
        if len(sections) == 0:
            return
        if len(sections) > 1:
            # BUG FIX: raising a plain string ("string exceptions") is
            # invalid Python — raise a real exception type instead.
            raise ValueError("More than one problem sections in chapter %s" % chapter['no'])
        section = sections[0]
        problems = section['problems']
        if len(problems) == 0:
            raise ValueError("Problem section contains no problems in chapter %s" % chapter['no'])
        templates_dir = os.path.abspath(os.path.dirname(__file__) + "/../../epub/content")
        dir_name = uor.relative_url(settings.ROOT_URL, section['url'])
        section['content_file'] = os.path.join(settings.CONTENT_DIR, dir_name, 'problems.html')
        logger.debug("Collating problems for chapter %s in %s" % (chapter['no'], section['content_file']))
        # Start from the header template, then append each problem's <body>
        # content, and finally the footer template.
        shutil.copy(os.path.join(templates_dir, "problems.html.1"), section['content_file'])
        with open(section['content_file'], 'at') as fd:
            for problem in problems:
                with open(problem['content_file'], 'rt') as pfd:
                    logger.debug("Collating problem %s..." % (problem['no']))
                    problem_body = pfd.read()
                    # Extract just the <body> inner HTML of the problem file.
                    problem_body = re.match(r'^.*?<body>(.*)</body>.*$', problem_body, re.S).group(1)
                    fd.write(problem_body)
                    fd.write("\n<br/><br/>\n")
            with open(os.path.join(templates_dir, "problems.html.2"), 'rt') as fd2:
                fd.write(fd2.read())
| {
"content_hash": "204e2dafada2bd1081baa5cdc1adc532",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 106,
"avg_line_length": 44.35294117647059,
"alnum_prop": 0.5937223695844386,
"repo_name": "rmahfoud/UrbanOperationsResearchBook",
"id": "fde7bdeff2786676884114fbc29fcc97e57b2170",
"size": "2262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UrbanOperationsResearchBook/epub/uor_problems.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1251"
},
{
"name": "Groff",
"bytes": "4393"
},
{
"name": "HTML",
"bytes": "1332"
},
{
"name": "Python",
"bytes": "31155"
}
],
"symlink_target": ""
} |
import logging
logger = logging.getLogger("tdldk")
def set_log_level(level, handler=None):
    """ Set both tdldk and Bambou log level to the given level

        Args:
            level (logging.level): a logging level
            handler (logging.Handler): a logging handler

        Notes:
            if no handler is provided, it will automatically create a new StreamHandler.

        Examples:
            >>> set_log_level(logging.INFO)
            >>> set_log_level(logging.DEBUG, logging.StreamHandler())
            >>> set_log_level(logging.ERROR)
    """
    # Imported lazily so importing this module does not require bambou
    # until logging is actually configured.
    from bambou import bambou_logger, pushcenter_logger
    if handler is None:
        handler = logging.StreamHandler()
    # Apply the same level/handler to all three loggers.
    bambou_logger.setLevel(level)
    bambou_logger.addHandler(handler)
    pushcenter_logger.setLevel(level)
    pushcenter_logger.addHandler(handler)
    logger.setLevel(level)
    logger.addHandler(handler) | {
"content_hash": "7522cd17e76b050afcd50a4fb54b3e04",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 88,
"avg_line_length": 27.454545454545453,
"alnum_prop": 0.6534216335540839,
"repo_name": "little-dude/monolithe",
"id": "b2ccb0e81e7cf10a6f6ae1512b73e3a17c7e5be7",
"size": "1036",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/base/sdk/python/tdldk/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16165"
},
{
"name": "HTML",
"bytes": "983593"
},
{
"name": "JavaScript",
"bytes": "93413"
},
{
"name": "Python",
"bytes": "378390"
},
{
"name": "Smarty",
"bytes": "154277"
}
],
"symlink_target": ""
} |
"""
Memento pattern
http://code.activestate.com/recipes/413838-memento-closure/
The memento pattern is great for transaction-like processing. Having a handy implementation around might not be the worst thing.
"""
import copy
def Memento(obj, deep=False):
    """Snapshot *obj*'s attribute dict and return a closure that, when
    called, restores the object to that captured state.

    With ``deep=True`` the snapshot is a deep copy, so mutations of
    contained objects are rolled back too.
    """
    snapshot = copy.deepcopy(obj.__dict__) if deep else copy.copy(obj.__dict__)

    def Restore():
        obj.__dict__.clear()
        obj.__dict__.update(snapshot)
    return Restore
class Transaction:
    """A transaction guard — syntactic sugar around a memento closure.

    Snapshots every target on construction; ``Rollback()`` restores each
    target to the most recently committed snapshot.
    """
    deep = False  # subclasses may opt into deep-copy snapshots

    def __init__(self, *targets):
        self.targets = targets
        self.Commit()

    def Commit(self):
        """Re-snapshot all targets; this becomes the new rollback point."""
        self.states = [Memento(target, self.deep) for target in self.targets]

    def Rollback(self):
        """Restore every target to its last committed snapshot."""
        for restore in self.states:
            restore()
class transactional:
    """Descriptor decorator adding transactional semantics to methods.

    Methods decorated with @transactional roll the instance back to its
    entry state when they raise, then re-raise the original exception.
    """

    def __init__(self, method):
        self.method = method

    def __get__(self, obj, T):
        def transaction(*args, **kwargs):
            state = Memento(obj)
            try:
                return self.method(obj, *args, **kwargs)
            # IMPROVED: explicit BaseException instead of a bare ``except:``
            # — same behavior (everything is caught and re-raised after the
            # rollback), but no linter-flagged bare except clause.
            except BaseException:
                state()
                raise
        return transaction
if __name__ == '__main__':
    # Demo (Python 2 syntax — bare ``print`` statements): exercises
    # Transaction rollback and the @transactional decorator.
    class NumObj(object):
        def __init__(self, value):
            self.value = value

        def __repr__(self):
            return '<%s: %r>' % (self.__class__.__name__, self.value)

        def Increment(self):
            self.value += 1

        @transactional
        def DoStuff(self):
            self.value = '1111'  # <- invalid value
            self.Increment()  # <- will fail and rollback

    print
    n = NumObj(-1)
    print n
    t = Transaction(n)
    try:
        for i in range(3):
            n.Increment()
            print n
        t.Commit()
        print '-- commited'
        for i in range(3):
            n.Increment()
            print n
        n.value += 'x'  # will fail
        print n
    except:
        # Any failure restores n to the last committed snapshot.
        t.Rollback()
        print '-- rolled back'
    print n
    print '-- now doing stuff ...'
    try:
        n.DoStuff()
    except:
        # @transactional already rolled back; just report the failure.
        print '-> doing stuff failed!'
        import traceback
        traceback.print_exc(0)
        pass
    print n
| {
"content_hash": "b877346d581b026b19cee6ba67a83b7c",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 128,
"avg_line_length": 22.482758620689655,
"alnum_prop": 0.6595092024539877,
"repo_name": "PoisonBOx/design-patterns",
"id": "3fdad76369e03b81657debbbcee041a2e1cf404f",
"size": "2002",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/memento/python/memento.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "43962"
},
{
"name": "Java",
"bytes": "37325"
},
{
"name": "Makefile",
"bytes": "4372"
},
{
"name": "PHP",
"bytes": "42495"
},
{
"name": "Perl",
"bytes": "37777"
},
{
"name": "Perl6",
"bytes": "294"
},
{
"name": "Python",
"bytes": "43429"
},
{
"name": "Ruby",
"bytes": "6438"
},
{
"name": "Shell",
"bytes": "1240"
}
],
"symlink_target": ""
} |
from unittest.mock import patch, Mock
from remote_host_event_logging.public import RemoteHostEventLogger
from ..filesystem_mounting import FilesystemMountCommand
from ..device_identification import DeviceIdentificationCommand
from .utils import MigrationCommanderTestCase
class TestFilesystemMountCommand(MigrationCommanderTestCase):
    """Checks that FilesystemMountCommand mounts the migrated devices and
    persists them in the target host's /etc/fstab."""

    def test_execute__mount_applied(self):
        self._init_test_data('ubuntu16', 'target__device_identification')
        FilesystemMountCommand(self.source).execute()
        self.assertIn('sudo mount -a', self.executed_commands)

    def test_execute__fstab_edited(self):
        # One fstab entry per filesystem UUID, appended via a remote echo;
        # mountpoints are remapped through DeviceIdentificationCommand.
        self._init_test_data('ubuntu16', 'target__device_identification')
        FilesystemMountCommand(self.source).execute()
        self.assertIn(
            (
                'sudo bash -c "echo -e \\"'
                'UUID=549c8755-2757-446e-8c78-f76b50491f21\t'
                + DeviceIdentificationCommand._map_mountpoint('/')
                + '\text4\tdefaults\t0\t2'
                '\\" >> /etc/fstab"'
            ),
            self.executed_commands
        )
        self.assertIn(
            (
                'sudo bash -c "echo -e \\"'
                'UUID=53ad2170-488d-481a-a6ab-5ce0e538f247\t'
                + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc1')
                + '\text4\tdefaults\t0\t2'
                '\\" >> /etc/fstab"'
            ),
            self.executed_commands
        )
        self.assertIn(
            (
                'sudo bash -c "echo -e \\"'
                'UUID=bcab224c-8407-4783-8cea-f9ea4be3fabf\t'
                + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc2')
                + '\text4\tdefaults\t0\t2'
                '\\" >> /etc/fstab"'
            ),
            self.executed_commands
        )

    def test_execute__mount_dirs_created(self):
        # Every (remapped) mountpoint directory must be created beforehand.
        self._init_test_data('ubuntu16', 'target__device_identification')
        FilesystemMountCommand(self.source).execute()
        self.assertIn(
            'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/'),
            self.executed_commands
        )
        self.assertIn(
            'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc1'),
            self.executed_commands
        )
        self.assertIn(
            'sudo mkdir -p ' + DeviceIdentificationCommand._map_mountpoint('/mnt/vdc2'),
            self.executed_commands
        )

    # Force the remote fstab edit to blow up, so the command must wrap the
    # failure in its own MountingException.
    @patch(
        'migration_commander.remote_file_edit.RemoteFileEditor.append',
        Mock(side_effect=Exception())
    )
    def test_execute__failed(self):
        self._init_test_data('ubuntu16', 'target__device_identification')
        with RemoteHostEventLogger.DisableLoggingContextManager():
            with self.assertRaises(FilesystemMountCommand.MountingException):
                FilesystemMountCommand(self.source).execute()

    def test_execute__with_swap(self):
        # ubuntu12 fixture includes a swap device; execute() must not choke.
        self._init_test_data('ubuntu12', 'target__device_identification')
        FilesystemMountCommand(self.source).execute()
| {
"content_hash": "b219cc5bdc8bc6c95ce4646e0c65c52d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 88,
"avg_line_length": 35.310344827586206,
"alnum_prop": 0.5989583333333334,
"repo_name": "jdepoix/goto_cloud",
"id": "80ed64fdce9e1b564ee1842e39bc09c80184b169",
"size": "3072",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "goto_cloud/migration_commander/tests/test_filesystem_mounting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "354421"
},
{
"name": "Shell",
"bytes": "619"
}
],
"symlink_target": ""
} |
import boto.swf
import settings as settingsLib
import log
import json
import random
import os
import importlib
import time
import newrelic.agent
from provider import process
from optparse import OptionParser
import activity
from activity.activity import activity as activitybase
# Add parent directory for imports, so activity classes can use elife-poa-xml-generation
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
os.sys.path.insert(0, parentdir)  # NOTE(review): ``os.sys`` works, but ``import sys; sys.path`` is clearer

"""
Amazon SWF worker
"""
def work(ENV, flag):
    """Poll Amazon SWF for activity tasks and execute them until *flag* stops.

    Each polled task is dispatched to its matching activity class
    (``activity.activity_<activityType>``). The activity's result is then
    reported back to SWF as completed, failed, or — for permanent
    failures — a workflow-cancel request.

    Args:
        ENV: environment name used to load settings (e.g. 'dev', 'live')
        flag: object whose ``green()`` method returns False when the
            worker should shut down gracefully
    """
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)

    # Log with a per-process identity so parallel workers are separable
    identity = "worker_%s" % os.getpid()
    logger = log.logger("worker.log", settings.setLevel, identity)

    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

    token = None
    application = newrelic.agent.application()

    # Poll for an activity task indefinitely
    while flag.green():
        if token is None:
            logger.info('polling for activity...')
            activity_task = conn.poll_for_activity_task(settings.domain,
                                                        settings.default_task_list, identity)
            token = get_taskToken(activity_task)
            logger.info('got activity: \n%s' % json.dumps(activity_task, sort_keys=True, indent=4))

        # Complete the activity based on data and activity type
        activity_result = False
        if token is not None:
            # Get the activityType and attempt to do the work
            activityType = get_activityType(activity_task)
            if activityType is not None:
                logger.info('activityType: %s' % activityType)
                # Build a string for the object name
                activity_name = get_activity_name(activityType)
                with newrelic.agent.BackgroundTask(application, name=activity_name, group='worker.py'):
                    # Attempt to import the module for the activity
                    if import_activity_class(activity_name):
                        # Instantiate the activity object
                        activity_object = get_activity_object(activity_name, settings,
                                                              logger, conn, token, activity_task)
                        # Get the data to pass
                        data = get_input(activity_task)
                        # Do the activity
                        try:
                            activity_result = activity_object.do_activity(data)
                        except Exception:
                            logger.error('error executing activity %s' %
                                         activity_name, exc_info=True)
                        # Print the result to the log
                        logger.info('got result: \n%s' %
                                    json.dumps(activity_object.result, sort_keys=True, indent=4))
                        # Newer activities return a status string; legacy
                        # ones return a boolean.
                        # BUG FIX: was ``type(activity_result) == str`` —
                        # isinstance also accepts str subclasses.
                        if isinstance(activity_result, str):
                            if activity_result == activitybase.ACTIVITY_SUCCESS:
                                message = activity_object.result
                                respond_completed(conn, logger, token, message)
                            elif activity_result == activitybase.ACTIVITY_TEMPORARY_FAILURE:
                                reason = ('error: activity failed with result '
                                          + str(activity_object.result))
                                detail = ''
                                respond_failed(conn, logger, token, detail, reason)
                            else:
                                # (activitybase.ACTIVITY_PERMANENT_FAILURE or activitybase.ACTIVITY_EXIT_WORKFLOW)
                                signal_fail_workflow(conn, logger, settings.domain,
                                                     activity_task['workflowExecution']['workflowId'],
                                                     activity_task['workflowExecution']['runId'])
                        else:
                            # for legacy actions
                            # Complete the activity task if it was successful
                            if activity_result:
                                message = activity_object.result
                                respond_completed(conn, logger, token, message)
                            else:
                                reason = ('error: activity failed with result '
                                          + str(activity_object.result))
                                detail = ''
                                respond_failed(conn, logger, token, detail, reason)
                    else:
                        reason = 'error: could not load object %s\n' % activity_name
                        detail = ''
                        respond_failed(conn, logger, token, detail, reason)
                        logger.info('error: could not load object %s\n' % activity_name)

        # Reset and loop
        token = None

    logger.info("graceful shutdown")
def get_input(activity_task):
    """
    Given a response from polling for activity from SWF via boto,
    extract and JSON-decode the activity input, or return None when
    the response carries no "input" key.
    """
    try:
        # renamed local from ``input`` to avoid shadowing the builtin
        payload = json.loads(activity_task["input"])
    except KeyError:
        payload = None
    return payload
def get_taskToken(activity_task):
    """
    Extract the taskToken from a polled SWF activity response.
    Returns None when SWF handed out no task (empty poll response).
    """
    return activity_task.get("taskToken")
def get_activityType(activity_task):
    """
    Pull ``activityType.name`` out of a polled SWF activity response.
    Returns None when the response carries no activity (empty poll).
    """
    try:
        return activity_task["activityType"]["name"]
    except KeyError:
        # empty poll response — nothing to work on
        return None
def get_activity_name(activityType):
    """
    Map an SWF activityType to the name of the activity class
    that implements it (``activity_<activityType>``).
    """
    return "activity_%s" % activityType
def import_activity_class(activity_name):
    """
    Lazily import the module for *activity_name* (``activity.<name>``).

    Returns True when the module imported (and reloaded) successfully,
    False when it cannot be imported.
    """
    try:
        module_name = "activity." + activity_name
        importlib.import_module(module_name)
        # Reload the module, in case it was imported before
        reload_module(module_name)
        return True
    except ImportError:
        # IMPROVED: dropped the unused ``as e`` binding; the caller reports
        # the failure back to SWF.
        return False
def reload_module(module_name):
    """
    Given a module name, attempt to reload the module.
    Silently does nothing when the name cannot be resolved.
    """
    try:
        # ``eval`` resolves the dotted module path via the module objects
        # imported at file scope (e.g. ``activity``).
        # NOTE(review): ``reload`` is a builtin only in Python 2; under
        # Python 3 this raises NameError, which is silently caught below,
        # turning the reload into a no-op — consider
        # ``importlib.reload(sys.modules[module_name])`` instead.
        reload(eval(module_name))
    except NameError:
        pass
def get_activity_object(activity_name, settings, logger, conn, token, activity_task):
    """
    Instantiate and return the activity class named *activity_name* from
    the module ``activity.<activity_name>`` (already imported by
    import_activity_class; import_module just returns the cached module).
    """
    # IMPROVED: look the class up through importlib/getattr instead of
    # ``eval`` on a constructed string — same result for already-imported
    # modules, but no arbitrary-code-evaluation surface.
    module = importlib.import_module("activity." + activity_name)
    activity_class = getattr(module, activity_name)
    # Create the object
    return activity_class(settings, logger, conn, token, activity_task)
def _log_swf_response_error(logger, e):
logger.exception('SWFResponseError: status %s, reason %s, body %s', e.status, e.reason, e.body)
def respond_completed(conn, logger, token, message):
    """
    Report the activity identified by *token* back to SWF as completed,
    sending *message* as the result payload. SWF response errors are
    logged rather than propagated.
    """
    try:
        response = conn.respond_activity_task_completed(token, str(message))
        logger.info('respond_activity_task_completed returned %s' % response)
    except boto.exception.SWFResponseError as e:
        _log_swf_response_error(logger, e)
def respond_failed(conn, logger, token, details, reason):
    """
    Report the activity identified by *token* back to SWF as failed,
    passing along *details* and *reason*. SWF response errors are logged
    rather than propagated.
    """
    try:
        response = conn.respond_activity_task_failed(token, str(details), str(reason))
        logger.info('respond_activity_task_failed returned %s' % response)
    except boto.exception.SWFResponseError as e:
        _log_swf_response_error(logger, e)
def signal_fail_workflow(conn, logger, domain, workflow_id, run_id):
    """
    Given an SWF connection and logger as resources, request that SWF
    cancel the whole workflow execution identified by *workflow_id* /
    *run_id* — used to abandon the workflow after a permanent activity
    failure. SWF response errors are logged rather than propagated.
    """
    try:
        out = conn.request_cancel_workflow_execution(domain, workflow_id, run_id=run_id)
        logger.info('request_cancel_workflow_execution %s' % out)
    except boto.exception.SWFResponseError as e:
        _log_swf_response_error(logger, e)
if __name__ == "__main__":
    ENV = None
    # Parse the target environment (dev/live) from the command line.
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env", help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        ENV = options.env
    # Run the worker loop until interrupted; ``process.monitor_interrupt``
    # presumably flips ``flag`` on SIGINT/SIGTERM so work() exits its
    # polling loop — confirm against provider.process.
    process.monitor_interrupt(lambda flag: work(ENV, flag))
| {
"content_hash": "9167aa226d84236feb4e39443004a6ae",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 149,
"avg_line_length": 39.21632653061224,
"alnum_prop": 0.5678601165695254,
"repo_name": "jhroot/elife-bot",
"id": "4955f2c8d44f9d10690cb6e2c85912704c61b17d",
"size": "9608",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "55424"
},
{
"name": "HTML",
"bytes": "3975"
},
{
"name": "Python",
"bytes": "1263349"
},
{
"name": "Shell",
"bytes": "2363"
}
],
"symlink_target": ""
} |
import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# Compilation flags used when no compilation database is available.
flags = [
# BUG FIX: this entry was missing its trailing comma, so Python's implicit
# string concatenation silently merged it with '-Wall' into the single bogus
# flag '-DMUDUO_STD_STRING-Wall' (dropping -Wall entirely).
'-DMUDUO_STD_STRING',
'-Wall',
'-Wextra',
#'-Werror',
#'-Wc++98-compat',
'-Wno-long-long',
'-Wno-variadic-macros',
'-fexceptions',
'-stdlib=libc++',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-I',
'./vendor/leveldb/include',
'-I',
'./vendor/muduo/',
'-isystem',
'/usr/include',
'-isystem',
'/usr/local/include',
'-isystem',
'/Library/Developer/CommandLineTools/usr/include',
'-isystem',
'/Library/Developer/CommandLineTools/usr/bin/../lib/c++/v1',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# Load a compilation database when a folder is configured; otherwise fall back
# to the static 'flags' list above.
database = ( ycm_core.CompilationDatabase( compilation_database_folder )
             if os.path.exists( compilation_database_folder )
             else None )
# Extensions tried when looking for a source file matching a header.
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Return the absolute path of the directory holding this file."""
  script_path = os.path.abspath( __file__ )
  return os.path.dirname( script_path )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of *flags* with relative paths rooted at *working_directory*.

  Handles both the two-token form ('-I', 'path') and the fused form
  ('-Ipath') for the known path-taking flags.  If *working_directory* is
  falsy the flags are returned unchanged (as a new list).
  """
  if not working_directory:
    return list( flags )
  path_prefixes = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  expect_path = False
  for raw_flag in flags:
    resolved = raw_flag
    if expect_path:
      # Previous token was a bare path flag; this token is its path.
      expect_path = False
      if not raw_flag.startswith( '/' ):
        resolved = os.path.join( working_directory, raw_flag )
    for prefix in path_prefixes:
      if raw_flag == prefix:
        expect_path = True
        break
      if raw_flag.startswith( prefix ):
        relative_part = raw_flag[ len( prefix ): ]
        resolved = prefix + os.path.join( working_directory, relative_part )
        break
    if resolved:
      absolute_flags.append( resolved )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when *filename* carries a C/C++ header extension."""
  return os.path.splitext( filename )[ 1 ] in ( '.h', '.hxx', '.hpp', '.hh' )
def GetCompilationInfoForFile( filename ):
  """Look up compilation info for *filename* in the compilation database.

  CMake-generated databases carry no entries for headers, so for a header we
  try each sibling source file (same basename, one of SOURCE_EXTENSIONS) and
  use its flags instead.  Returns None when no usable entry is found.
  """
  if not IsHeaderFile( filename ):
    return database.GetCompilationInfoForFile( filename )
  stem = os.path.splitext( filename )[ 0 ]
  for source_ext in SOURCE_EXTENSIONS:
    candidate = stem + source_ext
    if not os.path.exists( candidate ):
      continue
    info = database.GetCompilationInfoForFile( candidate )
    if info.compiler_flags_:
      return info
  return None
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the clang flags to use for *filename*."""
  if database:
    # Note: compilation_info.compiler_flags_ is a "list-like" StringVec
    # object, not a real Python list.
    info = GetCompilationInfoForFile( filename )
    if not info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      info.compiler_flags_,
      info.compiler_working_dir_ )
  else:
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, DirectoryOfThisScript() )
  return {
    'flags': final_flags,
    'do_cache': True
  }
| {
"content_hash": "4fa25024671b91d9397b25ef3969593d",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 80,
"avg_line_length": 33.25547445255474,
"alnum_prop": 0.6830553116769096,
"repo_name": "liuluheng/levmu",
"id": "8f9ec3f586cc79f9fe453c840df719274b4198a1",
"size": "5956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": ".ycm_extra_conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "80278"
},
{
"name": "C++",
"bytes": "44658"
},
{
"name": "CMake",
"bytes": "1797"
},
{
"name": "Makefile",
"bytes": "1977"
},
{
"name": "Python",
"bytes": "22755"
},
{
"name": "Shell",
"bytes": "421"
}
],
"symlink_target": ""
} |
import re
import json
import requests
from datetime import date
from bs4 import BeautifulSoup
class SorosCrawler(object):
    '''Crawler that downloads a news site's front page, collects every
    hyperlink on it and keeps only the links mentioning "soros".

    Instantiate with a human-readable site name and the site URL, e.g.:
        obj = SorosCrawler('Best News Site', 'http://www.bestnewssite.com')'''

    def __init__(self, source_name, url):
        self.source_name = source_name
        self.url = url

    def get_links(self):
        '''Download the front page and return the href of every <a> tag.
        Returned as a set to drop duplicates (may contain None for
        anchors without an href).'''
        page = requests.get(self.url).content
        front_page = BeautifulSoup(page, 'html.parser')
        hrefs = [anchor.get('href') for anchor in front_page.find_all('a')]
        return set(hrefs)

    def keyword_filter(self, keyword, href_link):
        '''Return a normalised absolute URL when *keyword* occurs in
        *href_link* (and "cth" does not); otherwise None.'''
        lowered = href_link.lower()
        if not re.search(keyword, lowered) or re.search('cth', lowered):
            return None
        if href_link[0] == '/':
            # no double slashes
            href_link = href_link.strip('/')
        if href_link[:3] == 'www':
            href_link = 'http://' + href_link
        if not re.search(self.url, href_link):
            # sometimes the href doesn't have the full, absolute path
            href_link = self.url + href_link
        return href_link

    def parse_links(self):
        '''Collect (article title, article link) tuples for every link
        containing "soros"; incomplete hrefs are made absolute.'''
        articles = []
        for candidate in self.get_links():
            try:
                re.search('soros', candidate)
            except TypeError:
                # candidate was None (an <a> tag without an href)
                continue
            article_url = self.keyword_filter('soros', candidate)
            if article_url is None:
                continue
            print(article_url)
            try:
                page = requests.get(article_url).content
            except:
                print('Requests encountered an error with the link:\n', article_url)
            else:
                article_soup = BeautifulSoup(page, 'html.parser')
                articles.append((article_soup.title.string.strip('\n').strip(), article_url))
        return articles
def simple_log(source_website, links):
    '''Record today's scraped article links for *source_website* in
    tweet_log.json.

    Loads the existing log, creates today's date entry on first use,
    stores *links* (a list of (title, url) tuples) under the source's
    name and rewrites the whole file.  The file must already exist.
    '''
    with open('tweet_log.json', 'r+') as f:
        tweet_log = json.load(f)
    # setdefault replaces the old try/except KeyError dance for creating
    # today's entry on first write of the day.
    tweet_log.setdefault(str(date.today()), {})[source_website] = links
    with open('tweet_log.json', 'w+') as f:
        json.dump(tweet_log, f)
    print('Links for {} logged for {}'.format(source_website, date.today()))
    # NOTE: the original also called f.close() here, after the with-block had
    # already closed the file -- that redundant call has been removed.
def crawl_websites(websites):
    '''Scrape every configured source and return today's new articles.

    [In] -> dict of {source name: link to source's main page}
    [Out] -> dict of {source name: list of (title, link) tuples}

    Links already present in links.txt are dropped; the remainder are
    appended to the JSON log via simple_log().'''
    todays_articles = {}
    for source_name, source_url in websites.items():
        # Scrape this source's front page and keep only unseen links.
        crawler = SorosCrawler(source_name, source_url)
        print('\nGetting news from {}'.format(source_name))
        scraped = crawler.parse_links()
        print('Number of articles found: ', len(scraped))
        with open('links.txt', 'r+') as f:
            linkdb = f.read().split('\n')
        already_seen = []
        for article in scraped:
            print(article)
            # drop the ones that are already in the db
            if article[1] in linkdb:
                already_seen.append(article)
                print('Link already in list')
            else:
                print('New article found.')
        for article in already_seen:
            scraped.remove(article)
        simple_log(source_name, scraped)
        todays_articles[source_name] = scraped
    return todays_articles
if __name__ == '__main__':
    # Hungarian news sites to scan; keys are display names, values are the
    # front-page URLs handed to SorosCrawler.
    NEWS_SOURCES = {'Magyar Hírlap': 'http://magyarhirlap.hu/',
                    'Hirado.hu': 'http://www.hirado.hu/',
                    'Magyar Idők': 'http://magyaridok.hu/',
                    'Origo.hu': 'http://www.origo.hu/',
                    '888.hu': 'http://888.hu/'}
    print(crawl_websites(NEWS_SOURCES))
| {
"content_hash": "8419806bf0c8c63c6442835544189941",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 93,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.5558510638297872,
"repo_name": "analphagamma/SorosTracker9000",
"id": "49af1ea769c43dab6e9bbf552d605d52e90120a7",
"size": "4913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SCrawler.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "13855"
}
],
"symlink_target": ""
} |
"""
Various utilities.
"""
import bz2, gzip
def open_compressed( filename, mode='r' ):
    """Open *filename*, transparently handling .bz2 and .gz compression.

    Returns a file-like object: compressed files go through bz2/gzip
    (binary data), anything else through the plain builtin open.
    """
    if filename.endswith( ".bz2" ):
        return bz2.BZ2File( filename, mode )
    elif filename.endswith( ".gz" ):
        return gzip.GzipFile( filename, mode )
    else:
        # BUG FIX: `file` is the Python 2-only builtin; `open` is equivalent
        # there and also works on Python 3.
        return open( filename, mode )
| {
"content_hash": "2ab4a286a91511732004d7acea9c00f3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 46,
"avg_line_length": 23.23076923076923,
"alnum_prop": 0.6158940397350994,
"repo_name": "bxlab/HiFive_Paper",
"id": "94a3b1341901484c8677fe940dce9cd0b3690028",
"size": "302",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Scripts/HiCLib/bx-python-0.7.1/lib/bx/misc/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5096"
},
{
"name": "C",
"bytes": "107381"
},
{
"name": "C++",
"bytes": "182835"
},
{
"name": "CMake",
"bytes": "3353"
},
{
"name": "Forth",
"bytes": "152"
},
{
"name": "Makefile",
"bytes": "22978"
},
{
"name": "Perl",
"bytes": "25453"
},
{
"name": "Python",
"bytes": "4229513"
},
{
"name": "R",
"bytes": "43022"
},
{
"name": "Shell",
"bytes": "10798"
}
],
"symlink_target": ""
} |
""" --- Day 6: Probably a Fire Hazard ---
Because your neighbors keep defeating you in the holiday house
decorating contest year after year,
you've decided to deploy one million lights in a 1000x1000 grid.
Furthermore, because you've been especially nice this year,
Santa has mailed you instructions
on how to display the ideal lighting configuration.
Lights in your grid are numbered from 0 to 999 in each direction;
the lights at each corner are at 0,0, 0,999, 999,999, and 999,0.
The instructions include whether to turn on, turn off,
or toggle various inclusive ranges given as coordinate pairs.
Each coordinate pair represents opposite corners of a rectangle, inclusive;
a coordinate pair like 0,0 through 2,2
therefore refers to 9 lights in a 3x3 square.
The lights all start turned off.
To defeat your neighbors this year,
all you have to do is set up your lights by doing the
instructions Santa sent you in order.
For example:
turn on 0,0 through 999,999 would turn on (or leave on) every light.
toggle 0,0 through 999,0 would toggle the first line of 1000 lights,
turning off the ones that were on, and turning on the ones that were off.
turn off 499,499 through 500,500 would turn off
(or leave off) the middle four lights.
After following the instructions, how many lights are lit?
--- Part Two ---
You just finish implementing your winning light pattern when you realize
you mistranslated Santa's message from Ancient Nordic Elvish.
The light grid you bought actually has individual brightness controls;
each light can have a brightness of zero or more.
The lights all start at zero.
The phrase turn on actually means that you should
increase the brightness of those lights by 1.
The phrase turn off actually means that you should
decrease the brightness of those lights by 1, to a minimum of zero.
The phrase toggle actually means that you should
increase the brightness of those lights by 2.
What is the total brightness of all lights combined
after following Santa's instructions?
For example:
turn on 0,0 through 0,0 would increase the total brightness by 1.
toggle 0,0 through 999,999 would increase the total brightness
by 2000000. """
def get_answer(filename):
    """Return the total brightness of the 1000x1000 light grid after
    applying every instruction in *filename* (part-two semantics:
    on = +1, off = -1 floored at 0, toggle = +2)."""
    import re
    # 1000x1000 grid of brightness values, all starting at zero.
    grid = [[0 for _ in range(0, 1000)] for _ in range(0, 1000)]
    # Brightness adjustment applied for each instruction keyword.
    actions = {
        'toggle': lambda v: v + 2,
        'turn off': lambda v: v - 1 if v > 0 else 0,
        'turn on': lambda v: v + 1,
    }
    # Instruction format: "<command> x1,y1 through x2,y2".
    instruction = re.compile(r'^([a-zA-Z]+ *[a-zA-Z]*) (\d+),(\d+) through (\d+),(\d+)')
    with open(filename, 'r') as f:
        for line in f.readlines():
            command, x1, y1, x2, y2 = instruction.match(line).groups()
            for row in range(int(x1), int(x2) + 1):
                for col in range(int(y1), int(y2) + 1):
                    grid[row][col] = actions[command](grid[row][col])
    # Total brightness = sum of every cell.
    return sum(sum(row) for row in grid)
if __name__ == '__main__':
    # Solve the puzzle against the real input and print the total brightness.
    brightness = get_answer('./input.txt')
    print(brightness)
| {
"content_hash": "d5159ea276c4882248f6e9a9f8d09a41",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 75,
"avg_line_length": 35.84375,
"alnum_prop": 0.7058994478349317,
"repo_name": "coolharsh55/advent-of-code",
"id": "62b738ee108794b8ea8731d15407b7272826b1ec",
"size": "3484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2015/day06/probably_a_fire_hazard_part_2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "52247"
},
{
"name": "Jupyter Notebook",
"bytes": "257412"
},
{
"name": "Python",
"bytes": "227874"
}
],
"symlink_target": ""
} |
from django.db import models
from tests.default_app.models import BaseType
class Type(BaseType):
    # Concrete subclass of BaseType that adds a slug-style `code` field.
    code = models.SlugField()
| {
"content_hash": "1fad00e8d47d3aa3f5f3fccb10406aa6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 45,
"avg_line_length": 21.5,
"alnum_prop": 0.7674418604651163,
"repo_name": "jproffitt/django-swappable-models",
"id": "b704e64fc3789c5e6ef2f790fde52dfa6a943d61",
"size": "129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/alt_app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9489"
}
],
"symlink_target": ""
} |
import unittest
import os
import stat
import random
import shutil
import sys
import py_compile
import warnings
import marshal
import imp
from test.test_support import (unlink, TESTFN, unload, run_unittest,
check_warnings, TestFailed)
def remove_files(name):
    """Delete every source/compiled artefact for module *name* that exists:
    .py, .pyc, .pyo, .pyw and the Jython $py.class file."""
    artefacts = [name + os.extsep + ext for ext in ("py", "pyc", "pyo", "pyw")]
    artefacts.append(name + "$py.class")
    for path in artefacts:
        if os.path.exists(path):
            os.remove(path)
class ImportTest(unittest.TestCase):
    """Regression tests for the core import machinery (Python 2 era).

    Covers case-sensitivity, source/compiled round-trips, failing imports
    and reloads, and the name binding performed by the import statement.
    """
    def testCaseSensitivity(self):
        # Brief digression to test that import is case-sensitive: if we got this
        # far, we know for sure that "random" exists.
        try:
            import RAnDoM
        except ImportError:
            pass
        else:
            self.fail("import of RAnDoM should have failed (case mismatch)")
    def testDoubleConst(self):
        # Another brief digression to test the accuracy of manifest float constants.
        from test import double_const # don't blink -- that *was* the test
    def testImport(self):
        """Write a module to disk, import it, then reload it from the
        compiled .pyc/.pyo file."""
        def test_with_extension(ext):
            # ext normally ".py"; perhaps ".pyw"
            source = TESTFN + ext
            pyo = TESTFN + os.extsep + "pyo"
            if sys.platform.startswith('java'):
                pyc = TESTFN + "$py.class"
            else:
                pyc = TESTFN + os.extsep + "pyc"
            f = open(source, "w")
            print >> f, "# This tests Python's ability to import a", ext, "file."
            a = random.randrange(1000)
            b = random.randrange(1000)
            print >> f, "a =", a
            print >> f, "b =", b
            f.close()
            try:
                mod = __import__(TESTFN)
            except ImportError, err:
                self.fail("import from %s failed: %s" % (ext, err))
            else:
                self.assertEquals(mod.a, a,
                    "module loaded (%s) but contents invalid" % mod)
                self.assertEquals(mod.b, b,
                    "module loaded (%s) but contents invalid" % mod)
            finally:
                os.unlink(source)
            try:
                imp.reload(mod)
            except ImportError, err:
                self.fail("import from .pyc/.pyo failed: %s" % err)
            finally:
                try:
                    os.unlink(pyc)
                except OSError:
                    pass
                try:
                    os.unlink(pyo)
                except OSError:
                    pass
                del sys.modules[TESTFN]
        sys.path.insert(0, os.curdir)
        try:
            test_with_extension(os.extsep + "py")
            if sys.platform.startswith("win"):
                # On Windows, also exercise mixed-case extensions.
                for ext in ".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw":
                    test_with_extension(ext)
        finally:
            del sys.path[0]
    if os.name == 'posix':
        def test_execute_bit_not_copied(self):
            # Issue 6070: under posix .pyc files got their execute bit set if
            # the .py file had the execute bit set, but they aren't executable.
            oldmask = os.umask(022)
            sys.path.insert(0, os.curdir)
            try:
                fname = TESTFN + os.extsep + "py"
                f = open(fname, 'w').close()
                os.chmod(fname, (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                                 stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
                __import__(TESTFN)
                fn = fname + 'c'
                if not os.path.exists(fn):
                    fn = fname + 'o'
                    if not os.path.exists(fn): raise TestFailed("__import__ did "
                        "not result in creation of either a .pyc or .pyo file")
                s = os.stat(fn)
                self.assertEquals(stat.S_IMODE(s.st_mode),
                                  stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
            finally:
                os.umask(oldmask)
                remove_files(TESTFN)
                if TESTFN in sys.modules: del sys.modules[TESTFN]
                del sys.path[0]
    def testImpModule(self):
        # Verify that the imp module can correctly load and find .py files
        import imp
        x = imp.find_module("os")
        os = imp.load_module("os", *x)
    def test_module_with_large_stack(self, module='longlist'):
        # create module w/list of 65000 elements to test bug #561858
        filename = module + os.extsep + 'py'
        # create a file with a list of 65000 elements
        f = open(filename, 'w+')
        f.write('d = [\n')
        for i in range(65000):
            f.write('"",\n')
        f.write(']')
        f.close()
        # compile & remove .py file, we only need .pyc (or .pyo)
        f = open(filename, 'r')
        py_compile.compile(filename)
        f.close()
        os.unlink(filename)
        # need to be able to load from current dir
        sys.path.append('')
        # this used to crash
        exec 'import ' + module
        # cleanup
        del sys.path[-1]
        for ext in 'pyc', 'pyo':
            fname = module + os.extsep + ext
            if os.path.exists(fname):
                os.unlink(fname)
    def test_failing_import_sticks(self):
        """A module whose execution raises must not be left in sys.modules."""
        source = TESTFN + os.extsep + "py"
        f = open(source, "w")
        print >> f, "a = 1 // 0"
        f.close()
        # New in 2.4, we shouldn't be able to import that no matter how often
        # we try.
        sys.path.insert(0, os.curdir)
        try:
            for i in 1, 2, 3:
                try:
                    mod = __import__(TESTFN)
                except ZeroDivisionError:
                    if TESTFN in sys.modules:
                        self.fail("damaged module in sys.modules on %i. try" % i)
                else:
                    self.fail("was able to import a damaged module on %i. try" % i)
        finally:
            sys.path.pop(0)
            remove_files(TESTFN)
    def test_failing_reload(self):
        # A failing reload should leave the module object in sys.modules.
        source = TESTFN + os.extsep + "py"
        f = open(source, "w")
        print >> f, "a = 1"
        print >> f, "b = 2"
        f.close()
        sys.path.insert(0, os.curdir)
        try:
            mod = __import__(TESTFN)
            self.assert_(TESTFN in sys.modules, "expected module in sys.modules")
            self.assertEquals(mod.a, 1, "module has wrong attribute values")
            self.assertEquals(mod.b, 2, "module has wrong attribute values")
            # On WinXP, just replacing the .py file wasn't enough to
            # convince reload() to reparse it. Maybe the timestamp didn't
            # move enough. We force it to get reparsed by removing the
            # compiled file too.
            remove_files(TESTFN)
            # Now damage the module.
            f = open(source, "w")
            print >> f, "a = 10"
            print >> f, "b = 20//0"
            f.close()
            self.assertRaises(ZeroDivisionError, imp.reload, mod)
            # But we still expect the module to be in sys.modules.
            mod = sys.modules.get(TESTFN)
            self.failIf(mod is None, "expected module to still be in sys.modules")
            # We should have replaced a w/ 10, but the old b value should
            # stick.
            self.assertEquals(mod.a, 10, "module has wrong attribute values")
            self.assertEquals(mod.b, 2, "module has wrong attribute values")
        finally:
            sys.path.pop(0)
            remove_files(TESTFN)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]
    def test_infinite_reload(self):
        # Bug #742342 reports that Python segfaults (infinite recursion in C)
        # when faced with self-recursive reload()ing.
        sys.path.insert(0, os.path.dirname(__file__))
        try:
            import infinite_reload
        finally:
            sys.path.pop(0)
    def test_import_name_binding(self):
        # import x.y.z binds x in the current namespace
        import test as x
        import test.test_support
        self.assert_(x is test, x.__name__)
        self.assert_(hasattr(test.test_support, "__file__"))
        # import x.y.z as w binds z as w
        import test.test_support as y
        self.assert_(y is test.test_support, y.__name__)
    def test_import_initless_directory_warning(self):
        """Importing a directory with no __init__ must raise ImportWarning."""
        with warnings.catch_warnings():
            # Just a random non-package directory we always expect to be
            # somewhere in sys.path...
            warnings.simplefilter('error', ImportWarning)
            self.assertRaises(ImportWarning, __import__, "site-packages")
    def test_importbyfilename(self):
        """__import__ with a filesystem path must be rejected."""
        path = os.path.abspath(TESTFN)
        try:
            __import__(path)
        except ImportError, err:
            self.assertEqual("Import by filename is not supported.",
                             err.args[0])
        else:
            self.fail("import by path didn't raise an exception")
class TestPycRewriting(unittest.TestCase):
    # Test that the `co_filename` attribute on code objects always points
    # to the right file, even when various things happen (e.g. both the .py
    # and the .pyc file are renamed).
    module_name = "unlikely_module_name"
    # Template module whose attributes record the filenames observed at
    # import time (module __file__, frame code filename, function code
    # filename); `constant = 1` is the marker swapped in test_foreign_code.
    module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
    pass
func_filename = func.func_code.co_filename
"""
    dir_name = os.path.abspath(TESTFN)
    file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
    compiled_name = file_name + ("c" if __debug__ else "o")
    def setUp(self):
        """Write the template module to disk and prepend its dir to sys.path."""
        self.sys_path = sys.path[:]
        self.orig_module = sys.modules.pop(self.module_name, None)
        os.mkdir(self.dir_name)
        with open(self.file_name, "w") as f:
            f.write(self.module_source)
        sys.path.insert(0, self.dir_name)
    def tearDown(self):
        """Restore sys.path/sys.modules and remove the on-disk module."""
        sys.path[:] = self.sys_path
        if self.orig_module is not None:
            sys.modules[self.module_name] = self.orig_module
        else:
            del sys.modules[self.module_name]
        for file_name in self.file_name, self.compiled_name:
            if os.path.exists(file_name):
                os.remove(file_name)
        if os.path.exists(self.dir_name):
            shutil.rmtree(self.dir_name)
    def import_module(self):
        # Import the test module by name and return the module object.
        ns = globals()
        __import__(self.module_name, ns, ns)
        return sys.modules[self.module_name]
    def test_basics(self):
        # First import compiles from source; after dropping it from
        # sys.modules, the second import loads the compiled file.
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
        del sys.modules[self.module_name]
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.compiled_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
    def test_incorrect_code_name(self):
        # Compiling with a mismatched dfile must not leak into co_filename
        # while the real source still exists.
        py_compile.compile(self.file_name, dfile="another_module.py")
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.compiled_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
    def test_module_without_source(self):
        # With the .py removed, co_filename keeps the dfile used at compile
        # time.
        target = "another_module.py"
        py_compile.compile(self.file_name, dfile=target)
        os.remove(self.file_name)
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.compiled_name)
        self.assertEqual(mod.code_filename, target)
        self.assertEqual(mod.func_filename, target)
    def test_foreign_code(self):
        # Splice a code object from another file into the compiled module and
        # verify its original co_filename survives the import.
        py_compile.compile(self.file_name)
        with open(self.compiled_name, "rb") as f:
            header = f.read(8)
            code = marshal.load(f)
        constants = list(code.co_consts)
        foreign_code = test_main.func_code
        pos = constants.index(1)
        constants[pos] = foreign_code
        code = type(code)(code.co_argcount, code.co_nlocals, code.co_stacksize,
                          code.co_flags, code.co_code, tuple(constants),
                          code.co_names, code.co_varnames, code.co_filename,
                          code.co_name, code.co_firstlineno, code.co_lnotab,
                          code.co_freevars, code.co_cellvars)
        with open(self.compiled_name, "wb") as f:
            f.write(header)
            marshal.dump(code, f)
        mod = self.import_module()
        self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
    """sys.path entry edge cases, e.g. entries carrying a trailing slash."""
    path = TESTFN
    def setUp(self):
        os.mkdir(self.path)
        self.syspath = sys.path[:]
    def tearDown(self):
        shutil.rmtree(self.path)
        sys.path = self.syspath
    # http://bugs.python.org/issue1293
    def test_trailing_slash(self):
        # Importing from a sys.path entry ending in '/' must still work.
        with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as source:
            source.write("testdata = 'test_trailing_slash'")
        sys.path.append(self.path + '/')
        mod = __import__("test_trailing_slash")
        self.assertEqual(mod.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")
class RelativeImport(unittest.TestCase):
    """Tests for explicit relative import syntax and __package__ handling."""
    def tearDown(self):
        # Drop the helper module so each test starts from a clean slate.
        try:
            del sys.modules["test.relimport"]
        except:
            pass
    def test_relimport_star(self):
        # This will import * from .test_import.
        from . import relimport
        self.assertTrue(hasattr(relimport, "RelativeImport"))
    def test_issue3221(self):
        """Relative imports must honour __package__ (issue #3221)."""
        def check_absolute():
            exec "from os import path" in ns
        def check_relative():
            exec "from . import relimport" in ns
        # Check both OK with __package__ and __name__ correct
        ns = dict(__package__='test', __name__='test.notarealmodule')
        check_absolute()
        check_relative()
        # Check both OK with only __name__ wrong
        ns = dict(__package__='test', __name__='notarealpkg.notarealmodule')
        check_absolute()
        check_relative()
        # Check relative fails with only __package__ wrong
        ns = dict(__package__='foo', __name__='test.notarealmodule')
        with check_warnings() as w:
            check_absolute()
            self.assert_('foo' in str(w.message))
            self.assertEqual(w.category, RuntimeWarning)
        self.assertRaises(SystemError, check_relative)
        # Check relative fails with __package__ and __name__ wrong
        ns = dict(__package__='foo', __name__='notarealpkg.notarealmodule')
        with check_warnings() as w:
            check_absolute()
            self.assert_('foo' in str(w.message))
            self.assertEqual(w.category, RuntimeWarning)
        self.assertRaises(SystemError, check_relative)
        # Check both fail with package set to a non-string
        ns = dict(__package__=object())
        self.assertRaises(ValueError, check_absolute)
        self.assertRaises(ValueError, check_relative)
    def test_absolute_import_without_future(self):
        # If explicit relative import syntax is used, then do not try
        # to perform an absolute import in the face of failure.
        # Issue #7902.
        try:
            from .os import sep
        except ImportError:
            pass
        else:
            self.fail("explicit relative import triggered an "
                      "implicit absolute import")
def test_main(verbose=None):
    """Run every test class in this module; entry point used by __main__."""
    run_unittest(ImportTest, TestPycRewriting, PathsTests, RelativeImport)
if __name__ == '__main__':
    # test needs to be a package, so we can do relative import
    # (re-import ourselves through the package so relative imports resolve).
    from test.test_import import test_main
    test_main()
| {
"content_hash": "ed51f640eda22bb3ade33c0dfce903e2",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 84,
"avg_line_length": 36.42009132420091,
"alnum_prop": 0.5576730190571715,
"repo_name": "DecipherOne/Troglodyte",
"id": "60346ab9fc82f38d9f55a888f7e1550deda14a2e",
"size": "15952",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Trog Build Dependencies/Python26/Lib/test/test_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "586396"
},
{
"name": "C++",
"bytes": "697696"
},
{
"name": "CSS",
"bytes": "837"
},
{
"name": "Python",
"bytes": "14516232"
},
{
"name": "Shell",
"bytes": "127"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
from .user_base import UserBase
class UserUpdateParameters(UserBase):
    """Request parameters for updating an existing work or school account user.
    :param immutable_id: This must be specified if you are using a federated
    domain for the user's userPrincipalName (UPN) property when creating a new
    user account. It is used to associate an on-premises Active Directory user
    account with their Azure AD user object.
    :type immutable_id: str
    :param usage_location: A two letter country code (ISO standard 3166).
    Required for users that will be assigned licenses due to legal requirement
    to check for availability of services in countries. Examples include:
    "US", "JP", and "GB".
    :type usage_location: str
    :param given_name: The given name for the user.
    :type given_name: str
    :param surname: The user's surname (family name or last name).
    :type surname: str
    :param user_type: A string value that can be used to classify user types
    in your directory, such as 'Member' and 'Guest'. Possible values include:
    'Member', 'Guest'
    :type user_type: str or ~azure.graphrbac.models.UserType
    :param account_enabled: Whether the account is enabled.
    :type account_enabled: bool
    :param display_name: The display name of the user.
    :type display_name: str
    :param password_profile: The password profile of the user.
    :type password_profile: ~azure.graphrbac.models.PasswordProfile
    :param user_principal_name: The user principal name
    (someuser@contoso.com). It must contain one of the verified domains for
    the tenant.
    :type user_principal_name: str
    :param mail_nickname: The mail alias for the user.
    :type mail_nickname: str
    """
    # Maps Python attribute names to their JSON wire names/types for the
    # serializer (base-class fields included).
    _attribute_map = {
        'immutable_id': {'key': 'immutableId', 'type': 'str'},
        'usage_location': {'key': 'usageLocation', 'type': 'str'},
        'given_name': {'key': 'givenName', 'type': 'str'},
        'surname': {'key': 'surname', 'type': 'str'},
        'user_type': {'key': 'userType', 'type': 'str'},
        'account_enabled': {'key': 'accountEnabled', 'type': 'bool'},
        'display_name': {'key': 'displayName', 'type': 'str'},
        'password_profile': {'key': 'passwordProfile', 'type': 'PasswordProfile'},
        'user_principal_name': {'key': 'userPrincipalName', 'type': 'str'},
        'mail_nickname': {'key': 'mailNickname', 'type': 'str'},
    }
    def __init__(self, immutable_id=None, usage_location=None, given_name=None, surname=None, user_type=None, account_enabled=None, display_name=None, password_profile=None, user_principal_name=None, mail_nickname=None):
        # Base-class fields are forwarded to UserBase; the rest are
        # update-specific properties stored on this instance.
        super(UserUpdateParameters, self).__init__(immutable_id=immutable_id, usage_location=usage_location, given_name=given_name, surname=surname, user_type=user_type)
        self.account_enabled = account_enabled
        self.display_name = display_name
        self.password_profile = password_profile
        self.user_principal_name = user_principal_name
        self.mail_nickname = mail_nickname
| {
"content_hash": "910e75842e46edc9f673c240e80b7116",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 220,
"avg_line_length": 52.672413793103445,
"alnum_prop": 0.6762684124386252,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "1c2ac71132a79951ae12521bc9411ab95f904ecf",
"size": "3529",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-graphrbac/azure/graphrbac/models/user_update_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
import json
import pathlib
import pytest
from ..figshare import extract_data
# Directory holding the JSON fixture payloads, next to this test module.
FIXTURES_DIR = str(pathlib.Path(__file__).parent) + '/fixtures'
@pytest.fixture
def json_content():
    """Parsed payload of the figshare_1.json fixture."""
    fixture_path = '{}/figshare_1.json'.format(FIXTURES_DIR)
    with open(fixture_path) as fixture_file:
        raw = fixture_file.read()
    return json.loads(raw)
def test_extract_data(json_content):
    """extract_data pulls the title (and empty presented_at) from a real payload."""
    data = extract_data(json_content)
    expected_title = 'Imperfect centered sites - a new mode of miRNA binding'
    assert data.get('title') == expected_title
    assert data.get('presented_at') == ''
def test_extract_data_empty():
    """Every extracted field defaults to the empty string for empty input."""
    result = extract_data({})
    for field in ('title', 'authors', 'abstract', 'download_url',
                  'presented_at'):
        assert result.get(field) == ''
| {
"content_hash": "0dd9a77eeead02fbc50aefd87af871b5",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 88,
"avg_line_length": 25.633333333333333,
"alnum_prop": 0.647594278283485,
"repo_name": "TailorDev/pauling",
"id": "ee591644c40e0f63c0e0999ebc9c0519953d2581",
"size": "769",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/providers/tests/test_figshare.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3238"
},
{
"name": "HTML",
"bytes": "122789"
},
{
"name": "Java",
"bytes": "2017"
},
{
"name": "JavaScript",
"bytes": "52407"
},
{
"name": "Makefile",
"bytes": "2893"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Objective-C",
"bytes": "4524"
},
{
"name": "Python",
"bytes": "25209"
},
{
"name": "Ruby",
"bytes": "10058"
},
{
"name": "Shell",
"bytes": "1193"
}
],
"symlink_target": ""
} |
import logging
import urllib
from flask import redirect, g, abort
from flask import Blueprint
from google.appengine.api import urlfetch
from sparkprs import app
from sparkprs.models import Issue
# Blueprint grouping the Jenkins build-trigger route below.
jenkins = Blueprint('jenkins', __name__)
@jenkins.route("/trigger-jenkins/<int:number>", methods=['GET', 'POST'])
def test_pr(number):
    """
    Triggers a parametrized Jenkins build for testing Spark pull requests.
    """
    # Only users granted the "jenkins" capability may trigger builds.
    if not (g.user and g.user.has_capability("jenkins")):
        return abort(403)
    pr = Issue.get_or_create(number)
    head_commit = pr.pr_json["head"]["sha"]
    base_branch = pr.pr_json["base"]["ref"]
    # The parameter names mirror those used by Jenkins' GitHub pull request
    # builder plugin
    # (https://wiki.jenkins-ci.org/display/JENKINS/Github+pull+request+builder+plugin).
    # The Spark repo's dev/run-tests-jenkins script
    # (https://github.com/apache/spark/blob/master/dev/run-tests-jenkins)
    # reads these variables when posting pull request feedback.
    params = {
        'token': app.config['JENKINS_PRB_TOKEN'],
        'ghprbPullId': number,
        'ghprbActualCommit': head_commit,
        'ghprbTargetBranch': base_branch,
        'ghprbPullTitle': pr.raw_title.encode('utf-8'),
        # Matches the Jenkins plugin's logic; see
        # https://github.com/jenkinsci/ghprb-plugin/blob/master/src/main/java/org/jenkinsci/plugins/ghprb/GhprbTrigger.java#L146
        # The origin/pr/*/merge ref points to the last successful test merge
        # commit GitHub generates when checking mergeability. The API is
        # technically undocumented, but enough plugins rely on it that it is
        # unlikely to change soon (if it does, we can perform the merge
        # ourselves). See also:
        # https://developer.github.com/changes/2013-04-25-deprecating-merge-commit-sha/
        'sha1': ("origin/pr/%i/merge" % number) if pr.is_mergeable else head_commit,
    }
    trigger_url = "%sbuildWithParameters?%s" % (app.config["JENKINS_PRB_JOB_URL"],
                                                urllib.urlencode(params))
    logging.debug("Triggering Jenkins with url %s" % trigger_url)
    response = urlfetch.fetch(trigger_url, method="POST")
    if response.status_code in (200, 201):
        return redirect(app.config["JENKINS_PRB_JOB_URL"])
    logging.error("Jenkins responded with status code %i" % response.status_code)
    return response.content
| {
"content_hash": "f257f53687cb60150536961979dac708",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 128,
"avg_line_length": 46.53703703703704,
"alnum_prop": 0.6768802228412256,
"repo_name": "databricks/spark-pr-dashboard",
"id": "90bfa939f11853374fa229ce00b8bda11de59d10",
"size": "2513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sparkprs/controllers/jenkins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1389"
},
{
"name": "HTML",
"bytes": "1598"
},
{
"name": "JavaScript",
"bytes": "76587"
},
{
"name": "Python",
"bytes": "46426"
}
],
"symlink_target": ""
} |
from __future__ import division
from datetime import datetime
from vistrails.core.system import strftime, time_strptime
from vistrails.db.domain import DBMashupActionAnnotation
class ActionAnnotation(DBMashupActionAnnotation):
    """Domain wrapper around DBMashupActionAnnotation.

    Adds convenient attribute aliases for the db_* fields and a `date`
    property that formats/parses '%d %b %Y %H:%M:%S' strings.
    """
    def __init__(self, id, action_id, key=None, value=None, user=None, date=None):
        # NOTE: the base-class positional order differs from this signature
        # (key/value come before action_id there).
        DBMashupActionAnnotation.__init__(self, id, key, value, action_id, date,
                                          user)
    # Attribute aliases onto the underlying DB-layer descriptors.
    id = DBMashupActionAnnotation.db_id
    action_id = DBMashupActionAnnotation.db_action_id
    key = DBMashupActionAnnotation.db_key
    value = DBMashupActionAnnotation.db_value
    user = DBMashupActionAnnotation.db_user
    def _get_date(self):
        # Returns a formatted date string; falls back to 1 Jan 1900 when no
        # date has been stored.
        if self.db_date is not None:
            return strftime(self.db_date, '%d %b %Y %H:%M:%S')
        return strftime(datetime(1900,1,1), '%d %b %Y %H:%M:%S')
    def _set_date(self, date):
        # Accepts either a datetime or a non-blank string in the same format
        # _get_date emits; any other input is silently ignored.
        if isinstance(date, datetime):
            self.db_date = date
        elif isinstance(date, basestring) and date.strip() != '':
            newDate = datetime(*time_strptime(date, '%d %b %Y %H:%M:%S')[0:6])
            self.db_date = newDate
    date = property(_get_date, _set_date)
    @staticmethod
    def convert(_annotation):
        # Re-brands a raw DB-layer object as this domain class in place.
        _annotation.__class__ = ActionAnnotation
    def __copy__(self):
        return ActionAnnotation.do_copy(self)
    def do_copy(self, new_ids=False, id_scope=None, id_remap=None):
        # Delegates copying to the DB layer, then re-brands the result.
        cp = DBMashupActionAnnotation.do_copy(self, new_ids, id_scope, id_remap)
        cp.__class__ = ActionAnnotation
        return cp
#    def toXml(self, node=None):
#        """toXml(node: ElementTree.Element) -> ElementTree.Element
#           writes itself to xml
#        """
#
#        if node is None:
#            node = ElementTree.Element('actionAnnotation')
#
#        #set attributes
#        node.set('id', self.convert_to_str(self.id,'long'))
#        node.set('action_id', self.convert_to_str(self.action_id,'long'))
#        node.set('user', self.convert_to_str(self.user,'str'))
#        node.set('key', self.convert_to_str(self.key,'str'))
#        node.set('value', self.convert_to_str(self.value,'str'))
#        node.set('date', self.convert_to_str(self._date,'datetime'))
#        return node
#
#    @staticmethod
#    def fromXml(node):
#        if node.tag != 'actionAnnotation':
#            debug.debug("node.tag != 'actionAnnotation'")
#            return None
#        #read attributes
#        data = node.get('id', None)
#        id = Action.convert_from_str(data, 'long')
#        data = node.get('action_id', None)
#        action_id = Action.convert_from_str(data, 'long')
#        data = node.get('key', None)
#        key = Action.convert_from_str(data, 'str')
#        data = node.get('value', None)
#        value = Action.convert_from_str(data, 'str')
#        data = node.get('user', None)
#        user = Action.convert_from_str(data, 'str')
#        data = node.get('date', None)
#        date = Action.convert_from_str(data, 'datetime')
#        return ActionAnnotation(id=id, action_id=action_id, key=key, value=value,
#                                user=user, date=date)
    ##########################################################################
    # Operators
    def __str__(self):
        """Return a string representation of this annotation."""
        rep = ("<mashup_actionAnnotation id=%s action_id=%s key=%s value=%s "
               "date=%s user=%s</annotation>")
        return rep % (str(self.id), str(self.action_id), str(self.key),
                      str(self.value), str(self.date), str(self.user))
    def __eq__(self, other):
        """Equality compares key, value and action_id only (user and date
        are deliberately not part of the comparison, matching do_copy tests).
        """
        if type(self) != type(other):
            return False
        if self.key != other.key:
            return False
        if self.value != other.value:
            return False
        if self.action_id != other.action_id:
            return False
        return True
    def __ne__(self, other):
        return not self.__eq__(other)
################################################################################
import unittest
from vistrails.db.domain import IdScope
import copy
class TestActionAnnotation(unittest.TestCase):
    """Unit tests for ActionAnnotation copy and string conversion."""
    def create_annotation(self, id_scope=IdScope()):
        # NOTE(review): the default IdScope() is evaluated once at class
        # definition time, so calls without an explicit id_scope share the
        # same id generator — presumably intentional for tests, but confirm.
        annotation = \
            ActionAnnotation(id=id_scope.getNewId('mashup_actionAnnotation'),
                             key='akey', action_id=1L,
                             value='some value', user='test')
        return annotation
    def test_copy(self):
        # Copies compare equal; shallow copy keeps the id, do_copy with
        # new_ids=True must assign a fresh one.
        id_scope = IdScope()
        a1 = self.create_annotation(id_scope)
        a2 = copy.copy(a1)
        self.assertEquals(a1, a2)
        self.assertEquals(a1.id, a2.id)
        a3 = a1.do_copy(True, id_scope, {})
        self.assertEquals(a1, a3)
        self.assertNotEquals(a1.id, a3.id)
#    def test_serialization(self):
#        a1 = self.create_annotation()
#        node = a1.toXml()
#        a2 = ActionAnnotation.fromXml(node)
#        self.assertEquals(a1, a2)
#        self.assertEquals(a1.id, a2.id)
    def test_str(self):
        # Only checks that __str__ does not raise.
        a1 = self.create_annotation()
        str(a1)
| {
"content_hash": "10f919830d856973d712cbcc7b2e276f",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 82,
"avg_line_length": 36.83561643835616,
"alnum_prop": 0.5583860171067311,
"repo_name": "hjanime/VisTrails",
"id": "0be6a3661e1f40fea9581381f0c7f9d3221fa082",
"size": "7291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vistrails/core/mashup/action_annotation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19550"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66613"
},
{
"name": "PHP",
"bytes": "49302"
},
{
"name": "Python",
"bytes": "19803915"
},
{
"name": "R",
"bytes": "782836"
},
{
"name": "Ruby",
"bytes": "875"
},
{
"name": "Shell",
"bytes": "35024"
},
{
"name": "TeX",
"bytes": "145333"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
import unittest
from troposphere import Join
from troposphere.apigateway import GatewayResponse, Model
class TestModel(unittest.TestCase):
    def test_schema(self):
        """Model.validate() handles absent, valid, invalid and dict schemas."""
        # An absent schema is accepted.
        Model("schema", RestApiId="apiid").validate()
        # A valid JSON string passes validation.
        Model("schema", RestApiId="apiid", Schema='{"a": "b"}').validate()
        # Malformed JSON is rejected at validation time.
        broken = Model("schema", RestApiId="apiid", Schema='{"a: "b"}')
        with self.assertRaises(ValueError):
            broken.validate()
        # A dict schema is accepted and serialized to a JSON string.
        model = Model("schema", RestApiId="apiid", Schema={"c": "d"})
        model.validate()
        self.assertEqual(model.properties["Schema"], '{"c": "d"}')
        # Any other type is rejected when the property is set.
        with self.assertRaises(TypeError):
            Model("schema", RestApiId="apiid", Schema=1)
        # AWSHelperFn values (e.g. Join) are passed through untouched.
        Model(
            "schema",
            RestApiId="apiid",
            Schema=Join(":", ['{"a', ': "b"}']),
        ).validate()
class TestGatewayResponse(unittest.TestCase):
    def test_response_type(self):
        """A known ResponseType validates; an unknown one raises ValueError."""
        valid_response = GatewayResponse(
            "GatewayResponse",
            ResponseType="DEFAULT_4XX",
            RestApiId="apiid",
            StatusCode="200",
        )
        valid_response.validate()
        # The invalid ResponseType is rejected during construction, so the
        # return value does not need to be kept.
        with self.assertRaises(ValueError):
            GatewayResponse(
                "GatewayResponse",
                ResponseType="INVALID_RESPONSE_TYPE",
                RestApiId="apiid",
                StatusCode="200",
            )
# Allow running this test module directly (python test_apigateway.py).
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "8ce57556405c2d223a7be1c2471d72b5",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 67,
"avg_line_length": 27.01388888888889,
"alnum_prop": 0.5295629820051414,
"repo_name": "cloudtools/troposphere",
"id": "56f703caa6125e5d9dc56ddace428dbf13cd0960",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_apigateway.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2754"
},
{
"name": "Python",
"bytes": "2305574"
},
{
"name": "Shell",
"bytes": "625"
}
],
"symlink_target": ""
} |
import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask_httpauth import HTTPTokenAuth
from flask.ext.cors import CORS
# Flask application object; static assets are served from ./static.
app = Flask(__name__, static_folder='static')
# Allow cross-origin requests from any origin on every /api/ endpoint.
cors = CORS(app, resources={r"/api/*": {"origins":"*"}})
app.config.from_object('config')
# Shared extension singletons used throughout the app package.
db = SQLAlchemy(app)
auth = HTTPTokenAuth('Bearer')
lm = LoginManager()
lm.init_app(app)
# Imported at the bottom on purpose: views/models/api import `app`/`db`
# from this module, so importing them earlier would be circular.
from app import views, models, api
| {
"content_hash": "42baf30337f93f3ac5c6549819c1cb97",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 56,
"avg_line_length": 28.4375,
"alnum_prop": 0.7472527472527473,
"repo_name": "cmput404wi16/metablog-project",
"id": "04162af9e075badd1313bc962b861af24a489615",
"size": "455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5325"
},
{
"name": "HTML",
"bytes": "40247"
},
{
"name": "Python",
"bytes": "76897"
},
{
"name": "Shell",
"bytes": "1008"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import string
from telemetry.internal.actions import page_action
# Map from DOM key values
# (https://developer.mozilla.org/en-US/docs/Web/API/KeyboardEvent/key) to
# Windows virtual key codes
# (https://cs.chromium.org/chromium/src/third_party/WebKit/Source/platform/WindowsKeyboardCodes.h)
# and their printed representations (if available).
# Keyed by DOM key value; each entry is (windows_virtual_key_code, text).
_KEY_MAP = {}


def _AddSpecialKey(key, windows_virtual_key_code, text=None):
  """Registers a named key that may optionally produce text."""
  assert key not in _KEY_MAP, 'Duplicate key: %s' % key
  _KEY_MAP[key] = (windows_virtual_key_code, text)


def _AddRegularKey(keys, windows_virtual_key_code):
  """Registers printable keys; each char is its own printed representation."""
  for character in keys:
    assert character not in _KEY_MAP, 'Duplicate key: %s' % character
    _KEY_MAP[character] = (windows_virtual_key_code, character)


def GetKey(key_name):
  """Returns the (windows_virtual_key_code, text) pair for key_name, or
  None if the key is not registered."""
  return _KEY_MAP.get(key_name)


# Named (mostly non-printing) keys; Return/Delete/Backspace/Tab also
# produce a character.
for _name, _vk, _text in (
    ('PageUp', 0x21, None),
    ('PageDown', 0x22, None),
    ('End', 0x23, None),
    ('Home', 0x24, None),
    ('ArrowLeft', 0x25, None),
    ('ArrowUp', 0x26, None),
    ('ArrowRight', 0x27, None),
    ('ArrowDown', 0x28, None),
    ('Esc', 0x1B, None),
    ('Return', 0x0D, '\x0D'),
    ('Delete', 0x2E, '\x7F'),
    ('Backspace', 0x08, '\x08'),
    ('Tab', 0x09, '\x09')):
  _AddSpecialKey(_name, _vk, text=_text)

# Letter keys: upper and lower case share one virtual key code.
for _letter in string.ascii_uppercase:
  _AddRegularKey(_letter + _letter.lower(), ord(_letter))

# Symbol, numeric and space keys (unshifted/shifted pairs share a code).
for _chars, _vk in (
    (';:', 0xBA), ('=+', 0xBB), (',<', 0xBC), ('-_', 0xBD), ('.>', 0xBE),
    ('/?', 0xBF), ('`~', 0xC0), ('[{', 0xDB), ('\\|', 0xDC), (']}', 0xDD),
    ('\'"', 0xDE),
    ('0)', 0x30), ('1!', 0x31), ('2@', 0x32), ('3#', 0x33), ('4$', 0x34),
    ('5%', 0x35), ('6^', 0x36), ('7&', 0x37), ('8*', 0x38), ('9(', 0x39),
    (' ', 0x20)):
  _AddRegularKey(_chars, _vk)
class KeyPressAction(page_action.PageAction):
  """Dispatches rawKeyDown/char/keyUp DevTools events for one DOM key."""

  def __init__(self, dom_key, timeout=page_action.DEFAULT_TIMEOUT):
    super().__init__(timeout=timeout)
    self._dom_key = dom_key
    code_point = ord(dom_key) if len(dom_key) == 1 else 0
    # Named keys and single ASCII characters must be registered in _KEY_MAP;
    # other single characters pass through with the character itself as text.
    must_be_mapped = len(dom_key) > 1 or code_point < 128
    if must_be_mapped and dom_key not in _KEY_MAP:
      raise ValueError('No mapping for key: %s (code=%s)' % (
          dom_key, code_point))
    self._windows_virtual_key_code, self._text = _KEY_MAP.get(
        dom_key, ('', dom_key))

  def RunAction(self, tab):
    # NOTE: self.timeout applies to each DevTools command separately, so the
    # whole action can take up to three times the nominal timeout.
    down_and_up_args = dict(
        dom_key=self._dom_key,
        windows_virtual_key_code=self._windows_virtual_key_code,
        timeout=self.timeout)
    tab.DispatchKeyEvent(key_event_type='rawKeyDown', **down_and_up_args)
    if self._text:
      tab.DispatchKeyEvent(
          key_event_type='char',
          text=self._text,
          dom_key=self._dom_key,
          windows_virtual_key_code=ord(self._text),
          timeout=self.timeout)
    tab.DispatchKeyEvent(key_event_type='keyUp', **down_and_up_args)

  def __str__(self):
    return "%s('%s')" % (self.__class__.__name__, self._dom_key)
| {
"content_hash": "f373fccdcd0055722ea87ef08d7c802b",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 98,
"avg_line_length": 31.1981981981982,
"alnum_prop": 0.6635864857060352,
"repo_name": "catapult-project/catapult",
"id": "34262885306cb10b5adceb46f3ec39e0461a3a9c",
"size": "3626",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "telemetry/telemetry/internal/actions/key_event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
import ConfigParser
import os
import sys
import string
import optparse
import traceback
import collections
import copy
from operator import attrgetter
# bzrlib is required for repository access. On OS X it may live outside the
# default sys.path, so fall back to the system site-packages location.
# NOTE(review): the bare `except:` hides the real ImportError on other
# platforms until the second import fails.
try:
    import bzrlib
except:
    import platform
    if platform.uname()[0] == 'Darwin':
        sys.path.append('/Library/Python/2.6/site-packages')
    import bzrlib
import tempfile
import subprocess
import StringIO
import re
import component
import check_tools
import vcs
import codescan
import sandbox
import l10n.pslocale
from ioutil import *
from pprint import pprint
# Example metadata.txt illustrating the sections/options parsed below.
SAMPLE = '''
[misc]
targeted platforms=windows,linux
supported platforms=osx
current milestone=ui freeze
[build tools]
python: 2.6,windows|linux,python -V,Download from activestate.com
ant: 1.7,windows|linux,ant -version,download from apache web site or use one from eclipse IDE
[test tools]
nose-test: 1.0,windows,nose --v,use easy_install
[run tools]
nginx: 0.95,windows,nginx --version,download from nginx.org
nginx: 1.3,linux,nginx --version,yum install nginx
[component dependencies]
buildscripts: code
foo: built,tagX
[scanned paths]
exclude=data/.*|samples/.*
[ui]
ui: admin ui, search, ui
[admin ui]
path=console/htdocs
targeted locales=en,fr,de
supported locales=jp
[search ui]
path=search/htdocs
targeted locales=en,fr,de,it,es,pt,jp,zh
'''
# Matches "from_code_rev_<rev>" revision markers; group 1 is the revision.
_FROMCODE_PAT = re.compile('from_code_rev_(.*)', re.IGNORECASE)
# Matches tags usable for pinning dependencies (see _get_deps).
_TAG_PAT = re.compile(r'[\w-]+\.[\w]+\.[\d]+\.[\d]+ use: reusable')
_TOOLS_SECTION_TEMPLATE = '%s tools'
# Per-component metadata file name and its section/option names.
METADATA_FILE = "metadata.txt"
DEPENDENCIES_SECTION = 'component dependencies'
BUILD_TOOLS_SECTION = _TOOLS_SECTION_TEMPLATE % 'build'
TEST_TOOLS_SECTION = _TOOLS_SECTION_TEMPLATE % 'test'
RUN_TOOLS_SECTION = _TOOLS_SECTION_TEMPLATE % 'run'
TOOLS_SECTIONS = [BUILD_TOOLS_SECTION, TEST_TOOLS_SECTION, RUN_TOOLS_SECTION]
MISC_SECTION = 'misc'
SCANNED_FOLDER_SECTION = 'scanned folders'
SCANNED_FILE_SECTION = 'scanned files'
TARGETED_PLATFORMS_OPTION = 'targeted platforms'
SUPPORTED_PLATFORMS_OPTION = 'supported platforms'
MILESTONE_OPTION = 'current milestone'
UI_FREEZE_MILESTONE = 'ui freeze'
LOCALIZATION_COMPLETE_MILESTONE = 'localization complete'
SCAN_SECTION = 'scanned paths'
INCLUDE_OPTION_PREFIX ='include'
EXCLUDE_OPTION_PREFIX ='exclude'
UI_SECTION_PREFIX = 'ui'
UI_PATH_OPTION = 'ui path'
TARGETED_LOCALES_OPTION = 'targeted locales'
SUPPORTED_LOCALES_OPTION = 'supported locales'
DO_NOT_INTEGRATE_OPTION = 'do not integrate'
# Source files worth scanning, and folders never recursed into.
INTERESTING_EXT_PAT = re.compile(r'.*\.(cpp|java|h|py|js)$')
NON_RECURSING_FOLDERS_PAT = re.compile(r'(\.bzr|data|ext(js)?|boost|sample-data|\.metadata|built.*|run|report|Archive|Dist|Install|bin|lib|Debug|Release|prebuilt|buildtools)$')
def get_revno_from_tag(working_repo, comp):
    """Return the revno that `bzr tags` reports for comp.revision.

    Runs `bzr tags` inside the component's local branch checkout and scans
    the output for a tag named comp.revision.

    Raises:
        Exception: if comp.revision is not one of the branch's tags.
    """
    branch_dir = os.path.join(working_repo.local_reporoot, comp.branch,
                              comp.name, comp.reused_aspect)
    cwd = os.getcwd()
    os.chdir(branch_dir)
    try:
        p = subprocess.Popen(['bzr', 'tags'], stdout=subprocess.PIPE)
        taglines = p.stdout.readlines()
    finally:
        # Fix: restore the original working directory even if launching or
        # reading from bzr fails (previously a failure left cwd changed).
        os.chdir(cwd)
    # Each output line is "<tag-name> <revno>".
    tagtuples = [tag.rsplit(None, 1) for tag in taglines if tag]
    for tag in tagtuples:
        if comp.revision == tag[0]:
            return tag[1]
    raise Exception('%s is not a valid tag in %s' % (comp.revision, os.path.join(working_repo.local_reporoot, comp.name, comp.reused_aspect, comp.branch)))
def get_section_info_from_fp(section, fp):
    """Parse fp as an INI file and return {option: value} for section.

    Returns {} when the section is absent or the file has no section header.
    """
    parser = ConfigParser.ConfigParser()
    try:
        parser.readfp(fp)
        if section in parser.sections():
            return dict((option, parser.get(section, option))
                        for option in parser.options(section))
    except ConfigParser.MissingSectionHeaderError:
        # Malformed metadata files are deliberately treated as empty.
        pass
    return {}
def get_section_info_from_disk(section, folder):
    """Read METADATA_FILE in folder and return the given section's options.

    Returns {} (after printing a notice) when the file cannot be opened.
    """
    metadata_path = os.path.join(folder, METADATA_FILE)
    try:
        with open(metadata_path) as fp:
            return get_section_info_from_fp(section, fp)
    except IOError:
        print("%s does not exist in %s." % (METADATA_FILE, folder))
        return {}
def get_section_info_from_vcs(section, comp, working_repo, platform, use_master=False):
    """Fetch METADATA_FILE for comp from version control and return the
    options of `section` (empty dict on any failure).

    `use_master` reads from the master repo root instead of the working one.
    """
    revision = comp.revision
    if revision:
        # "from_code_rev_<rev>" markers carry the real revision in group 1.
        m = _FROMCODE_PAT.match(revision)
        if m:
            revision = m.group(1)
    try:
        #print 'Checking dependencies for:', comp.name, comp.reused_aspect, comp.branch, METADATA_FILE, revision
        aspect = comp.reused_aspect
        if aspect == component.BUILT_ASPECT_NAME:
            built_aspects = [b for b in working_repo.branches
                             if b[1] == comp.name
                             and b[2].startswith(component.BUILT_ASPECT_NAME)
                             and b[0] == comp.branch]
            if not built_aspects:
                # This is only valid because the function is trying to fetch
                # dependencies, AND NOTHING ELSE. We can't generally substitute
                # code aspects for built ones.
                aspect = component.CODE_ASPECT_NAME
            else:
                # Prefer the built aspect that matches the current targeted platform.
                if platform:
                    tmp = [a for a in built_aspects if platform in a[2]]
                    if tmp:
                        built_aspects = tmp
                aspect = built_aspects[0][2]
        if use_master:
            txt = vcs.get_file_contents(working_repo.master_reporoot, comp.name, aspect, comp.branch, METADATA_FILE)
        else:
            txt = working_repo.get_file_contents(comp.name, aspect, comp.branch, METADATA_FILE, revision)
        fp = StringIO.StringIO()
        fp.write(txt)
        fp.seek(0)
        result = get_section_info_from_fp(section, fp)
        fp.close()
        return result
    # NOTE(review): bare except swallows every error (including
    # KeyboardInterrupt) and turns it into an empty result — consider
    # narrowing to `except Exception:`.
    except:
        print('Unable to get info about component %s from version control.' % comp)
        traceback.print_exc()
    return {}
def _get_deps(working_repo, platform, top_component, code_root, read_deps, already_analyzed, use_master=False, check_vcs=True):
    """Recursively populate top_component.dependencies from metadata.txt.

    A local sandbox copy (under code_root) of metadata.txt wins over the
    checked-in version. `already_analyzed` caches parsed sections per
    folder/component key; `read_deps` avoids re-walking shared dependencies.
    Also accumulates `rank` (own dependency count plus children's ranks),
    which callers use for inverse-dependency sorting.
    """
    if top_component.name == 'buildscripts':
        top_component.reused_aspect = component.CODE_ASPECT_NAME
    ##TODO julie why would we do this?
##    if top_component.reused_aspect == component.BUILT_ASPECT_NAME:
##        interesting_branches = [b for b in working_repo.branches if b[1] == top_component.name and b[2].startswith(component.BUILT_ASPECT_NAME) and b[0] == top_component.branch]
##        if not interesting_branches:
##            top_component.reused_aspect = component.CODE_ASPECT_NAME
    folder = ''
    # Only an unpinned component may be overridden by a local sandbox copy;
    # the local metadata.txt is used when it is modified/added (per VCS
    # status) or when the folder is not under version control at all.
    if (not top_component.revision) and code_root:
        fldr = os.path.join(code_root, top_component.name)
        if os.path.isdir(fldr):
            if check_vcs and vcs.folder_is_tied_to_vcs(fldr):
                output = vcs.get_status(fldr, status_filter=lambda lbl: lbl == 'modified' or lbl == 'added')
                if output:
                    if 'modified' in output:
                        if METADATA_FILE in output['modified']:
                            folder = fldr
                    if 'added' in output:
                        if METADATA_FILE in output['added']:
                            folder = fldr
            else:
                folder = fldr
    if folder:
        if folder in already_analyzed:
            return top_component #sections = already_analyzed[folder]
        else:
            print('\nLoading %s from %s.' % (METADATA_FILE, folder))
            x = get_section_info_from_disk(MISC_SECTION, folder)
            # Built aspects marked "terminal dependency" stop the recursion.
            if 'terminal dependency' in x and top_component.reused_aspect.startswith(component.BUILT_ASPECT_NAME):
                return top_component
            sections = get_section_info_from_disk(DEPENDENCIES_SECTION, folder)
            already_analyzed[folder] = sections
    elif check_vcs:
        key = '%s:%s' % (top_component.name, top_component.reused_aspect) #str(top_component)
        if key in already_analyzed:
            return top_component #sections = already_analyzed[key]
        else:
            x = get_section_info_from_vcs(MISC_SECTION, top_component, working_repo, platform, use_master)
            if 'terminal dependency' in x and top_component.reused_aspect.startswith(component.BUILT_ASPECT_NAME):
                return top_component
            sections = get_section_info_from_vcs(DEPENDENCIES_SECTION, top_component, working_repo, platform, use_master)
            already_analyzed[key] = sections
    else:
        return top_component
    compOldDeps = False
    for componentname, info in sections.iteritems():
        componentname = componentname.strip()
        aspect, revision, old = component.parse_component_info(info)
        if aspect == component.BUILT_ASPECT_NAME:
            # Built aspects are platform-specific (e.g. "built.windows").
            aspect += "." + platform
        if old:
            compOldDeps = True
        componentname, ignored, branch, task = working_repo.normalize(componentname, aspect, top_component.branch)
        if revision:
            m = _TAG_PAT.match(revision)
            if not m:
                raise Exception('%s is not a valid tag for pinning dependencies.' % revision)
        assert(aspect)
        top_component.dependencies.append(component.Component(componentname, branch, revision, aspect, parent=top_component))
    if compOldDeps:
        print('''Component %s/%s/%s has the old format for dependencies.
Please update dependencies in metadata.txt to match format found at:
https:// ... /working-with-code/concepts/dependencies''' % (top_component.name,top_component.reused_aspect,top_component.branch)) # TODO KIM refer to doc site
    top_component.rank += len(top_component.dependencies)
    for dep in top_component.dependencies:
        # Built components only depend on built components.
        if top_component.reused_aspect.startswith(component.BUILT_ASPECT_NAME):
            dep.reused_aspect = top_component.reused_aspect
        # We are suspicious that this optimization isn't working
        if str(dep) not in read_deps or read_deps[str(dep)] != dep:
            read_deps[str(dep)] = dep
            dep = _get_deps(working_repo, platform, dep, code_root, read_deps, already_analyzed, use_master, check_vcs)
        top_component.rank += dep.rank
    return top_component
def has_component(dep_tree, comp_name):
    """Return True if a component named comp_name appears in the tree.

    Checks the root node itself, then recurses into its dependencies.
    Now always returns a bool (the previous version fell off the end and
    returned None for a miss, and also scanned direct children's names
    redundantly — the recursive call already checks each child's own name).
    """
    if dep_tree.name == comp_name:
        return True
    return any(has_component(d, comp_name) for d in dep_tree.dependencies)
def get_components_inv_dep_order(working_repo, platform, top, code_root=None, branch='trunk', revision=None, aspect=component.CODE_ASPECT_NAME, debug=False, use_master=False, check_vcs=True):
    '''
    Return a list of components in inverse dependency order, using the specified
    component as the starting point of the dependency graph. Inverse dependency
    order means that the components with no dependencies (the leaves) are listed
    first, and the most dependent component is last. This is valid build order.
    @param top The name of the topmost component.
    @param code_root Optional. The fully qualified path to the code root of an
    existing sandbox. If specified, then the metadata.txt files for components
    in the coderoot are used to override/pre-empt the checked-in versions.
    @param branch The branch of the components. All components in the dependency
    tree must share this branch.
    @param revision The revision of the topmost component; None = latest.
    @param aspect The aspect of the topmost component.
    '''
    already_analyzed = {}
    comp = component.Component(top, branch, revision, aspect)
    dep_tree = _get_deps(working_repo, platform, comp, code_root, {str(comp): comp}, already_analyzed, use_master, check_vcs)
    # At this point dep_tree is a single Component object that has embedded child
    # nodes in its .dependencies member. If our topmost component is terminal,
    # we won't have buildscripts in the code folder unless we artificially
    # inject the dependency here...
    if not has_component(dep_tree, 'buildscripts'):
        c = component.Component('buildscripts', dep_tree.branch, None, component.CODE_ASPECT_NAME, parent=dep_tree)
        dep_tree.dependencies.append(c)
    if debug:
        # Keep a reference to the unresolved tree for the debug dump below;
        # _detect_conflicts mutates dep_tree in place.
        tree = dep_tree
    _detect_conflicts(working_repo, dep_tree, branch, top, [], debug)
    components = _trim_tree(dep_tree)
    # rank encodes transitive dependency count, so sorting by it yields a
    # valid build order (leaves first); name breaks ties deterministically.
    deps = sorted(components, key=attrgetter('rank', 'name'))
    if debug:
        print('\nDependencies:')
        for comp in deps:
            print comp
        print('-----------------------------------')
        print('\nFull dependence tree')
        print_tree(tree)
        print('-----------------------------------')
        unnecessary_dependencies(tree)
    return deps
def _trim_tree(tree):
components = set([tree])
queue = []
for d in tree.dependencies:
queue.append(d)
i = 0
while i < len(queue):
dep = queue[i]
components.add(dep)
for d in dep.dependencies:
queue.append(d)
i += 1
return [c for c in components]
def _detect_conflicts(working_repo, tree, branch, top, components=[], debug=False):
    """Resolve aspect/revision conflicts among same-named components.

    Walks the tree breadth-first, groups nodes by component name, and for
    each group with more than one node: forces everyone to the code aspect
    if any member uses it, and pins everyone to the member with the highest
    tag revno (unpinned beats pinned). Mutates the tree in place.
    NOTE(review): the `components` parameter is immediately rebound, so the
    mutable default argument is harmless but misleading.
    """
    components = {tree.name:[tree]}
    queue = []
    for d in tree.dependencies:
        queue.append(d)
    i = 0
    while i < len(queue):
        dep = queue[i]
        if dep.name in components:
            components[dep.name].append(dep)
        else:
            components[dep.name] = [dep]
        for d in dep.dependencies:
            queue.append(d)
        i += 1
    for comp, conflicts in components.iteritems():
        uses_code = []
        used_built = []
        other_revisions = []
        diff_revision = False
        if len(conflicts) > 1:
            # -1 is a sentinel: replaced by the first conflict examined
            # (type(highest_revision) != type(conflict) on first pass).
            highest_revision = -1
            code = False
            for conflict in conflicts:
                if conflict.reused_aspect == component.CODE_ASPECT_NAME:
                    code = True
                    uses_code.append(conflict)
                else:
                    used_built.append(conflict)
                if highest_revision is not None:
                    if type(highest_revision) != type(conflict):
                        highest_revision = conflict
                    elif conflict.revision is None:
                        # Unpinned (None revision) wins over any pin.
                        if highest_revision.revision is not None:
                            diff_revision = True
                        highest_revision = conflict
                    elif get_revno_from_tag(working_repo, conflict) > get_revno_from_tag(working_repo, highest_revision):
                        diff_revision = True
                        highest_revision = conflict
                    elif get_revno_from_tag(working_repo, conflict) != get_revno_from_tag(working_repo, highest_revision):
                        diff_revision = True
            for conflict in conflicts:
                if code:
                    conflict.reused_aspect = component.CODE_ASPECT_NAME
                if conflict.revision != highest_revision.revision:
                    other_revisions.append(conflict)
                    conflict.revision = highest_revision.revision
        if debug:
            if (uses_code and used_built) or diff_revision:
                print('%s had conflicts that were resolved.' % comp)
            if uses_code and used_built:
                print('\nThese components used the built aspect')
                for ub in used_built:
                    print ub
                print('\nThere were changed to code because these components use the code aspect')
                for uc in uses_code:
                    print uc
            if diff_revision:
                if highest_revision.revision is None:
                    print('\n%s is unpinned.' % highest_revision)
                else:
                    print('%s is the highest revision at %s %s' % (highest_revision, highest_revision.revision, get_revno_from_tag(working_repo, highest_revision)))
                print('Other revision considered.')
                for other in other_revisions:
                    print highest_revision.revision, get_revno_from_tag(working_repo, highest_revision)
            if (uses_code and used_built) or diff_revision:
                print('-----------------------------------\n')
    '''conflicts = []
    for c in components:
        if c.name == tree.name and c != tree:
            conflicts.append(c)
    if conflicts:
        # Resolve conflicts. Use highest revision number and source code over pre-built.
        revision = tree
        code = tree.reused_aspect == 'code'
        for c in conflicts:
            if c.reused_aspect == 'code':
                code = True
            if (revision.revision is not None) and get_revno_from_tag(working_repo, c) > get_revno_from_tag(working_repo, revision):
                revision = c
        if code:
            aspect = 'code'
        else:
            aspect = tree.reused_aspect
        resolved = component.Component(conflicts[0].name, tree.branch, revision.revision, aspect)
        if resolved == tree:
            resolved = tree
        else:
            for c in conflicts:
                if c == resolved:
                    resolved = c
        tree.revision = resolved.revision
        tree.reused_aspect = resolved.reused_aspect
        tree.rank = resolved.rank
        tree.dependencies = resolved.dependencies
        for c in conflicts:
            if not c == resolved:
                c.revision = resolved.revision
                c.reused_aspect = resolved.reused_aspect
                c.rank = resolved.rank
                c.dependencies = resolved.dependencies
    components.append(tree)
    for d in tree.dependencies[:]:
        _detect_conflicts(working_repo, d, branch, top, components)'''
def unnecessary_dependencies(tree):
    """Print dependencies that may be redundant.

    Groups all nodes by (name, aspect, revision) and records each node's
    parent (as a string). When one parent's string representation is a
    substring of another's, the shared dependency is reported as possibly
    redundant. Diagnostic output only; the tree is not modified.
    """
    components = {(tree.name, tree.reused_aspect, tree.revision):[str(tree.parent)]}
    queue = []
    for d in tree.dependencies:
        queue.append(d)
    i = 0
    while i < len(queue):
        dep = queue[i]
        if (dep.name, dep.reused_aspect, dep.revision) in components:
            components[(dep.name, dep.reused_aspect, dep.revision)].append(str(dep.parent))
        else:
            components[(dep.name, dep.reused_aspect, dep.revision)] = [str(dep.parent)]
        for d in dep.dependencies:
            queue.append(d)
        i += 1
    unnecessary = {}
    for comp, parents in components.iteritems():
        if len(parents) > 1:
            for parent in parents:
                # Compare each parent against the ones after it only, so
                # every pair is examined once.
                for p in parents[parents.index(parent)+1:]:
                    if parent in p:
                        if comp in unnecessary:
                            unnecessary[comp].add(parent)
                        else:
                            unnecessary[comp] = set([parent])
    if unnecessary:
        print('\nThese dependencies may be redundant.')
        for comp, parent in unnecessary.iteritems():
            for p in parent:
                if comp[2] is None:
                    revision = ''
                else:
                    revision = comp[2]
                print('%s: %s, %s : %s' % (comp[0], comp[1], revision, p))
def _normLocales(locales):
    """Canonicalize a locale collection: split, best-fit each entry,
    de-duplicate, and sort.  A falsy input yields None."""
    if not locales:
        return None
    normalized = _uniquify([l10n.pslocale.bestFit(item) for item in _splitList(locales)])
    normalized.sort()
    return normalized
def visit(path, visitor, recurser=None, report=True, excludePrograms=False, debug=False):
    """Walk `path`, pruning folders and feeding selected files to `visitor`.

    For each folder a Conf is looked up — from the folder's own METADATA_FILE
    or from the nearest ancestor (up to `path`) that has one — and used to
    decide which sub-folders to descend into and which files to hand to
    ``visitor.visit(folder, file, relativePath)``.  When no Conf applies, the
    module-level NON_RECURSING_FOLDERS_PAT / shouldCheck() defaults are used.

    recurser, when given, may pre-filter candidate sub-folders through
    ``recurser.select(folder, dirs)``.  excludePrograms applies only to the
    first (top-level) folder and drops sub-folders that build executables
    (per isProgramDir), except ones named 'test'.

    Returns (visitedFiles, visitedFolders).
    """
    visitedFolders = 1  # the root folder itself counts as one
    visitedFiles = 0
    for folder, dirs, files in os.walk(path):
        if debug:
            print('For folder %s, original dirs=%s' % (folder, str(dirs)))
        folder = norm_folder(folder)
        # Never descend into bzr control folders.
        if '.bzr' in dirs:
            dirs.remove('.bzr')
        # On top-level folder, if we're excluding programs (e.g., utilities),
        # eliminate any folders that have components that build exe's.
        if excludePrograms:
            # Iterate backwards so dirs.remove() doesn't skip entries.
            i = len(dirs) - 1
            while i >= 0:
                thisDir = dirs[i]
                if (thisDir.lower() != 'test') and isProgramDir(folder + thisDir):
                    dirs.remove(thisDir)
                i -= 1
            excludePrograms = False
        if not recurser is None:
            dirs = recurser.select(folder, dirs)
        if debug:
            print('For folder %s, recurse candidates=%s' % (folder, str(dirs)))
        # Does this folder have anything that overrides?
        if METADATA_FILE in files:
            # Metadata file is config, not content: drop it from the scan.
            files.remove(METADATA_FILE)
            conf = Conf(folder, report=report, debug=debug)
        else:
            # No local metadata: search ancestor folders up to the walk root.
            truncated = folder
            if path[-1] == '/':
                startFolder = path[0:path.rfind('/')]
            else:
                startFolder = path
            while truncated != startFolder:
                truncated = truncated[0:truncated.rfind('/')]
                if METADATA_FILE in os.listdir(truncated):
                    conf = Conf(truncated, report=report, debug=debug)
                    break
            else:  # while/else: no ancestor up to the root had a metadata file
                conf = None
        relativePath = folder[len(path):]
        # Prune sub-folders in place so os.walk skips them; iterate backwards
        # because we remove from the list we are indexing.
        i = len(dirs) - 1
        while i >= 0:
            thisDir = dirs[i]
            if debug:
                print('evaluating recursion for %s' % thisDir)
            if conf:
                if not conf.shouldRecurse(thisDir):
                    if debug:
                        print('Not recursing into %s%s' % (folder, thisDir))
                    dirs.remove(thisDir)
                else:
                    if debug:
                        print('recursing into %s%s' % (folder, thisDir))
            else:
                m = NON_RECURSING_FOLDERS_PAT.match(thisDir)
                if m:
                    if debug:
                        print('Not recursing into %s%s' % (folder, thisDir))
                    dirs.remove(thisDir)
            i -= 1
        visitedFolders += len(dirs)
        for f in files:
            if conf:
                if conf.shouldCheck(f):
                    visitor.visit(folder, f, relativePath)
                    visitedFiles += 1
                else:
                    if debug:
                        print('not checking %s' % f)
            else:
                # No Conf in scope: fall back to the module-level filter.
                if shouldCheck(f, debug=debug):
                    visitor.visit(folder, f, relativePath)
                    visitedFiles += 1
                else:
                    if debug:
                        print('not checking %s' % f)
    return visitedFiles, visitedFolders
def get_friendly_name_for_path(path):
    """Return a short display name for *path*: the part after the first
    '/sandboxes/' segment (if any), with trailing slashes removed."""
    path = norm_folder(path)
    marker = '/sandboxes/'
    pos = path.find(marker)
    if pos != -1:
        path = path[pos + len(marker):]
    return path.rstrip('/')
class Conf:
    """Per-folder scan configuration backed by a metadata file.

    A Conf is built for the folder (or nearest ancestor) containing
    METADATA_FILE; its methods answer which sub-folders visit() should
    recurse into and which files it should check, plus UI/locale/milestone
    metadata queries.
    """
    def _findInConf(self, txt, pat):
        # Search txt with pat; on a match, store group(2) on self under the
        # attribute name '_' + _keyToAttr(group(1)).
        m = pat.search(txt)
        if m:
            name = '_' + _keyToAttr(m.group(1))
            setattr(self, name, m.group(2))
            if self.report:
                val = getattr(self, name)
                if type(val) == _REGEX_TYPE:
                    val = val.pattern
                #print('For %s, %s=%s' % (self.getRelativePath(), name, val))
    def getRelativePath(self):
        # Return self.path relative to the first '/code/' segment, if present.
        x = self.path
        i = x.find('/code/')
        if i > -1:
            x = x[i + 6:]
        return x
    def __init__(self, path, report=True, debug=False):
        # path: folder that owns the metadata file (normalized with a
        # trailing slash by norm_folder).
        self.path = norm_folder(path)
        self.report = report
        self.debug = debug
    def getTargetedPlatforms(self):
        # Stub: not implemented, returns None.
        pass
    def getSupportedPlatforms(self):
        # Stub: not implemented, returns None.
        pass
    def getTargetedLocales(self, component=''):
        """Return the normalized union of targeted locales across all UI
        sections of *component*, or None when nothing is configured."""
        tl = []
        section = get_section_info_from_disk(UI_SECTION_PREFIX, os.path.join(self.path, component))
        if UI_SECTION_PREFIX in section:
            for ui in section[UI_SECTION_PREFIX].split(','):
                uiSection = get_section_info_from_disk(ui, os.path.join(self.path, component))
                if TARGETED_LOCALES_OPTION in uiSection:
                    locales = uiSection[TARGETED_LOCALES_OPTION].split(',')
                    for loc in locales:
                        if loc not in tl:
                            tl.append(loc)
        return _normLocales(tl)
    def getSupportedLocales(self):
        # Stub: not implemented, returns None.
        pass
    def getMilestone(self, component=''):
        """Return the milestone option from the misc section, or None."""
        misc = get_section_info_from_disk(MISC_SECTION, os.path.join(self.path, component))
        return misc.get(MILESTONE_OPTION)
    def getExceptFolders(self):
        """Regex of folder names to skip; defaults to NON_RECURSING_FOLDERS_PAT."""
        section = get_section_info_from_disk(SCANNED_FOLDER_SECTION, self.path)
        if EXCLUDE_OPTION_PREFIX in section:
            return re.compile(section[EXCLUDE_OPTION_PREFIX])
        return NON_RECURSING_FOLDERS_PAT
    def getIncludeFolders(self):
        """Regex of folder names to include, or None when unconfigured."""
        section = get_section_info_from_disk(SCANNED_FOLDER_SECTION, self.path)
        if INCLUDE_OPTION_PREFIX in section:
            return re.compile(section[INCLUDE_OPTION_PREFIX])
        return None
    def getExceptFiles(self):
        """Regex of file names to skip, or None when unconfigured."""
        # NOTE(review): this reads SCANNED_FOLDER_SECTION while
        # getIncludeFiles() reads SCANNED_FILE_SECTION -- confirm the
        # asymmetry is intended and not a copy-paste slip.
        section = get_section_info_from_disk(SCANNED_FOLDER_SECTION, self.path)
        if EXCLUDE_OPTION_PREFIX in section:
            return re.compile(section[EXCLUDE_OPTION_PREFIX])
        return None
    def getIncludeFiles(self):
        """Regex of file names to check; defaults to INTERESTING_EXT_PAT."""
        section = get_section_info_from_disk(SCANNED_FILE_SECTION, self.path)
        if INCLUDE_OPTION_PREFIX in section:
            return re.compile(section[INCLUDE_OPTION_PREFIX])
        return INTERESTING_EXT_PAT
    def getUiPaths(self):
        """Map each configured UI name to its path option, when present."""
        paths = {}
        uis = self.getUis()
        if UI_SECTION_PREFIX in uis:
            for ui in uis[UI_SECTION_PREFIX].split(','):
                uiSection = get_section_info_from_disk(ui, self.path)
                if UI_PATH_OPTION in uiSection:
                    paths[ui] = uiSection[UI_PATH_OPTION]
        return paths
    def getUis(self):
        """Return the raw UI section info for this folder."""
        return get_section_info_from_disk(UI_SECTION_PREFIX, self.path)
    def getUi(self, relpath):
        """Return the first UI whose path occurs in *relpath*, else None."""
        paths = self.getUiPaths()
        for ui in paths:
            if relpath.find(paths[ui]) > -1:
                return ui
    def shouldRecurse(self, folder):
        """Decide whether visit() should descend into *folder*.

        Exclusion wins outright; otherwise an include pattern (if any)
        must match; otherwise recurse.
        """
        if folder == '.bzr':
            return False
        if self.debug:
            print('deciding whether to recurse into %s' % folder)
        exf = self.getExceptFolders()
        if exf:
            if exf.match(folder):
                return False
            else:
                if self.debug:
                    print('doesnt match regex "%s"' % exf.pattern)
        inf = self.getIncludeFolders()
        if inf:
            return bool(inf.match(folder))
        return True
    def shouldCheck(self, file):
        """Decide whether visit() should pass *file* to the visitor.

        An except-pattern match vetoes the file; otherwise the include
        pattern decides.  Returns a plain bool.
        """
        answer = None
        exf = self.getExceptFiles()
        if exf:
            if exf.match(file):
                answer = False
                if self.debug:
                    print('%s matched except pattern %s; should not check' % (file, exf.pattern))
        if answer is None:
            inf = self.getIncludeFiles()
            if inf:
                answer = bool(inf.match(file))
                if self.debug:
                    print('re.match(regex["%s"], "%s") returned %s' % (inf.pattern, file, str(answer)))
        return bool(answer)
def shouldCheck(file, debug=False):
    """Fallback file filter used when no Conf is in scope: a file is checked
    iff its name matches INTERESTING_EXT_PAT."""
    pattern = INTERESTING_EXT_PAT
    matched = bool(pattern.match(file))
    if debug:
        print('re.match(regex["%s"], "%s") returned %s' % (pattern.pattern, file, str(matched)))
    return bool(matched)
# Type objects used for Python 2 str/unicode/list dispatch in _splitList().
_STR_TYPE = type('')
_USTR_TYPE = type(u'')
_LST_TYPE = type([])
# Compiled-regex type; used by Conf._findInConf to detect pattern attributes.
_REGEX_TYPE = type(INTERESTING_EXT_PAT)
def _splitList(lst):
ltype = type(lst)
if ltype == _STR_TYPE or ltype == _USTR_TYPE:
lst = str(lst).replace(';',',').split(',')
elif ltype == _LST_TYPE:
lst = [str(x) for x in lst]
return lst
def _uniquify(lst):
x = {}
for item in lst:
x[item] = 1
return x.keys()
def _normPlat(plat):
'''Normalizes a platform name to something like "Windows 32-bit". This
name matches, by design, the name of the os and bitness returned by
buildinfo.py.'''
plat = str(plat).lower()
bitness = ''
if plat.find('win') > -1:
os = 'Windows'
elif plat.find('lin') > -1:
os = 'Linux'
elif (plat.find('darwin') > -1) or (plat.find('mac') > -1) or (plat.find('osx') > -1):
os = 'OSX'
if plat.find('64') > -1:
bitness = '64'
elif (plat.find('32') > -1) or (plat.find('86')):
bitness = '32'
plat = os
if bitness:
plat = plat + ' %s-bit' % bitness
return plat
def _normPlatforms(plats):
    """Canonicalize a platform collection: split, normalize each entry via
    _normPlat, de-duplicate, and sort.  None passes through as None."""
    if plats is None:
        return None
    normalized = _uniquify([_normPlat(item) for item in _splitList(plats)])
    normalized.sort()
    return normalized
# Matches a CMake add_executable() directive at the start of any line.
_PROGRAM_PAT = re.compile(r'^\s*add_executable', re.IGNORECASE | re.MULTILINE)
def isProgramDir(path):
    """Return True if *path* holds a CMakeLists.txt that builds an executable."""
    cmakelists = os.path.join(path, 'CMakeLists.txt')
    if os.path.isfile(cmakelists):
        txt = read_file(cmakelists)
        return bool(_PROGRAM_PAT.search(txt))
    return False
def print_tree(tree):
    """Dump a dependency tree to stdout in breadth-first order (Python 2).

    A blank line is emitted whenever a node's parent differs from the
    previous node's parent, grouping siblings visually.
    """
    queue = [tree]
    i = 0
    parent = str(tree.parent)
    while i < len(queue):
        dep = queue[i]
        # Enqueue children so they print after all nodes of this level.
        for d in dep.dependencies:
            queue.append(d)
        if str(queue[i].parent) != parent:
            parent = str(queue[i].parent)
            print ''
        print queue[i]
        i += 1
# Node record for the dependency graph; 'dependencies' is a mutable list that
# Components.add_dependencies() appends component names to.
ComponentInfo = collections.namedtuple('ComponentInfo', ['name', 'aspect', 'dependencies'])
class Components:
    """Builds the dependency graph for a top-level component.

    Dependencies are discovered by running ``bzr cat`` on each component's
    ``code/metadata.txt`` in the source repo and reading its dependencies
    section; the graph can then be flattened leaf-first via
    dependency_order().
    """
    def __init__(self, working_repo, branch, componentname, excludes=None):
        """Build the graph rooted at *componentname*.

        excludes, when given, is a dict with 'components' (names dropped from
        the final ordering) and 'tree' (names whose own dependencies are not
        expanded).  BUG FIX: the original used a mutable default dict, which
        was shared across instances/calls and mutated by the
        'no buildupto sandbox' logic in lookup_dependencies().
        """
        if excludes is None:
            excludes = {'components': [], 'tree': []}
        self.omit_sandbox = excludes['components']
        self.omit_tree = excludes['tree']
        self.branch = branch
        self.infos = {}
        self.componentorder = [componentname]
        self.top = componentname
        self.infos[componentname] = ComponentInfo(componentname, 'code', [])
        self.add_dependencies(working_repo, componentname)
    def add_dependencies(self, working_repo, componentname):
        """Recursively record the dependencies of *componentname*."""
        deps = self.lookup_dependencies(working_repo, componentname, returnList=True)
        wr = vcs.get_working_repository()
        deps = [wr.normalize(comp, 'code', self.branch)[0] for comp in deps]
        for comp in deps:
            self.infos[componentname].dependencies.append(comp)
            if comp not in self.infos:
                self.componentorder.append(comp)
                # Components in omit_tree are listed but never expanded.
                if comp not in self.omit_tree:
                    self.infos[comp] = ComponentInfo(comp, 'code', [])
                    self.add_dependencies(working_repo, comp)
    def lookup_dependencies(self, working_repo, componentname, returnList=False):
        """Read *componentname*'s metadata.txt via ``bzr cat``.

        Returns a list of dependency names when returnList is true, otherwise
        a dict of name -> raw option value; returns [] when bzr fails.  As a
        side effect, components flagged 'no buildupto sandbox' in their misc
        section are appended to self.omit_sandbox.
        """
        src = '%s/%s/%s/code/metadata.txt' % (working_repo.source_reporoot, self.branch, componentname)
        p = subprocess.Popen(['bzr', 'cat', src], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if 0 != p.wait():
            # Single-argument print() behaves identically on Python 2 and 3
            # (the original used py2-only print statements here).
            print('unable to get deps for %s' % componentname)
            print(p.stderr.read())
            return []
        conf = ConfigParser.ConfigParser()
        conf.readfp(p.stdout)
        deps = {}
        comps = []
        if DEPENDENCIES_SECTION in conf.sections():
            for option in conf.options(DEPENDENCIES_SECTION):
                comps.append(option)
                deps[option] = conf.get(DEPENDENCIES_SECTION, option)
        if MISC_SECTION in conf.sections():
            for option in conf.options(MISC_SECTION):
                if option.lower() == 'no buildupto sandbox':
                    self.omit_sandbox.append(componentname)
        if returnList:
            return comps
        return deps
    def dependency_order(self):
        """Return component names leaf-first, minus omit_sandbox entries.

        Repeatedly picks the first component (in discovery order) whose
        remaining dependencies are empty or consist only of omit_tree
        entries, then erases it from every other component's dependency
        list.  NOTE(review): if no component ever qualifies (e.g. a cycle),
        this loops forever -- same as the original.
        """
        infos = copy.deepcopy(self.infos)
        processed = []
        while infos:
            for name in self.componentorder:
                if name in infos and (not infos[name].dependencies or set(self.omit_tree).intersection(infos[name].dependencies)):
                    processed.append(name)
                    del infos[name]
                    break
            for name in self.componentorder:
                if name in infos and processed[-1] in infos[name].dependencies:
                    infos[name].dependencies.remove(processed[-1])
        return [name for name in processed if not name in self.omit_sandbox]
def get_components_in_product(working_repo, branch, topcomponent, excludes=None):
    """Return *topcomponent*'s components in dependency (leaf-first) order.

    BUG FIX: the original declared ``excludes={'components':[],'tree':[]}``
    as a mutable default; Components appends to excludes['components'] (the
    'no buildupto sandbox' flag), so results leaked across calls.  A fresh
    dict is now built per call; passing an explicit excludes dict behaves
    exactly as before.
    """
    if excludes is None:
        excludes = {'components': [], 'tree': []}
    components = Components(working_repo, branch, topcomponent, excludes)
    return components.dependency_order()
def _define_options():
    """Build the optparse parser for the command-line entry point."""
    usage = 'Usage: %prog [options]\n\nEvaluate sandbox and record results.'
    parser = optparse.OptionParser(usage)
    # Default sandbox is wherever the process is currently running.
    parser.add_option('--sandbox', dest="sandbox",
                      help="path to sandbox to build",
                      metavar="FLDR", default=sandbox.current.get_root())
    parser.add_option('--dry-run', dest="dry_run", action='store_true',
                      help="simulate and return success", default=False)
    return parser
if __name__ == '__main__':
    # Command-line entry point: locate the enclosing sandbox and print its
    # components in dependency order.  Exit code 0 on success, 1 on failure.
    parser = _define_options()
    options, args = parser.parse_args(sys.argv)
    try:
        sb = sandbox.create_from_within(options.sandbox)
        if not sb:
            print('%s does not appear to be inside a sandbox.' % os.path.abspath(options.sandbox))
            err = 1
        else:
            if 1:
                import pprint
                # BUG FIX: get_components_in_product requires working_repo as
                # its first argument; the original call omitted it and would
                # raise TypeError.
                pprint.pprint(get_components_in_product(vcs.get_working_repository(), branch=sb.get_branch(), topcomponent=sb.get_top_component()))
            else:
                # Dead branch (guarded by `if 1` above): full tree dump.
                # NOTE(review): get_components_inv_dep_order is not defined in
                # this module -- confirm where it should come from.
                deps, tree = get_components_inv_dep_order(vcs.get_working_repository(), sb.get_top_component(), code_root=sb.get_code_root(),
                    branch=sb.get_branch(), aspect=sb.get_component_reused_aspect(sb.get_top_component()), debug=True)
                print('Dependencies:')
                for comp in deps:
                    print(comp)
                print('-----------------------------------')
                print('Full dependance tree')
                print_tree(tree)
            # BUG FIX: err = 0 was previously outside this else and clobbered
            # the err = 1 set when no sandbox was found, so the process always
            # exited 0 on that failure.
            err = 0
    except Exception:
        # Top-level boundary: log the traceback and report failure.
        traceback.print_exc()
        err = 1
    sys.exit(err)
| {
"content_hash": "9e3d98a61bccf7fcaaae7213d96b116e",
"timestamp": "",
"source": "github",
"line_count": 846,
"max_line_length": 191,
"avg_line_length": 40.28605200945626,
"alnum_prop": 0.5858811102634822,
"repo_name": "perfectsearch/sandman",
"id": "c0a35be00acbce57eb1425a60619a6de05d8924b",
"size": "34255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/buildscripts/metadata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1219877"
},
{
"name": "Shell",
"bytes": "4654"
}
],
"symlink_target": ""
} |
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.dialogflow_v2.types import intent
from google.cloud.dialogflow_v2.types import intent as gcd_intent
from google.cloud.location import locations_pb2 # type: ignore
from google.longrunning import operations_pb2
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import IntentsTransport, DEFAULT_CLIENT_INFO
class IntentsGrpcTransport(IntentsTransport):
    """gRPC backend transport for Intents.
    Service for managing [Intents][google.cloud.dialogflow.v2.Intent].
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Cache of lazily-created stub callables, keyed by RPC method name.
    _stubs: Dict[str, Callable]
    def __init__(
        self,
        *,
        host: str = "dialogflow.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        channel: Optional[grpc.Channel] = None,
        api_mtls_endpoint: Optional[str] = None,
        client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
        client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        api_audience: Optional[str] = None,
    ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for the grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure a mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        Raises:
          google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
          google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
              and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        self._stubs: Dict[str, Callable] = {}
        self._operations_client: Optional[operations_v1.OperationsClient] = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
            api_audience=api_audience,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                # use the credentials which are saved
                credentials=self._credentials,
                # Set ``credentials_file`` to ``None`` here as
                # the credentials that we saved earlier should be used.
                credentials_file=None,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @classmethod
    def create_channel(
        cls,
        host: str = "dialogflow.googleapis.com",
        credentials: Optional[ga_credentials.Credentials] = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        **kwargs,
    ) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            quota_project_id=quota_project_id,
            default_scopes=cls.AUTH_SCOPES,
            scopes=scopes,
            default_host=cls.DEFAULT_HOST,
            **kwargs,
        )
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service."""
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Quick check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
        # Return the client from cache.
        return self._operations_client
    @property
    def list_intents(
        self,
    ) -> Callable[[intent.ListIntentsRequest], intent.ListIntentsResponse]:
        r"""Return a callable for the list intents method over gRPC.
        Returns the list of all intents in the specified
        agent.
        Returns:
            Callable[[~.ListIntentsRequest],
                    ~.ListIntentsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_intents" not in self._stubs:
            self._stubs["list_intents"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.v2.Intents/ListIntents",
                request_serializer=intent.ListIntentsRequest.serialize,
                response_deserializer=intent.ListIntentsResponse.deserialize,
            )
        return self._stubs["list_intents"]
    @property
    def get_intent(self) -> Callable[[intent.GetIntentRequest], intent.Intent]:
        r"""Return a callable for the get intent method over gRPC.
        Retrieves the specified intent.
        Returns:
            Callable[[~.GetIntentRequest],
                    ~.Intent]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_intent" not in self._stubs:
            self._stubs["get_intent"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.v2.Intents/GetIntent",
                request_serializer=intent.GetIntentRequest.serialize,
                response_deserializer=intent.Intent.deserialize,
            )
        return self._stubs["get_intent"]
    @property
    def create_intent(
        self,
    ) -> Callable[[gcd_intent.CreateIntentRequest], gcd_intent.Intent]:
        r"""Return a callable for the create intent method over gRPC.
        Creates an intent in the specified agent.
        Note: You should always train an agent prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
        Returns:
            Callable[[~.CreateIntentRequest],
                    ~.Intent]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "create_intent" not in self._stubs:
            self._stubs["create_intent"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.v2.Intents/CreateIntent",
                request_serializer=gcd_intent.CreateIntentRequest.serialize,
                response_deserializer=gcd_intent.Intent.deserialize,
            )
        return self._stubs["create_intent"]
    @property
    def update_intent(
        self,
    ) -> Callable[[gcd_intent.UpdateIntentRequest], gcd_intent.Intent]:
        r"""Return a callable for the update intent method over gRPC.
        Updates the specified intent.
        Note: You should always train an agent prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
        Returns:
            Callable[[~.UpdateIntentRequest],
                    ~.Intent]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "update_intent" not in self._stubs:
            self._stubs["update_intent"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.v2.Intents/UpdateIntent",
                request_serializer=gcd_intent.UpdateIntentRequest.serialize,
                response_deserializer=gcd_intent.Intent.deserialize,
            )
        return self._stubs["update_intent"]
    @property
    def delete_intent(self) -> Callable[[intent.DeleteIntentRequest], empty_pb2.Empty]:
        r"""Return a callable for the delete intent method over gRPC.
        Deletes the specified intent and its direct or indirect followup
        intents.
        Note: You should always train an agent prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
        Returns:
            Callable[[~.DeleteIntentRequest],
                    ~.Empty]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "delete_intent" not in self._stubs:
            self._stubs["delete_intent"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.v2.Intents/DeleteIntent",
                request_serializer=intent.DeleteIntentRequest.serialize,
                response_deserializer=empty_pb2.Empty.FromString,
            )
        return self._stubs["delete_intent"]
    @property
    def batch_update_intents(
        self,
    ) -> Callable[[intent.BatchUpdateIntentsRequest], operations_pb2.Operation]:
        r"""Return a callable for the batch update intents method over gRPC.
        Updates/Creates multiple intents in the specified agent.
        This method is a `long-running
        operation <https://cloud.google.com/dialogflow/es/docs/how/long-running-operations>`__.
        The returned ``Operation`` type has the following
        method-specific fields:
        -  ``metadata``: An empty `Struct
           message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
        -  ``response``:
           [BatchUpdateIntentsResponse][google.cloud.dialogflow.v2.BatchUpdateIntentsResponse]
        Note: You should always train an agent prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
        Returns:
            Callable[[~.BatchUpdateIntentsRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "batch_update_intents" not in self._stubs:
            self._stubs["batch_update_intents"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.v2.Intents/BatchUpdateIntents",
                request_serializer=intent.BatchUpdateIntentsRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["batch_update_intents"]
    @property
    def batch_delete_intents(
        self,
    ) -> Callable[[intent.BatchDeleteIntentsRequest], operations_pb2.Operation]:
        r"""Return a callable for the batch delete intents method over gRPC.
        Deletes intents in the specified agent.
        This method is a `long-running
        operation <https://cloud.google.com/dialogflow/es/docs/how/long-running-operations>`__.
        The returned ``Operation`` type has the following
        method-specific fields:
        -  ``metadata``: An empty `Struct
           message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
        -  ``response``: An `Empty
           message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#empty>`__
        Note: You should always train an agent prior to sending it
        queries. See the `training
        documentation <https://cloud.google.com/dialogflow/es/docs/training>`__.
        Returns:
            Callable[[~.BatchDeleteIntentsRequest],
                    ~.Operation]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "batch_delete_intents" not in self._stubs:
            self._stubs["batch_delete_intents"] = self.grpc_channel.unary_unary(
                "/google.cloud.dialogflow.v2.Intents/BatchDeleteIntents",
                request_serializer=intent.BatchDeleteIntentsRequest.serialize,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["batch_delete_intents"]
    def close(self):
        """Release the underlying gRPC channel."""
        self.grpc_channel.close()
    @property
    def cancel_operation(
        self,
    ) -> Callable[[operations_pb2.CancelOperationRequest], None]:
        r"""Return a callable for the cancel_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "cancel_operation" not in self._stubs:
            self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/CancelOperation",
                request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
                response_deserializer=None,
            )
        return self._stubs["cancel_operation"]
    @property
    def get_operation(
        self,
    ) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
        r"""Return a callable for the get_operation method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_operation" not in self._stubs:
            self._stubs["get_operation"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/GetOperation",
                request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
                response_deserializer=operations_pb2.Operation.FromString,
            )
        return self._stubs["get_operation"]
    @property
    def list_operations(
        self,
    ) -> Callable[
        [operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
    ]:
        r"""Return a callable for the list_operations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_operations" not in self._stubs:
            self._stubs["list_operations"] = self.grpc_channel.unary_unary(
                "/google.longrunning.Operations/ListOperations",
                request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
                response_deserializer=operations_pb2.ListOperationsResponse.FromString,
            )
        return self._stubs["list_operations"]
    @property
    def list_locations(
        self,
    ) -> Callable[
        [locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
    ]:
        r"""Return a callable for the list locations method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "list_locations" not in self._stubs:
            self._stubs["list_locations"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/ListLocations",
                request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
                response_deserializer=locations_pb2.ListLocationsResponse.FromString,
            )
        return self._stubs["list_locations"]
    @property
    def get_location(
        self,
    ) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
        r"""Return a callable for the get location method over gRPC."""
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if "get_location" not in self._stubs:
            self._stubs["get_location"] = self.grpc_channel.unary_unary(
                "/google.cloud.location.Locations/GetLocation",
                request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
                response_deserializer=locations_pb2.Location.FromString,
            )
        return self._stubs["get_location"]
    @property
    def kind(self) -> str:
        """Identify this transport flavor ("grpc")."""
        return "grpc"
# Public API of this module.
__all__ = ("IntentsGrpcTransport",)
| {
"content_hash": "e676731891e5a3123409dd9878428764",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 108,
"avg_line_length": 44.34234234234234,
"alnum_prop": 0.6229175132060139,
"repo_name": "googleapis/python-dialogflow",
"id": "0e230676b8d26016874e8b6c7156f04712aa24ef",
"size": "25210",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/dialogflow_v2/services/intents/transports/grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
import sphinx_rtd_theme
from sqlitebiter import __author__, __copyright__, __name__, __version__
# Sphinx build configuration for the sqlitebiter documentation.
# Values not set here fall back to Sphinx defaults; commented-out options
# are kept as documentation of what can be tuned.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
    'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# Project identity is sourced from the sqlitebiter package itself so the
# docs never drift out of sync with the code.
project = __name__
copyright = __copyright__
author = __author__
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'sqlitebiter v0.1.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'sqlitebiterdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'sqlitebiter.tex', 'sqlitebiter Documentation',
     'Tsuyoshi Hombashi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'sqlitebiter', 'sqlitebiter Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'sqlitebiter', 'sqlitebiter Documentation',
     author, 'sqlitebiter', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "9565b46b740932bca8c9400f7c040874",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 80,
"avg_line_length": 32.51459854014598,
"alnum_prop": 0.7027724772701762,
"repo_name": "thombashi/sqlitebiter",
"id": "b65dae145d3725d0c72d74b029b349a97edfba40",
"size": "8909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "Dockerfile",
"bytes": "559"
},
{
"name": "Jupyter Notebook",
"bytes": "48962"
},
{
"name": "Makefile",
"bytes": "2262"
},
{
"name": "PowerShell",
"bytes": "415"
},
{
"name": "Python",
"bytes": "151333"
},
{
"name": "Shell",
"bytes": "4402"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from yepes.utils.minifier import minify_css, minify_js
__all__ = ('HtmlMinifier', 'minify')
# Template wrapped around an index into HtmlMinifier.placeholders; the
# character sequence is chosen to be unlikely to appear in real markup.
PLACEHOLDER = '~({{[{0}]}})~'
# Patterns for content that is stripped, collapsed, or temporarily
# reserved (protected from whitespace collapsing) during minification.
COMMENTS_RE = re.compile(r'<!--.*?-->', re.DOTALL)
CONDITIONAL_COMMENTS_RE = re.compile(r'(<!--\[[^\]]+\]>)(.*?)(<!\[[^\]]+\]-->)', re.DOTALL)
EMPTY_LINES_RE = re.compile(r'\n{2,}')
LEADING_SPACES_RE = re.compile(r'^ +', re.M)
NEWLINES_RE = re.compile(r'(\r\n|\r)')
# Inverse of PLACEHOLDER: captures the stored fragment's index.
PLACEHOLDERS_RE = re.compile(r'\~\(\{\[(\d+)\]\}\)\~')
PRES_RE = re.compile(r'(<pre\b.*?>)(.*?)(<\/pre>)', re.I | re.DOTALL)
SCRIPTS_RE = re.compile(r'(<script\b.*?>)(.*?)(<\/script>)', re.I | re.DOTALL)
STYLES_RE = re.compile(r'(<style\b.*?>)(.*?)(<\/style>)', re.I | re.DOTALL)
TEXTAREAS_RE = re.compile(r'(<textarea\b.*?>)(.*?)(<\/textarea>)', re.I | re.DOTALL)
TRAILING_SPACES_RE = re.compile(r' +$', re.M)
WHITESPACES_RE = re.compile(r'[ \t]+')
class HtmlMinifier(object):
    """
    Strips superfluous whitespace, comments and blank lines from HTML.

    Content that must survive verbatim (``pre``/``textarea`` bodies,
    conditional comments, minified script/style bodies) is swapped out
    for numbered placeholders first and restored at the very end.
    This class takes code from the ``html_press`` project.
    """

    def minify(self, code):
        # Fresh placeholder store per top-level invocation.
        self.placeholders = []
        return self._minify(code)

    def _minify(self, code):
        # Pass order matters: protected regions are reserved before the
        # whitespace passes run, and placeholders are restored last.
        steps = (
            self.process_newlines,
            self.process_conditional_comments,
            self.process_comments,
            self.process_pres,
            self.process_textareas,
            self.process_scripts,
            self.process_styles,
            self.process_whitespaces,
            self.process_leading_spaces,
            self.process_trailing_spaces,
            self.process_empty_lines,
            self.fill_placeholders,
        )
        for step in steps:
            code = step(code)
        return code

    def conditional_coments_replacement(self, match):
        # Conditional-comment bodies are minified recursively, then the
        # whole construct is reserved so later passes leave it alone.
        inner = self._minify(match.group(2))
        return self.reserve(match.group(1) + inner + match.group(3))

    def fill_placeholders(self, code):
        return PLACEHOLDERS_RE.sub(self.placeholders_replacement, code)

    def placeholders_replacement(self, match):
        index = int(match.group(1))
        try:
            return self.placeholders[index]
        except IndexError:
            # Unknown index: drop the marker rather than leak it.
            return ''

    def pres_replacement(self, match):
        # <pre> content is preserved verbatim.
        return match.group(1) + self.reserve(match.group(2)) + match.group(3)

    def process_comments(self, code):
        return COMMENTS_RE.sub('', code)

    def process_conditional_comments(self, code):
        return CONDITIONAL_COMMENTS_RE.sub(
            self.conditional_coments_replacement, code)

    def process_empty_lines(self, code):
        return EMPTY_LINES_RE.sub(r'\n', code)

    def process_leading_spaces(self, code):
        return LEADING_SPACES_RE.sub('', code)

    def process_newlines(self, code):
        return NEWLINES_RE.sub(r'\n', code)

    def process_pres(self, code):
        return PRES_RE.sub(self.pres_replacement, code)

    def process_scripts(self, code):
        return SCRIPTS_RE.sub(self.scripts_replacement, code)

    def process_styles(self, code):
        return STYLES_RE.sub(self.styles_replacement, code)

    def process_textareas(self, code):
        return TEXTAREAS_RE.sub(self.textareas_replacement, code)

    def process_trailing_spaces(self, code):
        return TRAILING_SPACES_RE.sub('', code)

    def process_whitespaces(self, code):
        return WHITESPACES_RE.sub(' ', code)

    def reserve(self, code):
        # Store the fragment and hand back its numbered placeholder.
        index = len(self.placeholders)
        self.placeholders.append(code)
        return PLACEHOLDER.format(index)

    def scripts_replacement(self, match):
        content = match.group(2)
        content = minify_js(content) if content.strip() else ''
        if content.endswith('\n'):
            content = '\n' + content
        return match.group(1) + self.reserve(content) + match.group(3)

    def styles_replacement(self, match):
        content = match.group(2)
        content = minify_css(content) if content.strip() else ''
        if content.endswith('\n'):
            content = '\n' + content
        return match.group(1) + self.reserve(content) + match.group(3)

    def textareas_replacement(self, match):
        # <textarea> content is preserved verbatim.
        return match.group(1) + self.reserve(match.group(2)) + match.group(3)
def minify(code):
    """Minify *code* using a throwaway :class:`HtmlMinifier` instance."""
    return HtmlMinifier().minify(code)
| {
"content_hash": "fda7adeb5eac611a60d3491fe401afdb",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 91,
"avg_line_length": 32.43037974683544,
"alnum_prop": 0.5901639344262295,
"repo_name": "samuelmaudo/yepes",
"id": "7dd02a3e19494c166252e0d8b488b0dff014b6c5",
"size": "5148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yepes/utils/minifier/backends/html.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1485"
},
{
"name": "CSS",
"bytes": "2805"
},
{
"name": "HTML",
"bytes": "18543"
},
{
"name": "JavaScript",
"bytes": "56039"
},
{
"name": "Python",
"bytes": "2415982"
}
],
"symlink_target": ""
} |
import paramiko
import io
import socket
import stat
import threading
from .. import base
from girder.api import sftp
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.upload import Upload
from girder.models.user import User
# Module-level SFTP server instance; created in setUpModule(), torn down in
# tearDownModule().
server = None
# Local port the test SFTP server listens on.
TEST_PORT = 10551
# Throwaway RSA host key used only by this in-process test server.  It is
# intentionally committed -- it protects nothing outside of these tests.
TEST_KEY = paramiko.RSAKey.from_private_key(io.StringIO("""-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAwdH5tlaZu52adYvW57DcAFknzOKX8+/axDmQdTcg1HwEOnT2
TMSFGciwUQMmya+0i23ZOUtZQutj8fb66szrBZ7qpIvSG6TRyxGuM6PkfAUcBCHO
TGFzaJPnnvUXC8dlxoUIdBaUCmSblvj2q2CTNy53ybAmiiSpahjvBO16pvjbNn+i
EGucSQn71OTMhoSOWtS/VcJC6JPd6kxSdl1EiESbOrjAdNDKMBnfYCkxPG4ulAqe
y5jpfgQiUC0Q3CoWbj/ybAv73JsFndPcpvI8n5EsXeptuWI4CXSorYOuVwURLuzP
z1PkI4ZsYnSnuQG/GReAZnwVDaVJ/uhYMMs1sQIDAQABAoIBADKOmguFBW7aCntU
8cbX7Fsu5mHcTXS1ASSkO1lH+wlSHCw/bCvUKz/xiIRpRQnhCkBAdCQs0mjRS+3G
1ea/cyKxNFWdnz3UvWCyCPWxb50mHAu74bssxFToF8fv+IX7CkJBW1YkuZMIcUlt
QbKsa1o+hcKXb0YjkAl73YU0iQTaet7B1x1X0qkVPEWWURTg3z65TNI96t8p28dh
4HgEoU0Jtfsfzb7u1H4/m3Q28J1S+cTkER/VIgLzMeYXr2MooIQc3QAMXATpXkhM
y6u0LYh+kW1XD4ZnyzTp49BMf76rS8VhsYN6f+jLhJUf/5O+m8NFGuCq15TFyQAH
vMBxPRECgYEA4+fxYuuOq+SilYpejD4EMwvrClixHOfTojlnAyUaJZSnyVp/Y4l+
QmFmbNpfRKN1fv24e9f9CmA8nd5A3kxBjJFhzaaxbFG+jI47fqOu9NadXPHaxvyq
BI2aHx4sqp/Z/ct/klht5hxD8UFMRFbaaLYAojKg1nL0g/88wwwN9LUCgYEA2bZh
873OGT7sNXHin2rXD5XEYXqjLy51hed4ZdtJXFrKhg8ozWqaOZ79GXustdRanzTV
zDeTweI0hg7adbKyBNeuQF8VSOK6ws2wPPCuUbQTVYaepqPuT+VhzAB1GVJ1uF/T
YxgqXOvg9QwnZ4Fjlv3b/52R89bTP+Yr6GcQdo0CgYAvLQ38igIodtVo2xGjOhso
bekjZSSUdTCLvhIixoVZDiKFPaRs+EMYfozzL2jVDnj95otPp3ALu8wQabdHzMUs
0dNK/JxxbaJh+fc6yasnp10/phjBY//VnXIvytE4KIq5TGyF4KQvI960i+27n7bq
QfJzoMNGYNlYkXcEcPRamQKBgQCVCYWElirAnZKWA6BgAYO3547ILGwJoIRTZmHF
WJif4IdDvpzwAkoRqAUbrM5Oq1BeLI0vf9xmnbPXEdP7PpkfN4bSCkVH3+557NT4
4spypBOYOM/iw9YgW6bXQHjpHMn5rZ/H9oMJmXAmUGupL6o9cwtnsTZ49lcnJypn
riZXAQKBgQCgiJ/A11HX7fUgFzBB9no2Sy1hS3u1Ld35nZf7RDegVoEn/UdWdOxn
H2T9t0EzIoSqkfPRrsqN8sv/TMIohS6frOpBojEvwUs5mxjVwswq/QgBSV2FqYck
VeccLgZzTSMNzCDMbtM+zGG5WktzFojrMIhfD0SM3CB3jECF+Dfdtg==
-----END RSA PRIVATE KEY-----
"""))
def setUpModule():
    """Start the test SFTP server on a background daemon thread."""
    global server
    server = sftp.SftpServer(('localhost', TEST_PORT), TEST_KEY)
    worker = threading.Thread(target=server.serve_forever)
    worker.daemon = True
    worker.start()
def tearDownModule():
    """Shut down the SFTP server (if it was started) and drop databases."""
    if server is not None:
        server.shutdown()
        server.server_close()
    base.dropAllTestDatabases()
class SftpTestCase(base.TestCase):
    """End-to-end tests of Girder's SFTP service against the live
    in-process server started by :func:`setUpModule`."""

    def testSftpService(self):
        """Exercise authentication, directory listing, file reads, stat,
        and anonymous access over a real paramiko client connection.

        The steps below are order-dependent: fixture data is created
        first, then each client session builds on it.
        """
        users = ({
            'email': 'admin@girder.test',
            'login': 'admin',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'passwd'
        }, {
            'email': 'regularuser@girder.test',
            'login': 'regularuser',
            'firstName': 'First',
            'lastName': 'Last',
            'password': 'passwd'
        })
        admin, user = [User().createUser(**user) for user in users]
        collections = ({
            'name': 'public collection',
            'public': True,
            'creator': admin
        }, {
            'name': 'private collection',
            'public': False,
            'creator': admin
        })
        privateFolder = Folder().findOne({
            'parentCollection': 'user',
            'parentId': user['_id'],
            'name': 'Private'
        })
        self.assertIsNotNone(privateFolder)
        Upload().uploadFromFile(
            io.BytesIO(b'hello world'), size=11, name='test.txt', parentType='folder',
            parent=privateFolder, user=user)
        for coll in collections:
            Collection().createCollection(**coll)
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # Incorrect password should raise authentication error
        with self.assertRaises(paramiko.AuthenticationException):
            client.connect(
                'localhost', TEST_PORT, username='admin', password='badpass', look_for_keys=False,
                allow_agent=False)
        # Authenticate as admin
        client.connect(
            'localhost', TEST_PORT, username='admin', password='passwd', look_for_keys=False,
            allow_agent=False)
        sftpClient = client.open_sftp()
        self.assertEqual(sftpClient.listdir('/'), ['collection', 'user'])
        # Listing an invalid top level entity should fail
        with self.assertRaises(IOError):
            sftpClient.listdir('/foo')
        # Test listing of users, collections, and subfolders
        self.assertEqual(set(sftpClient.listdir('/user/')), {'admin', 'regularuser'})
        self.assertEqual(set(sftpClient.listdir('/user/admin')), {'Public', 'Private'})
        self.assertEqual(
            set(sftpClient.listdir('/collection')), {'public collection', 'private collection'})
        self.assertEqual(sftpClient.listdir('/user/regularuser/Private'), ['test.txt'])
        self.assertEqual(sftpClient.listdir('/user/regularuser/Private/test.txt'), ['test.txt'])
        with self.assertRaisesRegex(IOError, 'No such file'):
            sftpClient.listdir('/user/nonexistent')
        with self.assertRaisesRegex(IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private')
        # Read a file using small enough buf size to require multiple chunks internally.
        file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
        self.assertEqual(file.read(2), b'he')
        self.assertEqual(file.read(), b'llo world')
        # Make sure we enforce max buffer length
        tmp, sftp.MAX_BUF_LEN = sftp.MAX_BUF_LEN, 2
        file = sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r', bufsize=4)
        with self.assertRaises(IOError):
            file.read()
        sftp.MAX_BUF_LEN = tmp
        # Test stat capability
        info = sftpClient.stat('/user/regularuser/Private')
        self.assertTrue(stat.S_ISDIR(info.st_mode))
        self.assertFalse(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_mode & 0o777, 0o777)
        # lstat should also work
        info = sftpClient.lstat('/user/regularuser/Private/test.txt/test.txt')
        self.assertFalse(stat.S_ISDIR(info.st_mode))
        self.assertTrue(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_size, 11)
        self.assertEqual(info.st_mode & 0o777, 0o777)
        # File stat implementations should agree
        info = file.stat()
        self.assertFalse(stat.S_ISDIR(info.st_mode))
        self.assertTrue(stat.S_ISREG(info.st_mode))
        self.assertEqual(info.st_size, 11)
        self.assertEqual(info.st_mode & 0o777, 0o777)
        # Make sure we can stat the top-level entities
        for path in ('/', '/user', '/collection'):
            info = sftpClient.stat(path)
            self.assertTrue(stat.S_ISDIR(info.st_mode))
            self.assertFalse(stat.S_ISREG(info.st_mode))
            self.assertEqual(info.st_mode & 0o777, 0o777)
        sftpClient.close()
        client.close()
        # Test that any username other than anonymous will fail using auth_none.
        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
            trans.auth_none('')
        trans.close()
        sock.close()
        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        with self.assertRaises(paramiko.ssh_exception.BadAuthenticationType):
            trans.auth_none('eponymous')
        trans.close()
        sock.close()
        # Test that a connection can be opened for anonymous access using auth_none.
        sock = socket.socket()
        sock.connect(('localhost', TEST_PORT))
        trans = paramiko.Transport(sock)
        trans.connect()
        trans.auth_none(username='anonymous')
        sftpClient = paramiko.SFTPClient.from_transport(trans)
        # Only public data should be visible
        self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
        self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
        self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])
        # Make sure the client cannot distinguish between a resource that does not exist
        # vs. one they simply don't have read access to.
        with self.assertRaisesRegex(IOError, 'No such file'):
            sftpClient.listdir('/user/regularuser/Private')
        with self.assertRaisesRegex(IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')
        sftpClient.close()
        trans.close()
        sock.close()
        # Test anonymous access
        client = paramiko.SSHClient()
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(
            'localhost', TEST_PORT, username='anonymous', password='', look_for_keys=False,
            allow_agent=False)
        sftpClient = client.open_sftp()
        # Only public data should be visible
        self.assertEqual(set(sftpClient.listdir('/user')), {'admin', 'regularuser'})
        self.assertEqual(sftpClient.listdir('/collection'), ['public collection'])
        self.assertEqual(sftpClient.listdir('/user/admin'), ['Public'])
        # Make sure the client cannot distinguish between a resource that does not exist
        # vs. one they simply don't have read access to.
        with self.assertRaisesRegex(IOError, 'No such file'):
            sftpClient.listdir('/user/regularuser/Private')
        with self.assertRaisesRegex(IOError, 'No such file'):
            sftpClient.file('/user/regularuser/Private/test.txt/test.txt', 'r')
        sftpClient.close()
        client.close()
| {
"content_hash": "2d19176ab79123710eed50dd9114083b",
"timestamp": "",
"source": "github",
"line_count": 249,
"max_line_length": 98,
"avg_line_length": 40.1566265060241,
"alnum_prop": 0.6745674567456745,
"repo_name": "RafaelPalomar/girder",
"id": "e7683f563aaee42a2a46657d6b0180f873245345",
"size": "10023",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/cases/sftp_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "57029"
},
{
"name": "CSS",
"bytes": "53651"
},
{
"name": "HTML",
"bytes": "149014"
},
{
"name": "JavaScript",
"bytes": "1207526"
},
{
"name": "Mako",
"bytes": "8247"
},
{
"name": "Python",
"bytes": "2003341"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "10595"
},
{
"name": "Shell",
"bytes": "10823"
}
],
"symlink_target": ""
} |
import sys
from pybindgen import param, retval
from dune.pymor.core import prepare_python_bindings, inject_lib_dune_pymor, finalize_python_bindings
from dune.pymor.discretizations import inject_StationaryDiscretizationImplementation
def inject_Example(module):
    """Inject the user-code ``Example`` namespace into *module*."""
    ns = module.add_cpp_namespace('Example')
    problem = ns.add_class('AnalyticalProblem')
    # Default constructor plus the dimension-taking overload.
    problem.add_constructor([])
    problem.add_constructor([param('const int', 'dd')])
if __name__ == '__main__':
    # prepare the module (order below matters: the base library must be
    # injected before any user code that references its types)
    module, pybindgen_filename, config_h_filename = prepare_python_bindings(sys.argv[1:])
    # add all of libdunepymor
    module, exceptions, interfaces, CONFIG_H = inject_lib_dune_pymor(module, config_h_filename)
    # add example user code
    inject_Example(module)
    # add the users discretization; the C++ template instantiations are
    # spelled out as strings for pybindgen
    MatrixType = 'Dune::Stuff::LA::CommonDenseMatrix< double >'
    VectorType = 'Dune::Stuff::LA::CommonDenseVector< double >'
    OperatorType = 'Dune::Pymor::Operators::LinearAffinelyDecomposedContainerBased< ' + MatrixType + ', ' + VectorType + ' >'
    discretization = inject_StationaryDiscretizationImplementation(
        module, exceptions, interfaces, CONFIG_H,
        'Example::SimpleDiscretization',
        Traits={'VectorType': VectorType,
                'OperatorType': OperatorType,
                'FunctionalType': 'Dune::Pymor::Functionals::LinearAffinelyDecomposedVectorBased< ' + VectorType + ' >',
                'ProductType': OperatorType})
    # and add the custom constructor to the discretization
    discretization.add_constructor([param('const Example::AnalyticalProblem *', 'prob', transfer_ownership=True)])
    # and finally write the pybindgen .cc file
    finalize_python_bindings(module, pybindgen_filename)
| {
"content_hash": "bd2f2b4b488ad55a533f11e85811635b",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 125,
"avg_line_length": 49.810810810810814,
"alnum_prop": 0.7173087357569181,
"repo_name": "pymor/dune-pymor",
"id": "60d4e30e49b2d1bfd76fe040a00bb6e35675e11b",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/stationarylinear_bindings_generator.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "194989"
},
{
"name": "CMake",
"bytes": "6138"
},
{
"name": "M4",
"bytes": "1647"
},
{
"name": "Makefile",
"bytes": "2522"
},
{
"name": "Python",
"bytes": "136052"
}
],
"symlink_target": ""
} |
from quantum.api.v2 import attributes
from quantum.common import exceptions as qexception
class PortSecurityPortHasSecurityGroup(qexception.InUse):
    """Raised when port security (or the port's IP) cannot be disabled
    because a security group is still associated with the port."""
    # NOTE(review): _() is assumed to be the gettext alias quantum installs
    # globally at startup -- confirm before using this module standalone.
    message = _("Port has security group associated. Cannot disable port "
                "security or ip address until security group is removed")
class PortSecurityAndIPRequiredForSecurityGroups(qexception.InvalidInput):
    """Raised when security groups are requested on a port that lacks
    port security or an IP address."""
    message = _("Port security must be enabled and port must have an IP"
                " address in order to use security groups.")
class PortSecurityBindingNotFound(qexception.InvalidExtensionEnv):
    """Raised when a port has no port-security binding record."""
    message = _("Port does not have port security binding.")
# Name of the attribute this extension adds to network and port resources.
PORTSECURITY = 'port_security_enabled'
# v2.0 API attribute map: networks default the flag to True; ports leave it
# unspecified so the plugin can inherit the network-level setting.
EXTENDED_ATTRIBUTES_2_0 = {
    'networks': {
        PORTSECURITY: {'allow_post': True, 'allow_put': True,
                       'convert_to': attributes.convert_to_boolean,
                       'default': True,
                       'is_visible': True},
    },
    'ports': {
        PORTSECURITY: {'allow_post': True, 'allow_put': True,
                       'convert_to': attributes.convert_to_boolean,
                       'default': attributes.ATTR_NOT_SPECIFIED,
                       'is_visible': True},
    }
}
class Portsecurity(object):
    """API extension descriptor exposing the ``port_security_enabled``
    attribute on networks and ports."""

    @classmethod
    def get_name(cls):
        """Human-readable extension name."""
        return "Port Security"

    @classmethod
    def get_alias(cls):
        """Short alias used in API URLs and extension lists."""
        return "port-security"

    @classmethod
    def get_description(cls):
        return "Provides port security"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/portsecurity/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2012-07-23T10:00:00-00:00"

    def get_extended_resources(self, version):
        # Only the 2.0 API carries the extended attribute map.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| {
"content_hash": "efddd14863dcd88aad2727576b6e7480",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 74,
"avg_line_length": 29.936507936507937,
"alnum_prop": 0.6214209968186638,
"repo_name": "tpaszkowski/quantum",
"id": "85b83ccbdf5cadeb753c6a5bc0af9bfeddc7fd4a",
"size": "2607",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quantum/extensions/portsecurity.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "4007121"
},
{
"name": "Scala",
"bytes": "4561"
},
{
"name": "Shell",
"bytes": "8090"
},
{
"name": "XML",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from direct.distributed.ClockDelta import *
from direct.showbase.PythonUtil import Functor
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.fsm import FSM
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.directnotify import DirectNotifyGlobal
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
import DistributedBossCog
from toontown.toonbase import TTLocalizer
import SuitDNA
from toontown.toon import Toon
from toontown.battle import BattleBase
from direct.directutil import Mopath
from direct.showutil import Rope
from toontown.distributed import DelayDelete
from toontown.battle import MovieToonVictory
from toontown.building import ElevatorUtils
from toontown.battle import RewardPanel
from toontown.toon import NPCToons
from direct.task import Task
import random
import math
from toontown.coghq import CogDisguiseGlobals
from toontown.suit import SellbotBossGlobals
# Module-level reference to the single active boss cog instance, if any.
OneBossCog = None
class DistributedSellbotBoss(DistributedBossCog.DistributedBossCog, FSM.FSM):
    """Client-side representation of the Sellbot VP boss battle."""
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSellbotBoss')
    # Successive heights the caged toon's cage sits at; the cage drops one
    # step per stage.  NOTE(review): units/axis assumed from usage -- confirm
    # against the cage placement code.
    cageHeights = [100,
     81,
     63,
     44,
     25,
     18]
    def __init__(self, cr):
        """Initialize battle/cage bookkeeping; scene setup happens later
        (in announceGenerate and the FSM states)."""
        DistributedBossCog.DistributedBossCog.__init__(self, cr)
        FSM.FSM.__init__(self, 'DistributedSellbotBoss')
        self.cagedToonNpcId = None
        self.doobers = []
        self.dooberRequest = None
        self.bossDamage = 0
        self.attackCode = None
        self.attackAvId = 0
        self.recoverRate = 0
        self.recoverStartTime = 0
        self.bossDamageMovie = None
        self.cagedToon = None
        self.cageShadow = None
        self.cageIndex = 0
        self.everThrownPie = 0
        self.battleThreeMusicTime = 0
        self.insidesANodePath = None
        self.insidesBNodePath = None
        self.rampA = None
        self.rampB = None
        self.rampC = None
        self.strafeInterval = None
        self.onscreenMessage = None
        self.toonMopathInterval = []
        # True while the Sellbot nerf holiday is active (reads the global
        # client repository's news manager).
        self.nerfed = ToontownGlobals.SELLBOT_NERF_HOLIDAY in base.cr.newsManager.getHolidayIdList()
        self.localToonPromoted = True
        self.resetMaxDamage()
        return
    def announceGenerate(self):
        """One-time scene setup once the distributed object has generated.

        Builds the boss's display name, loads sound effects, attaches the
        pie-collision geometry (tagged with 'pieCode' values so splats can be
        classified), loads the environment, the caged toon and the entry
        mopaths, and registers this instance in the module-level OneBossCog
        slot.
        """
        global OneBossCog
        DistributedBossCog.DistributedBossCog.announceGenerate(self)
        self.setName(TTLocalizer.SellbotBossName)
        nameInfo = TTLocalizer.BossCogNameWithDept % {'name': self.name,
         'dept': SuitDNA.getDeptFullname(self.style.dept)}
        self.setDisplayName(nameInfo)
        self.cageDoorSfx = loader.loadSfx('phase_5/audio/sfx/CHQ_SOS_cage_door.mp3')
        self.cageLandSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_SOS_cage_land.mp3')
        self.cageLowerSfx = loader.loadSfx('phase_5/audio/sfx/CHQ_SOS_cage_lower.mp3')
        self.piesRestockSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_SOS_pies_restock.mp3')
        self.rampSlideSfx = loader.loadSfx('phase_9/audio/sfx/CHQ_VP_ramp_slide.mp3')
        self.strafeSfx = []
        for i in range(10):
            self.strafeSfx.append(loader.loadSfx('phase_3.5/audio/sfx/SA_shred.mp3'))
        # Default everything in the scene to "not the boss" for pie splats.
        render.setTag('pieCode', str(ToontownGlobals.PieCodeNotBossCog))
        # Two polygons on the axle tagged as the boss's insides; created
        # stashed, so they only take hits when unstashed elsewhere.
        insidesA = CollisionPolygon(Point3(4.0, -2.0, 5.0), Point3(-4.0, -2.0, 5.0), Point3(-4.0, -2.0, 0.5), Point3(4.0, -2.0, 0.5))
        insidesANode = CollisionNode('BossZap')
        insidesANode.addSolid(insidesA)
        insidesANode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)
        self.insidesANodePath = self.axle.attachNewNode(insidesANode)
        self.insidesANodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))
        self.insidesANodePath.stash()
        insidesB = CollisionPolygon(Point3(-4.0, 2.0, 5.0), Point3(4.0, 2.0, 5.0), Point3(4.0, 2.0, 0.5), Point3(-4.0, 2.0, 0.5))
        insidesBNode = CollisionNode('BossZap')
        insidesBNode.addSolid(insidesB)
        insidesBNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.WallBitmask)
        self.insidesBNodePath = self.axle.attachNewNode(insidesBNode)
        self.insidesBNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossInsides))
        self.insidesBNodePath.stash()
        # Tube on the pelvis tagged as the boss proper: direct pie hits.
        target = CollisionTube(0, -1, 4, 0, -1, 9, 3.5)
        targetNode = CollisionNode('BossZap')
        targetNode.addSolid(target)
        targetNode.setCollideMask(ToontownGlobals.PieBitmask)
        self.targetNodePath = self.pelvis.attachNewNode(targetNode)
        self.targetNodePath.setTag('pieCode', str(ToontownGlobals.PieCodeBossCog))
        # Untagged tube on the opposite side intercepts pies (and the camera)
        # without scoring -- NOTE(review): presumably shields the boss's back.
        shield = CollisionTube(0, 1, 4, 0, 1, 7, 3.5)
        shieldNode = CollisionNode('BossZap')
        shieldNode.addSolid(shield)
        shieldNode.setCollideMask(ToontownGlobals.PieBitmask | ToontownGlobals.CameraBitmask)
        shieldNodePath = self.pelvis.attachNewNode(shieldNode)
        disk = loader.loadModel('phase_9/models/char/bossCog-gearCollide')
        disk.find('**/+CollisionNode').setName('BossZap')
        disk.reparentTo(self.pelvis)
        disk.setZ(0.8)
        self.loadEnvironment()
        self.__makeCagedToon()
        self.__loadMopaths()
        if OneBossCog != None:
            self.notify.warning('Multiple BossCogs visible.')
        OneBossCog = self
        return
    def disable(self):
        """Tear down everything announceGenerate()/state-entry built.

        Leaves the FSM, unloads scene assets, stops every music track and
        outstanding interval, and clears the module-level OneBossCog slot if
        it points at this instance.
        """
        global OneBossCog
        DistributedBossCog.DistributedBossCog.disable(self)
        self.request('Off')
        self.unloadEnvironment()
        self.__unloadMopaths()
        self.__cleanupCagedToon()
        self.__clearOnscreenMessage()
        taskMgr.remove(self.uniqueName('PieAdvice'))
        self.__cleanupStrafe()
        render.clearTag('pieCode')
        self.targetNodePath.detachNode()
        self.cr.relatedObjectMgr.abortRequest(self.dooberRequest)
        self.dooberRequest = None
        self.betweenBattleMusic.stop()
        self.promotionMusic.stop()
        self.stingMusic.stop()
        self.battleTwoMusic.stop()
        self.battleThreeMusic.stop()
        self.epilogueMusic.stop()
        # Finish and discard any toon walk-in intervals still outstanding.
        while len(self.toonMopathInterval):
            toonMopath = self.toonMopathInterval[0]
            toonMopath.finish()
            toonMopath.destroy()
            self.toonMopathInterval.remove(toonMopath)
        if OneBossCog == self:
            OneBossCog = None
        return
def resetMaxDamage(self):
if self.nerfed:
self.bossMaxDamage = ToontownGlobals.SellbotBossMaxDamageNerfed
else:
self.bossMaxDamage = ToontownGlobals.SellbotBossMaxDamage
def d_hitBoss(self, bossDamage):
self.sendUpdate('hitBoss', [bossDamage])
def d_hitBossInsides(self):
self.sendUpdate('hitBossInsides', [])
def d_hitToon(self, toonId):
self.sendUpdate('hitToon', [toonId])
def setCagedToonNpcId(self, npcId):
self.cagedToonNpcId = npcId
def gotToon(self, toon):
stateName = self.state
if stateName == 'Elevator':
self.placeToonInElevator(toon)
def setDooberIds(self, dooberIds):
self.doobers = []
self.cr.relatedObjectMgr.abortRequest(self.dooberRequest)
self.dooberRequest = self.cr.relatedObjectMgr.requestObjects(dooberIds, allCallback=self.__gotDoobers)
def __gotDoobers(self, doobers):
self.dooberRequest = None
self.doobers = doobers
return
    def setBossDamage(self, bossDamage, recoverRate, timestamp):
        """Distributed update: sync our damage model to the AI's snapshot.

        bossDamage  -- authoritative damage at `timestamp`
        recoverRate -- units the boss regains per minute thereafter
                       (see getBossDamage)
        timestamp   -- network time of the AI snapshot

        If the damage movie exists, scrub it forward to match; when the boss
        is recovering, start the per-frame task that rolls it back.
        """
        recoverStartTime = globalClockDelta.networkToLocalTime(timestamp)
        self.bossDamage = bossDamage
        self.recoverRate = recoverRate
        self.recoverStartTime = recoverStartTime
        taskName = 'RecoverBossDamage'
        taskMgr.remove(taskName)
        if self.bossDamageMovie:
            if self.bossDamage >= self.bossMaxDamage:
                self.bossDamageMovie.resumeUntil(self.bossDamageMovie.getDuration())
            else:
                self.bossDamageMovie.resumeUntil(self.bossDamage * self.bossDamageToMovie)
                if self.recoverRate:
                    taskMgr.add(self.__recoverBossDamage, taskName)
def getBossDamage(self):
now = globalClock.getFrameTime()
elapsed = now - self.recoverStartTime
return max(self.bossDamage - self.recoverRate * elapsed / 60.0, 0)
def __recoverBossDamage(self, task):
self.bossDamageMovie.setT(self.getBossDamage() * self.bossDamageToMovie)
return Task.cont
    def __makeCagedToon(self):
        """Create the NPC toon inside the cage (idempotent).

        Also attaches a 'Cage' collision polygon under the cage; its collision
        events drive the 'enterCage' handlers used in the later battle states.
        """
        if self.cagedToon:
            return
        self.cagedToon = NPCToons.createLocalNPC(self.cagedToonNpcId)
        self.cagedToon.addActive()
        self.cagedToon.reparentTo(self.cage)
        self.cagedToon.setPosHpr(0, -2, 0, 180, 0, 0)
        self.cagedToon.loop('neutral')
        touch = CollisionPolygon(Point3(-3.0382, 3.0382, -1), Point3(3.0382, 3.0382, -1), Point3(3.0382, -3.0382, -1), Point3(-3.0382, -3.0382, -1))
        touchNode = CollisionNode('Cage')
        touchNode.setCollideMask(ToontownGlobals.WallBitmask)
        touchNode.addSolid(touch)
        self.cage.attachNewNode(touchNode)
def __cleanupCagedToon(self):
if self.cagedToon:
self.cagedToon.removeActive()
self.cagedToon.delete()
self.cagedToon = None
return
    def __walkToonToPromotion(self, toonId, delay, mopath, track, delayDeletes):
        """Append to `track` an interval that walks one cog-suited toon from
        the elevator along `mopath` and back to its original position.

        The mopath interval is also kept in self.toonMopathInterval so
        disable() can finish it, and a DelayDelete is appended to
        `delayDeletes` to keep the toon alive for the movie's duration.
        """
        toon = base.cr.doId2do.get(toonId)
        if toon:
            destPos = toon.getPos()
            self.placeToonInElevator(toon)
            toon.wrtReparentTo(render)
            walkMopath = MopathInterval(mopath, toon)
            ival = Sequence(Wait(delay), Func(toon.suit.setPlayRate, 1, 'walk'), Func(toon.suit.loop, 'walk'), toon.posInterval(1, Point3(0, 90, 20)), ParallelEndTogether(walkMopath, toon.posInterval(2, destPos, blendType='noBlend')), Func(toon.suit.loop, 'neutral'))
            self.toonMopathInterval.append(walkMopath)
            track.append(ival)
            delayDeletes.append(DelayDelete.DelayDelete(toon, 'SellbotBoss.__walkToonToPromotion'))
    def __walkDoober(self, suit, delay, turnPos, track, delayDeletes):
        """Append to `track` an interval that marches one demoted cog out.

        The suit walks via `turnPos` and the shared down/fly waypoints, then
        does its supa-fly exit and turns its FSM off.  A DelayDelete keeps the
        suit alive for the movie.
        """
        turnPos = Point3(*turnPos)
        turnPosDown = Point3(*ToontownGlobals.SellbotBossDooberTurnPosDown)
        flyPos = Point3(*ToontownGlobals.SellbotBossDooberFlyPos)
        seq = Sequence(Func(suit.headsUp, turnPos), Wait(delay), Func(suit.loop, 'walk', 0), self.__walkSuitToPoint(suit, suit.getPos(), turnPos), self.__walkSuitToPoint(suit, turnPos, turnPosDown), self.__walkSuitToPoint(suit, turnPosDown, flyPos), suit.beginSupaFlyMove(flyPos, 0, 'flyAway'), Func(suit.fsm.request, 'Off'))
        track.append(seq)
        delayDeletes.append(DelayDelete.DelayDelete(suit, 'SellbotBoss.__walkDoober'))
def __walkSuitToPoint(self, node, fromPos, toPos):
vector = Vec3(toPos - fromPos)
distance = vector.length()
time = distance / (ToontownGlobals.SuitWalkSpeed * 1.8)
return Sequence(Func(node.setPos, fromPos), Func(node.headsUp, toPos), node.posInterval(time, toPos))
    def makeIntroductionMovie(self, delayDeletes):
        """Build the full introduction cutscene and return it as a Sequence.

        The movie runs three parallel strands -- the boss's speech animation,
        the doobers marching out, and the disguised toons walking in -- all
        scheduled against a timestamped dialog Track.  `delayDeletes` collects
        DelayDelete handles for every avatar used so none are deleted
        mid-movie.  Toons are stuck to the floor for the duration.
        """
        track = Parallel()
        camera.reparentTo(render)
        camera.setPosHpr(0, 25, 30, 0, 0, 0)
        localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
        # Doobers: first four exit via side A, the rest via side B, staggered.
        dooberTrack = Parallel()
        if self.doobers:
            self.__doobersToPromotionPosition(self.doobers[:4], self.battleANode)
            self.__doobersToPromotionPosition(self.doobers[4:], self.battleBNode)
            turnPosA = ToontownGlobals.SellbotBossDooberTurnPosA
            turnPosB = ToontownGlobals.SellbotBossDooberTurnPosB
            self.__walkDoober(self.doobers[0], 0, turnPosA, dooberTrack, delayDeletes)
            self.__walkDoober(self.doobers[1], 4, turnPosA, dooberTrack, delayDeletes)
            self.__walkDoober(self.doobers[2], 8, turnPosA, dooberTrack, delayDeletes)
            self.__walkDoober(self.doobers[3], 12, turnPosA, dooberTrack, delayDeletes)
            self.__walkDoober(self.doobers[7], 2, turnPosB, dooberTrack, delayDeletes)
            self.__walkDoober(self.doobers[6], 6, turnPosB, dooberTrack, delayDeletes)
            self.__walkDoober(self.doobers[5], 10, turnPosB, dooberTrack, delayDeletes)
            self.__walkDoober(self.doobers[4], 14, turnPosB, dooberTrack, delayDeletes)
        # Disguised toons walk in from the elevator, one second apart.
        toonTrack = Parallel()
        self.__toonsToPromotionPosition(self.toonsA, self.battleANode)
        self.__toonsToPromotionPosition(self.toonsB, self.battleBNode)
        delay = 0
        for toonId in self.toonsA:
            self.__walkToonToPromotion(toonId, delay, self.toonsEnterA, toonTrack, delayDeletes)
            delay += 1
        for toonId in self.toonsB:
            self.__walkToonToPromotion(toonId, delay, self.toonsEnterB, toonTrack, delayDeletes)
            delay += 1
        toonTrack.append(Sequence(Wait(delay), self.closeDoors))
        self.rampA.request('extended')
        self.rampB.request('extended')
        self.rampC.request('retracted')
        self.clearChat()
        self.cagedToon.clearChat()
        promoteDoobers = TTLocalizer.BossCogPromoteDoobers % SuitDNA.getDeptFullnameP(self.style.dept)
        doobersAway = TTLocalizer.BossCogDoobersAway[self.style.dept]
        welcomeToons = TTLocalizer.BossCogWelcomeToons
        promoteToons = TTLocalizer.BossCogPromoteToons % SuitDNA.getDeptFullnameP(self.style.dept)
        discoverToons = TTLocalizer.BossCogDiscoverToons
        attackToons = TTLocalizer.BossCogAttackToons
        interruptBoss = TTLocalizer.CagedToonInterruptBoss
        rescueQuery = TTLocalizer.CagedToonRescueQuery
        bossAnimTrack = Sequence(
            ActorInterval(self, 'Ff_speech', startTime=2, duration=10, loop=1),
            ActorInterval(self, 'ltTurn2Wave', duration=2),
            ActorInterval(self, 'wave', duration=4, loop=1),
            ActorInterval(self, 'ltTurn2Wave', startTime=2, endTime=0),
            ActorInterval(self, 'Ff_speech', duration=7, loop=1))
        track.append(bossAnimTrack)
        # Timestamped dialog/camera script; times are seconds from movie start.
        dialogTrack = Track(
            (0, Parallel(
                camera.posHprInterval(8, Point3(-22, -100, 35), Point3(-10, -13, 0), blendType='easeInOut'),
                IndirectInterval(toonTrack, 0, 18))),
            (5.6, Func(self.setChatAbsolute, promoteDoobers, CFSpeech)),
            (9, IndirectInterval(dooberTrack, 0, 9)),
            (10, Sequence(
                Func(self.clearChat),
                Func(camera.setPosHpr, -23.1, 15.7, 17.2, -160, -2.4, 0))),
            (12, Func(self.setChatAbsolute, doobersAway, CFSpeech)),
            (16, Parallel(
                Func(self.clearChat),
                Func(camera.setPosHpr, -25, -99, 10, -14, 10, 0),
                IndirectInterval(dooberTrack, 14),
                IndirectInterval(toonTrack, 30))),
            (18, Func(self.setChatAbsolute, welcomeToons, CFSpeech)),
            (22, Func(self.setChatAbsolute, promoteToons, CFSpeech)),
            (22.2, Sequence(
                Func(self.cagedToon.nametag3d.setScale, 2),
                Func(self.cagedToon.setChatAbsolute, interruptBoss, CFSpeech),
                ActorInterval(self.cagedToon, 'wave'),
                Func(self.cagedToon.loop, 'neutral'))),
            (25, Sequence(
                Func(self.clearChat),
                Func(self.cagedToon.clearChat),
                Func(camera.setPosHpr, -12, -15, 27, -151, -15, 0),
                ActorInterval(self, 'Ff_lookRt'))),
            (27, Sequence(
                Func(self.cagedToon.setChatAbsolute, rescueQuery, CFSpeech),
                Func(camera.setPosHpr, -12, 48, 94, -26, 20, 0),
                ActorInterval(self.cagedToon, 'wave'),
                Func(self.cagedToon.loop, 'neutral'))),
            (31, Sequence(
                Func(camera.setPosHpr, -20, -35, 10, -88, 25, 0),
                Func(self.setChatAbsolute, discoverToons, CFSpeech),
                Func(self.cagedToon.nametag3d.setScale, 1),
                Func(self.cagedToon.clearChat),
                ActorInterval(self, 'turn2Fb'))),
            (34, Sequence(
                Func(self.clearChat),
                self.loseCogSuits(self.toonsA, self.battleANode, (0, 18, 5, -180, 0, 0)),
                self.loseCogSuits(self.toonsB, self.battleBNode, (0, 18, 5, -180, 0, 0)))),
            (37, Sequence(
                self.toonNormalEyes(self.involvedToons),
                Func(camera.setPosHpr, -23.4, -145.6, 44.0, -10.0, -12.5, 0),
                Func(self.loop, 'Fb_neutral'),
                Func(self.rampA.request, 'retract'),
                Func(self.rampB.request, 'retract'),
                Parallel(self.backupToonsToBattlePosition(self.toonsA, self.battleANode),
                         self.backupToonsToBattlePosition(self.toonsB, self.battleBNode),
                         Sequence(
                            Wait(2),
                            Func(self.setChatAbsolute, attackToons, CFSpeech))))))
        track.append(dialogTrack)
        return Sequence(Func(self.stickToonsToFloor), track, Func(self.unstickToons), name=self.uniqueName('Introduction'))
    def __makeRollToBattleTwoMovie(self):
        """Return the Sequence that rolls the boss from the battle-one spot,
        up the ramp on the appropriate side (chosen by self.arenaSide), and
        into the battle-two position.  The boss faces backwards (geom node
        turned 180) while rolling and is restored at the end.
        """
        startPos = Point3(ToontownGlobals.SellbotBossBattleOnePosHpr[0], ToontownGlobals.SellbotBossBattleOnePosHpr[1], ToontownGlobals.SellbotBossBattleOnePosHpr[2])
        if self.arenaSide:
            topRampPos = Point3(*ToontownGlobals.SellbotBossTopRampPosB)
            topRampTurnPos = Point3(*ToontownGlobals.SellbotBossTopRampTurnPosB)
            p3Pos = Point3(*ToontownGlobals.SellbotBossP3PosB)
        else:
            topRampPos = Point3(*ToontownGlobals.SellbotBossTopRampPosA)
            topRampTurnPos = Point3(*ToontownGlobals.SellbotBossTopRampTurnPosA)
            p3Pos = Point3(*ToontownGlobals.SellbotBossP3PosA)
        battlePos = Point3(ToontownGlobals.SellbotBossBattleTwoPosHpr[0], ToontownGlobals.SellbotBossBattleTwoPosHpr[1], ToontownGlobals.SellbotBossBattleTwoPosHpr[2])
        battleHpr = VBase3(ToontownGlobals.SellbotBossBattleTwoPosHpr[3], ToontownGlobals.SellbotBossBattleTwoPosHpr[4], ToontownGlobals.SellbotBossBattleTwoPosHpr[5])
        bossTrack = Sequence()
        bossTrack.append(Func(self.getGeomNode().setH, 180))
        bossTrack.append(Func(self.loop, 'Fb_neutral'))
        # Chain the roll through each waypoint, carrying the heading forward.
        track, hpr = self.rollBossToPoint(startPos, None, topRampPos, None, 0)
        bossTrack.append(track)
        track, hpr = self.rollBossToPoint(topRampPos, hpr, topRampTurnPos, None, 0)
        bossTrack.append(track)
        track, hpr = self.rollBossToPoint(topRampTurnPos, hpr, p3Pos, None, 0)
        bossTrack.append(track)
        track, hpr = self.rollBossToPoint(p3Pos, hpr, battlePos, None, 0)
        bossTrack.append(track)
        return Sequence(bossTrack, Func(self.getGeomNode().setH, 0), name=self.uniqueName('BattleTwo'))
def cagedToonMovieFunction(self, instruct, cageIndex):
self.notify.debug('cagedToonMovieFunction()')
if not (hasattr(self, 'cagedToon') and hasattr(self.cagedToon, 'nametag') and hasattr(self.cagedToon, 'nametag3d')):
return
if instruct == 1:
self.cagedToon.nametag3d.setScale(2)
elif instruct == 2:
self.cagedToon.setChatAbsolute(TTLocalizer.CagedToonDrop[cageIndex], CFSpeech)
elif instruct == 3:
self.cagedToon.nametag3d.setScale(1)
elif instruct == 4:
self.cagedToon.clearChat()
    def makeEndOfBattleMovie(self, hasLocalToon):
        """Build the movie that lowers the cage one height step after a round.

        hasLocalToon -- true when the local toon participated; the camera then
        cuts to the cage (boss hidden) and back afterwards.

        Side effect: advances self.cageIndex to the next step.  The interval
        is named uniqueName('CageDrop').
        """
        name = self.uniqueName('CageDrop')
        seq = Sequence(name=name)
        seq.append(Func(self.cage.setPos, self.cagePos[self.cageIndex]))
        if hasLocalToon:
            seq += [Func(camera.reparentTo, render),
             Func(camera.setPosHpr, self.cage, 0, -50, 0, 0, 0, 0),
             Func(localAvatar.setCameraFov, ToontownGlobals.CogHQCameraFov),
             Func(self.hide)]
        seq += [Wait(0.5),
         Parallel(self.cage.posInterval(1, self.cagePos[self.cageIndex + 1], blendType='easeInOut'), SoundInterval(self.cageLowerSfx, duration=1)),
         Func(self.cagedToonMovieFunction, 1, self.cageIndex),
         Func(self.cagedToonMovieFunction, 2, self.cageIndex),
         Wait(3),
         Func(self.cagedToonMovieFunction, 3, self.cageIndex),
         Func(self.cagedToonMovieFunction, 4, self.cageIndex)]
        if hasLocalToon:
            seq += [Func(self.show),
             Func(camera.reparentTo, localAvatar),
             Func(camera.setPos, localAvatar.cameraPositions[0][0]),
             Func(camera.setHpr, 0, 0, 0)]
        self.cageIndex += 1
        return seq
def __makeBossDamageMovie(self):
startPos = Point3(ToontownGlobals.SellbotBossBattleTwoPosHpr[0], ToontownGlobals.SellbotBossBattleTwoPosHpr[1], ToontownGlobals.SellbotBossBattleTwoPosHpr[2])
startHpr = Point3(*ToontownGlobals.SellbotBossBattleThreeHpr)
bottomPos = Point3(*ToontownGlobals.SellbotBossBottomPos)
deathPos = Point3(*ToontownGlobals.SellbotBossDeathPos)
self.setPosHpr(startPos, startHpr)
bossTrack = Sequence()
bossTrack.append(Func(self.loop, 'Fb_neutral'))
track, hpr = self.rollBossToPoint(startPos, startHpr, bottomPos, None, 1)
bossTrack.append(track)
track, hpr = self.rollBossToPoint(bottomPos, startHpr, deathPos, None, 1)
bossTrack.append(track)
duration = bossTrack.getDuration()
return bossTrack
    def __talkAboutPromotion(self, speech):
        """Append promotion chatter to the caged toon's thank-you speech.

        Uses self.prevCogSuitLevel (the level before this promotion) and the
        local toon's current cog levels.  Adds the last-promotion and HP-boost
        lines where they apply; an already-maxed toon gets the 'maxed' line.
        Returns the extended speech string.
        """
        if not self.localToonPromoted:
            pass
        elif self.prevCogSuitLevel < ToontownGlobals.MaxCogSuitLevel:
            speech += TTLocalizer.CagedToonPromotion
            newCogSuitLevel = localAvatar.getCogLevels()[CogDisguiseGlobals.dept2deptIndex(self.style.dept)]
            if newCogSuitLevel == ToontownGlobals.MaxCogSuitLevel:
                speech += TTLocalizer.CagedToonLastPromotion % (ToontownGlobals.MaxCogSuitLevel + 1)
            if newCogSuitLevel in ToontownGlobals.CogSuitHPLevels:
                speech += TTLocalizer.CagedToonHPBoost
        else:
            speech += TTLocalizer.CagedToonMaxed % (ToontownGlobals.MaxCogSuitLevel + 1)
        return speech
    def __makeCageOpenMovie(self):
        """Return the epilogue movie: the cage settles to the floor, the door
        swings open, and the freed toon jumps out and pages through his
        thank-you / promotion speech to the local toon.
        """
        speech = TTLocalizer.CagedToonThankYou
        speech = self.__talkAboutPromotion(speech)
        name = self.uniqueName('CageOpen')
        seq = Sequence(
            Func(self.cage.setPos, self.cagePos[4]),
            Func(self.cageDoor.setHpr, VBase3(0, 0, 0)),
            Func(self.cagedToon.setPos, Point3(0, -2, 0)),
            Parallel(
                self.cage.posInterval(0.5, self.cagePos[5], blendType='easeOut'),
                SoundInterval(self.cageLowerSfx, duration=0.5)),
            Parallel(
                self.cageDoor.hprInterval(0.5, VBase3(0, 90, 0), blendType='easeOut'),
                # NOTE(review): 'duration=0' here is a keyword to Sequence, not
                # to the SoundInterval -- it looks like it was meant for the
                # sound; confirm intended behavior before changing.
                Sequence(SoundInterval(self.cageDoorSfx), duration=0)),
            Wait(0.2),
            Func(self.cagedToon.loop, 'walk'),
            self.cagedToon.posInterval(0.8, Point3(0, -6, 0)),
            Func(self.cagedToon.setChatAbsolute, TTLocalizer.CagedToonYippee, CFSpeech),
            ActorInterval(self.cagedToon, 'jump'),
            Func(self.cagedToon.loop, 'neutral'),
            Func(self.cagedToon.headsUp, localAvatar),
            Func(self.cagedToon.setLocalPageChat, speech, 0),
            Func(camera.reparentTo, localAvatar),
            Func(camera.setPos, 0, -9, 9),
            Func(camera.lookAt, self.cagedToon, Point3(0, 0, 2)), name=name)
        return seq
def __showOnscreenMessage(self, text):
if self.onscreenMessage:
self.onscreenMessage.destroy()
self.onscreenMessage = None
self.onscreenMessage = DirectLabel(text=text, text_fg=VBase4(1, 1, 1, 1), text_align=TextNode.ACenter, relief=None, pos=(0, 0, 0.35), scale=0.1)
return
def __clearOnscreenMessage(self):
if self.onscreenMessage:
self.onscreenMessage.destroy()
self.onscreenMessage = None
return
def __showWaitingMessage(self, task):
self.__showOnscreenMessage(TTLocalizer.BuildingWaitingForVictors)
def __placeCageShadow(self):
if self.cageShadow == None:
self.cageShadow = loader.loadModel('phase_3/models/props/drop_shadow')
self.cageShadow.setPos(0, 77.9, 18)
self.cageShadow.setColorScale(1, 1, 1, 0.6)
self.cageShadow.reparentTo(render)
return
def __removeCageShadow(self):
if self.cageShadow != None:
self.cageShadow.detachNode()
return
def setCageIndex(self, cageIndex):
self.cageIndex = cageIndex
self.cage.setPos(self.cagePos[self.cageIndex])
if self.cageIndex >= 4:
self.__placeCageShadow()
else:
self.__removeCageShadow()
    def loadEnvironment(self):
        """Load the boss room: ramps (each wrapped in a ClassicFSM by
        __findRamp), elevator, cage with its chain rope, and the stage music.
        """
        DistributedBossCog.DistributedBossCog.loadEnvironment(self)
        self.geom = loader.loadModel('phase_9/models/cogHQ/BossRoomHQ')
        self.rampA = self.__findRamp('rampA', '**/west_ramp2')
        self.rampB = self.__findRamp('rampB', '**/west_ramp')
        self.rampC = self.__findRamp('rampC', '**/west_ramp1')
        self.cage = self.geom.find('**/cage')
        elevatorEntrance = self.geom.find('**/elevatorEntrance')
        elevatorEntrance.getChildren().detach()
        elevatorEntrance.setScale(1)
        elevatorModel = loader.loadModel('phase_9/models/cogHQ/cogHQ_elevator')
        elevatorModel.reparentTo(elevatorEntrance)
        self.setupElevator(elevatorModel)
        # Precompute the cage position at each height step (see cageHeights).
        pos = self.cage.getPos()
        self.cagePos = []
        for height in self.cageHeights:
            self.cagePos.append(Point3(pos[0], pos[1], height))
        self.cageDoor = self.geom.find('**/cage_door')
        self.cage.setScale(1)
        # Billboarded rope draws the chain the cage hangs from.
        self.rope = Rope.Rope(name='supportChain')
        self.rope.reparentTo(self.cage)
        self.rope.setup(2, ((self.cage, (0.15, 0.13, 16)), (self.geom, (0.23, 78, 120))))
        self.rope.ropeNode.setRenderMode(RopeNode.RMBillboard)
        self.rope.ropeNode.setUvMode(RopeNode.UVDistance)
        self.rope.ropeNode.setUvDirection(0)
        self.rope.ropeNode.setUvScale(0.8)
        self.rope.setTexture(self.cage.findTexture('hq_chain'))
        self.rope.setTransparency(1)
        self.promotionMusic = base.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.mid')
        self.betweenBattleMusic = base.loadMusic('phase_9/audio/bgm/encntr_toon_winning.mid')
        self.battleTwoMusic = base.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.mid')
        self.geom.reparentTo(render)
    def unloadEnvironment(self):
        """Release everything loadEnvironment() created.

        Ramp FSMs are driven to their final state (stopping any slide
        intervals) before being dropped.
        """
        DistributedBossCog.DistributedBossCog.unloadEnvironment(self)
        self.geom.removeNode()
        del self.geom
        del self.cage
        self.rampA.requestFinalState()
        self.rampB.requestFinalState()
        self.rampC.requestFinalState()
        del self.rampA
        del self.rampB
        del self.rampC
def __loadMopaths(self):
self.toonsEnterA = Mopath.Mopath()
self.toonsEnterA.loadFile('phase_9/paths/bossBattle-toonsEnterA')
self.toonsEnterA.fFaceForward = 1
self.toonsEnterA.timeScale = 35
self.toonsEnterB = Mopath.Mopath()
self.toonsEnterB.loadFile('phase_9/paths/bossBattle-toonsEnterB')
self.toonsEnterB.fFaceForward = 1
self.toonsEnterB.timeScale = 35
def __unloadMopaths(self):
self.toonsEnterA.reset()
self.toonsEnterB.reset()
    def __findRamp(self, name, path):
        """Wrap the ramp geometry found at `path` in a ClassicFSM.

        The ramp's children are re-parented under a fresh 'animate' node so
        the slide intervals can move them as a unit.  States: extend /
        extended / retract / retracted / off, with handlers bound to this
        instance via Functor.  Returns the FSM, already in its initial state.
        """
        ramp = self.geom.find(path)
        children = ramp.getChildren()
        animate = ramp.attachNewNode(name)
        children.reparentTo(animate)
        fsm = ClassicFSM.ClassicFSM(name, [
            State.State('extend',
                        Functor(self.enterRampExtend, animate),
                        Functor(self.exitRampExtend, animate), [
                            'extended',
                            'retract',
                            'retracted']),
            State.State('extended',
                        Functor(self.enterRampExtended, animate),
                        Functor(self.exitRampExtended, animate), [
                            'retract',
                            'retracted']),
            State.State('retract',
                        Functor(self.enterRampRetract, animate),
                        Functor(self.exitRampRetract, animate), [
                            'extend',
                            'extended',
                            'retracted']),
            State.State('retracted',
                        Functor(self.enterRampRetracted, animate),
                        Functor(self.exitRampRetracted, animate), [
                            'extend',
                            'extended']),
            State.State('off',
                        Functor(self.enterRampOff, animate),
                        Functor(self.exitRampOff, animate))],
            'off', 'off', onUndefTransition=ClassicFSM.ClassicFSM.DISALLOW)
        fsm.enterInitialState()
        return fsm
def enterRampExtend(self, animate):
intervalName = self.uniqueName('extend-%s' % animate.getName())
adjustTime = 2.0 * animate.getX() / 18.0
ival = Parallel(SoundInterval(self.rampSlideSfx, node=animate), animate.posInterval(adjustTime, Point3(0, 0, 0), blendType='easeInOut', name=intervalName))
ival.start()
self.storeInterval(ival, intervalName)
def exitRampExtend(self, animate):
intervalName = self.uniqueName('extend-%s' % animate.getName())
self.clearInterval(intervalName)
def enterRampExtended(self, animate):
animate.setPos(0, 0, 0)
def exitRampExtended(self, animate):
pass
def enterRampRetract(self, animate):
intervalName = self.uniqueName('retract-%s' % animate.getName())
adjustTime = 2.0 * (18 - animate.getX()) / 18.0
ival = Parallel(SoundInterval(self.rampSlideSfx, node=animate), animate.posInterval(adjustTime, Point3(18, 0, 0), blendType='easeInOut', name=intervalName))
ival.start()
self.storeInterval(ival, intervalName)
def exitRampRetract(self, animate):
intervalName = self.uniqueName('retract-%s' % animate.getName())
self.clearInterval(intervalName)
def enterRampRetracted(self, animate):
animate.setPos(18, 0, 0)
def exitRampRetracted(self, animate):
pass
def enterRampOff(self, animate):
pass
def exitRampOff(self, animate):
pass
def enterOff(self):
DistributedBossCog.DistributedBossCog.enterOff(self)
if self.cagedToon:
self.cagedToon.clearChat()
if self.rampA:
self.rampA.request('off')
if self.rampB:
self.rampB.request('off')
if self.rampC:
self.rampC.request('off')
def enterWaitForToons(self):
DistributedBossCog.DistributedBossCog.enterWaitForToons(self)
self.geom.hide()
self.cagedToon.removeActive()
def exitWaitForToons(self):
DistributedBossCog.DistributedBossCog.exitWaitForToons(self)
self.geom.show()
self.cagedToon.addActive()
    def enterElevator(self):
        """Toons are riding in: stage the arena for their arrival.

        Side ramps out, cage at the top, boss posed at the battle-one spot.
        """
        DistributedBossCog.DistributedBossCog.enterElevator(self)
        self.rampA.request('extended')
        self.rampB.request('extended')
        self.rampC.request('retracted')
        self.setCageIndex(0)
        self.reparentTo(render)
        self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr)
        # Stance flags consumed by doAnimate().
        self.happy = 1
        self.raised = 1
        self.forward = 1
        self.doAnimate()
        self.cagedToon.removeActive()
def exitElevator(self):
DistributedBossCog.DistributedBossCog.exitElevator(self)
self.cagedToon.addActive()
    def enterIntroduction(self):
        """Play the scripted introduction (the movie itself is built by
        makeIntroductionMovie via the base class)."""
        self.reparentTo(render)
        self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr)
        self.stopAnimate()
        DistributedBossCog.DistributedBossCog.enterIntroduction(self)
        self.rampA.request('extended')
        self.rampB.request('extended')
        self.rampC.request('retracted')
        self.setCageIndex(0)
        base.playMusic(self.promotionMusic, looping=1, volume=0.9)
def exitIntroduction(self):
DistributedBossCog.DistributedBossCog.exitIntroduction(self)
self.promotionMusic.stop()
def enterBattleOne(self):
DistributedBossCog.DistributedBossCog.enterBattleOne(self)
self.reparentTo(render)
self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr)
self.clearChat()
self.cagedToon.clearChat()
self.rampA.request('retract')
self.rampB.request('retract')
self.rampC.request('retract')
if self.battleA == None or self.battleB == None:
cageIndex = 1
else:
cageIndex = 0
self.setCageIndex(cageIndex)
return
def exitBattleOne(self):
DistributedBossCog.DistributedBossCog.exitBattleOne(self)
    def enterRollToBattleTwo(self):
        """Cutscene: the boss rolls up a side ramp to the battle-two arena.

        Toons are released; only the ramp on the battle side extends.  Toon
        collisions are disabled during the initial setup and re-enabled half a
        second later by a delayed task.
        """
        self.disableToonCollision()
        self.releaseToons()
        if self.arenaSide:
            self.rampA.request('retract')
            self.rampB.request('extend')
        else:
            self.rampA.request('extend')
            self.rampB.request('retract')
        self.rampC.request('retract')
        self.reparentTo(render)
        self.setCageIndex(2)
        self.stickBossToFloor()
        intervalName = 'RollToBattleTwo'
        seq = Sequence(self.__makeRollToBattleTwoMovie(), Func(self.__onToPrepareBattleTwo), name=intervalName)
        seq.start()
        self.storeInterval(seq, intervalName)
        base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
        self.__showEasyBarrels()
        taskMgr.doMethodLater(0.5, self.enableToonCollision, 'enableToonCollision')
def __onToPrepareBattleTwo(self):
self.disableToonCollision()
self.unstickBoss()
self.setPosHpr(*ToontownGlobals.SellbotBossBattleTwoPosHpr)
self.doneBarrier('RollToBattleTwo')
def exitRollToBattleTwo(self):
self.unstickBoss()
intervalName = 'RollToBattleTwo'
self.clearInterval(intervalName)
self.betweenBattleMusic.stop()
def disableToonCollision(self):
base.localAvatar.collisionsOff()
def enableToonCollision(self, task):
base.localAvatar.collisionsOn()
def enterPrepareBattleTwo(self):
self.cleanupIntervals()
self.__hideEasyBarrels()
self.controlToons()
self.clearChat()
self.cagedToon.clearChat()
self.reparentTo(render)
if self.arenaSide:
self.rampA.request('retract')
self.rampB.request('extend')
else:
self.rampA.request('extend')
self.rampB.request('retract')
self.rampC.request('retract')
self.reparentTo(render)
self.setCageIndex(2)
camera.reparentTo(render)
camera.setPosHpr(self.cage, 0, -17, 3.3, 0, 0, 0)
(localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov),)
self.hide()
self.acceptOnce('doneChatPage', self.__onToBattleTwo)
self.cagedToon.setLocalPageChat(TTLocalizer.CagedToonPrepareBattleTwo, 1)
base.playMusic(self.stingMusic, looping=0, volume=1.0)
taskMgr.doMethodLater(0.5, self.enableToonCollision, 'enableToonCollision')
def __onToBattleTwo(self, elapsed):
self.doneBarrier('PrepareBattleTwo')
taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
def exitPrepareBattleTwo(self):
self.show()
taskMgr.remove(self.uniqueName('WaitingMessage'))
self.ignore('doneChatPage')
self.__clearOnscreenMessage()
self.stingMusic.stop()
    def enterBattleTwo(self):
        """Second battle round, fought at the top of the ramps.

        Battle-two reward credit multiplier applies for the duration.
        """
        self.cleanupIntervals()
        mult = ToontownBattleGlobals.getBossBattleCreditMultiplier(2)
        localAvatar.inventory.setBattleCreditMultiplier(mult)
        self.reparentTo(render)
        self.setPosHpr(*ToontownGlobals.SellbotBossBattleTwoPosHpr)
        self.clearChat()
        self.cagedToon.clearChat()
        self.rampA.request('retract')
        self.rampB.request('retract')
        self.rampC.request('retract')
        self.releaseToons()
        self.toonsToBattlePosition(self.toonsA, self.battleANode)
        self.toonsToBattlePosition(self.toonsB, self.battleBNode)
        # An empty battle side puts the cage one extra step lower (3 vs 2).
        if self.battleA == None or self.battleB == None:
            cageIndex = 3
        else:
            cageIndex = 2
        self.setCageIndex(cageIndex)
        base.playMusic(self.battleTwoMusic, looping=1, volume=0.9)
        return
def exitBattleTwo(self):
intervalName = self.uniqueName('cageDrop')
self.clearInterval(intervalName)
self.cleanupBattles()
self.battleTwoMusic.stop()
localAvatar.inventory.setBattleCreditMultiplier(1)
def enterPrepareBattleThree(self):
self.cleanupIntervals()
self.controlToons()
self.clearChat()
self.cagedToon.clearChat()
self.reparentTo(render)
self.rampA.request('retract')
self.rampB.request('retract')
self.rampC.request('extend')
self.setCageIndex(4)
camera.reparentTo(render)
camera.setPosHpr(self.cage, 0, -17, 3.3, 0, 0, 0)
(localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov),)
self.hide()
self.acceptOnce('doneChatPage', self.__onToBattleThree)
self.cagedToon.setLocalPageChat(TTLocalizer.CagedToonPrepareBattleThree, 1)
base.playMusic(self.betweenBattleMusic, looping=1, volume=0.9)
def __onToBattleThree(self, elapsed):
self.doneBarrier('PrepareBattleThree')
taskMgr.doMethodLater(1, self.__showWaitingMessage, self.uniqueName('WaitingMessage'))
def exitPrepareBattleThree(self):
self.show()
taskMgr.remove(self.uniqueName('WaitingMessage'))
self.ignore('doneChatPage')
intervalName = 'PrepareBattleThree'
self.clearInterval(intervalName)
self.__clearOnscreenMessage()
self.betweenBattleMusic.stop()
    def enterBattleThree(self):
        """The pie fight: toons pelt the boss while he attacks.

        Hooks up all pie/cage event handlers, builds the scrubbing damage
        movie (see __makeBossDamageMovie) and aligns it to the current
        bossDamage, and starts the pie-advice reminder task.
        """
        DistributedBossCog.DistributedBossCog.enterBattleThree(self)
        self.clearChat()
        self.cagedToon.clearChat()
        self.reparentTo(render)
        self.rampA.request('retract')
        self.rampB.request('retract')
        self.rampC.request('extend')
        self.setCageIndex(4)
        # Stance flags consumed by doAnimate().
        self.happy = 0
        self.raised = 1
        self.forward = 1
        self.doAnimate()
        self.accept('enterCage', self.__touchedCage)
        self.accept('pieSplat', self.__pieSplat)
        self.accept('localPieSplat', self.__localPieSplat)
        self.accept('outOfPies', self.__outOfPies)
        self.accept('begin-pie', self.__foundPieButton)
        localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
        # Remind the local toon how to get pies if they idle for 30 seconds.
        taskMgr.doMethodLater(30, self.__howToGetPies, self.uniqueName('PieAdvice'))
        self.stickBossToFloor()
        self.bossDamageMovie = self.__makeBossDamageMovie()
        bossDoneEventName = self.uniqueName('DestroyedBoss')
        self.bossDamageMovie.setDoneEvent(bossDoneEventName)
        self.acceptOnce(bossDoneEventName, self.__doneBattleThree)
        self.resetMaxDamage()
        # Linear map from damage points to movie playback time.
        self.bossDamageToMovie = self.bossDamageMovie.getDuration() / self.bossMaxDamage
        self.bossDamageMovie.setT(self.bossDamage * self.bossDamageToMovie)
        base.playMusic(self.battleThreeMusic, looping=1, volume=0.9)
def __doneBattleThree(self):
self.setState('NearVictory')
self.unstickBoss()
    def exitBattleThree(self):
        """Tear down the pie fight: event hooks, tasks, the damage movie and
        its recovery task.  Remembers the music position so NearVictory can
        resume the same track seamlessly.
        """
        DistributedBossCog.DistributedBossCog.exitBattleThree(self)
        bossDoneEventName = self.uniqueName('DestroyedBoss')
        self.ignore(bossDoneEventName)
        taskMgr.remove(self.uniqueName('StandUp'))
        self.ignore('enterCage')
        self.ignore('pieSplat')
        self.ignore('localPieSplat')
        self.ignore('outOfPies')
        self.ignore('begin-pie')
        self.__clearOnscreenMessage()
        taskMgr.remove(self.uniqueName('PieAdvice'))
        localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
        self.__removeCageShadow()
        self.bossDamageMovie.finish()
        self.bossDamageMovie = None
        self.unstickBoss()
        taskName = 'RecoverBossDamage'
        taskMgr.remove(taskName)
        self.battleThreeMusicTime = self.battleThreeMusic.getTime()
        self.battleThreeMusic.stop()
        return
    def enterNearVictory(self):
        """The boss is dizzy at his death position; one final pie wins.

        Pie splats are routed to __finalPieSplat here (instead of __pieSplat).
        Battle-three music resumes from where it stopped.
        """
        self.cleanupIntervals()
        self.reparentTo(render)
        self.setPos(*ToontownGlobals.SellbotBossDeathPos)
        self.setHpr(*ToontownGlobals.SellbotBossBattleThreeHpr)
        self.clearChat()
        self.cagedToon.clearChat()
        self.setCageIndex(4)
        self.releaseToons(finalBattle=1)
        self.rampA.request('retract')
        self.rampB.request('retract')
        self.rampC.request('extend')
        self.accept('enterCage', self.__touchedCage)
        self.accept('pieSplat', self.__finalPieSplat)
        self.accept('localPieSplat', self.__localPieSplat)
        self.accept('outOfPies', self.__outOfPies)
        localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
        # Stance flags consumed by doAnimate().
        self.happy = 0
        self.raised = 0
        self.forward = 1
        self.doAnimate()
        self.setDizzy(1)
        base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)
def exitNearVictory(self):
self.ignore('enterCage')
self.ignore('pieSplat')
self.ignore('localPieSplat')
self.ignore('outOfPies')
self.__clearOnscreenMessage()
taskMgr.remove(self.uniqueName('PieAdvice'))
localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
self.__removeCageShadow()
self.setDizzy(0)
self.battleThreeMusicTime = self.battleThreeMusic.getTime()
self.battleThreeMusic.stop()
def enterVictory(self):
    # Boss defeated: play his falling animation, then signal the AI via
    # the 'Victory' barrier once the animation completes.
    self.cleanupIntervals()
    localAvatar.setCameraFov(ToontownGlobals.BossBattleCameraFov)
    self.reparentTo(render)
    self.setPos(*ToontownGlobals.SellbotBossDeathPos)
    self.setHpr(*ToontownGlobals.SellbotBossBattleThreeHpr)
    self.clearChat()
    self.cagedToon.clearChat()
    self.setCageIndex(4)
    self.releaseToons(finalBattle=1)
    self.rampA.request('retract')
    self.rampB.request('retract')
    self.rampC.request('extend')
    self.happy = 0
    self.raised = 0
    self.forward = 1
    self.doAnimate('Fb_fall', now=1)
    self.acceptOnce(self.animDoneEvent, self.__continueVictory)
    base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)

def __continueVictory(self):
    # Fall animation finished: hide the boss and report through the barrier.
    self.stopAnimate()
    self.stash()
    self.doneBarrier('Victory')

def exitVictory(self):
    self.stopAnimate()
    self.unstash()
    self.__removeCageShadow()
    localAvatar.setCameraFov(ToontownGlobals.CogHQCameraFov)
    self.battleThreeMusicTime = self.battleThreeMusic.getTime()
    self.battleThreeMusic.stop()

def enterReward(self):
    # Run the toon victory-dance movie with the reward panel.
    self.cleanupIntervals()
    self.clearChat()
    self.cagedToon.clearChat()
    self.stash()
    self.stopAnimate()
    self.setCageIndex(4)
    self.releaseToons(finalBattle=1)
    self.toMovieMode()
    self.rampA.request('retract')
    self.rampB.request('retract')
    self.rampC.request('extend')
    panelName = self.uniqueName('reward')
    self.rewardPanel = RewardPanel.RewardPanel(panelName)
    victory, camVictory, skipper = MovieToonVictory.doToonVictory(1, self.involvedToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, allowGroupShot=0, uberList=self.uberList, noSkip=True)
    ival = Sequence(Parallel(victory, camVictory), Func(self.__doneReward))
    intervalName = 'RewardMovie'
    delayDeletes = []
    # DelayDelete keeps each participating toon object alive for the
    # duration of the reward movie.
    for toonId in self.involvedToons:
        toon = self.cr.doId2do.get(toonId)
        if toon:
            delayDeletes.append(DelayDelete.DelayDelete(toon, 'SellbotBoss.enterReward'))
    ival.delayDeletes = delayDeletes
    ival.start()
    self.storeInterval(ival, intervalName)
    base.playMusic(self.battleThreeMusic, looping=1, volume=0.9, time=self.battleThreeMusicTime)

def __doneReward(self):
    # Reward movie finished: notify the AI and give control back locally.
    self.doneBarrier('Reward')
    self.toWalkMode()

def exitReward(self):
    intervalName = 'RewardMovie'
    self.clearInterval(intervalName)
    self.unstash()
    self.rewardPanel.destroy()
    del self.rewardPanel
    self.__removeCageShadow()
    # Epilogue uses different music, so reset the resume point.
    self.battleThreeMusicTime = 0
    self.battleThreeMusic.stop()
def enterEpilogue(self):
    # Caged-toon rescue scene: toons circle the cage, the cage opens, and
    # the caged toon chats through a multi-page speech.
    self.cleanupIntervals()
    self.clearChat()
    self.cagedToon.clearChat()
    self.stash()
    self.stopAnimate()
    self.setCageIndex(4)
    self.controlToons()
    self.rampA.request('retract')
    self.rampB.request('retract')
    self.rampC.request('extend')
    self.__arrangeToonsAroundCage()
    camera.reparentTo(render)
    camera.setPosHpr(-24, 52, 27.5, -53, -13, 0)
    intervalName = 'EpilogueMovie'
    seq = Sequence(self.__makeCageOpenMovie(), name=intervalName)
    seq.start()
    self.storeInterval(seq, intervalName)
    self.accept('nextChatPage', self.__epilogueChatNext)
    self.accept('doneChatPage', self.__epilogueChatDone)
    base.playMusic(self.epilogueMusic, looping=1, volume=0.9)

def __epilogueChatNext(self, pageNumber, elapsed):
    # On page 2 the caged toon thanks the players with a gesture.
    if pageNumber == 2:
        # A torso code with 'd' in position 1 appears to mark a
        # skirt/dress torso, selecting curtsy over bow — TODO confirm.
        if self.cagedToon.style.torso[1] == 'd':
            track = ActorInterval(self.cagedToon, 'curtsy')
        else:
            track = ActorInterval(self.cagedToon, 'bow')
        track = Sequence(track, Func(self.cagedToon.loop, 'neutral'))
        intervalName = 'EpilogueMovieToonAnim'
        self.storeInterval(track, intervalName)
        track.start()

def __epilogueChatDone(self, elapsed):
    # Speech complete: say goodbye, wave, and send the local toon home.
    self.cagedToon.setChatAbsolute(TTLocalizer.CagedToonGoodbye, CFSpeech)
    self.ignore('nextChatPage')
    self.ignore('doneChatPage')
    intervalName = 'EpilogueMovieToonAnim'
    self.clearInterval(intervalName)
    track = Parallel(Sequence(ActorInterval(self.cagedToon, 'wave'), Func(self.cagedToon.loop, 'neutral')), Sequence(Wait(0.5), Func(self.localToonToSafeZone)))
    self.storeInterval(track, intervalName)
    track.start()

def exitEpilogue(self):
    self.clearInterval('EpilogueMovieToonAnim')
    self.unstash()
    self.__removeCageShadow()
    self.epilogueMusic.stop()
def __arrangeToonsAroundCage(self):
    """Fan the participating toons out in a 15-degree arc around the cage,
    each facing it, at a fixed radius and height."""
    arcRadius = 15
    count = len(self.involvedToons)
    midpoint = (count - 1) / 2.0
    for slot, doId in enumerate(self.involvedToons):
        toon = base.cr.doId2do.get(doId)
        if not toon:
            continue
        heading = 270 - 15 * (slot - midpoint)
        theta = heading * math.pi / 180.0
        toon.setPos(self.cage, math.cos(theta) * arcRadius, math.sin(theta) * arcRadius, 0)
        toon.setZ(18.0)
        toon.headsUp(self.cage)
def enterFrolic(self):
    # Delegate generic frolic handling, then stand at the battle-one spot.
    DistributedBossCog.DistributedBossCog.enterFrolic(self)
    self.setPosHpr(*ToontownGlobals.SellbotBossBattleOnePosHpr)
def doorACallback(self, isOpen):
    """Keep door A's interior geometry in sync with the door state:
    unstash the insides node while open, stash it while closed."""
    insides = self.insidesANodePath
    if not insides:
        return
    if isOpen:
        insides.unstash()
    else:
        insides.stash()
def doorBCallback(self, isOpen):
    """Mirror of doorACallback for door B: show the interior geometry
    while the door is open, hide it while closed."""
    insides = self.insidesBNodePath
    if insides:
        (insides.unstash if isOpen else insides.stash)()
def __toonsToPromotionPosition(self, toonIds, battleNode):
    """Stand each toon at its standard battle spot relative to battleNode,
    pushed 10 units further along Y."""
    spots = BattleBase.BattleBase.toonPoints[len(toonIds) - 1]
    for doId, (pos, heading) in zip(toonIds, spots):
        toon = base.cr.doId2do.get(doId)
        if not toon:
            continue
        toon.reparentTo(render)
        toon.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], heading, 0, 0)
def __doobersToPromotionPosition(self, doobers, battleNode):
    """Line the doober suits up at the standard battle spots relative to
    battleNode, pushed 10 units along Y, standing in neutral."""
    spots = BattleBase.BattleBase.toonPoints[len(doobers) - 1]
    for suit, (pos, heading) in zip(doobers, spots):
        suit.fsm.request('neutral')
        suit.loop('neutral')
        suit.setPosHpr(battleNode, pos[0], pos[1] + 10, pos[2], heading, 0, 0)
def __touchedCage(self, entry):
    # Local toon touched the cage: tell the AI (restocks pies) and clear
    # any advice message currently on screen.
    self.sendUpdate('touchCage', [])
    self.__clearOnscreenMessage()
    taskMgr.remove(self.uniqueName('PieAdvice'))
    base.playSfx(self.piesRestockSfx)
    if not self.everThrownPie:
        # Remind the player how to throw pies if they haven't yet.
        taskMgr.doMethodLater(30, self.__howToThrowPies, self.uniqueName('PieAdvice'))

def __outOfPies(self):
    # No pies left: show the hint now, and a follow-up hint in 20 seconds.
    self.__showOnscreenMessage(TTLocalizer.BossBattleNeedMorePies)
    taskMgr.doMethodLater(20, self.__howToGetPies, self.uniqueName('PieAdvice'))

def __howToGetPies(self, task):
    # Delayed-task callback: hint about restocking pies.
    self.__showOnscreenMessage(TTLocalizer.BossBattleHowToGetPies)

def __howToThrowPies(self, task):
    # Delayed-task callback: hint about throwing pies.
    self.__showOnscreenMessage(TTLocalizer.BossBattleHowToThrowPies)

def __foundPieButton(self):
    # Player threw a pie; stop nagging them with advice.
    self.everThrownPie = 1
    self.__clearOnscreenMessage()
    taskMgr.remove(self.uniqueName('PieAdvice'))
def __pieSplat(self, toon, pieCode):
    # A pie hit the boss. pieCode identifies what part was struck.
    if base.config.GetBool('easy-vp', 0):
        # Easy mode: any hit on a non-dizzy boss counts as an insides hit.
        if not self.dizzy:
            pieCode = ToontownGlobals.PieCodeBossInsides
    if pieCode == ToontownGlobals.PieCodeBossInsides:
        # Only report our own hits to the AI; everyone flashes the boss red.
        if toon == localAvatar:
            self.d_hitBossInsides()
        self.flashRed()
    elif pieCode == ToontownGlobals.PieCodeBossCog:
        if toon == localAvatar:
            self.d_hitBoss(1)
        # Body hits only register visibly while the boss is dizzy.
        if self.dizzy:
            self.flashRed()
            self.doAnimate('hit', now=1)

def __localPieSplat(self, pieCode, entry):
    # Our pie hit something locally; we only care about hitting other toons.
    if pieCode != ToontownGlobals.PieCodeToon:
        return
    avatarDoId = entry.getIntoNodePath().getNetTag('avatarDoId')
    if avatarDoId == '':
        self.notify.warning('Toon %s has no avatarDoId tag.' % repr(entry.getIntoNodePath()))
        return
    doId = int(avatarDoId)
    # Don't report hitting ourselves.
    if doId != localAvatar.doId:
        self.d_hitToon(doId)

def __finalPieSplat(self, toon, pieCode):
    # NearVictory phase: the first body hit finishes the boss off.
    if pieCode != ToontownGlobals.PieCodeBossCog:
        return
    self.sendUpdate('finalPieSplat', [])
    # Only one final splat may be reported.
    self.ignore('pieSplat')
def cagedToonBattleThree(self, index, avId):
    """Display one of the caged toon's scripted battle-three lines.

    index selects the phrase from TTLocalizer.CagedToonBattleThree;
    avId, when nonzero, names the toon substituted into the phrase's
    %(toon)s placeholder. An unknown index clears the chat balloon.
    """
    # Renamed the local from 'str' to 'phrase': the original shadowed the
    # builtin str() for the rest of the method body.
    phrase = TTLocalizer.CagedToonBattleThree.get(index)
    if phrase:
        toonName = ''
        if avId:
            toon = self.cr.doId2do.get(avId)
            if not toon:
                # Toon left the district; don't show a half-filled phrase.
                self.cagedToon.clearChat()
                return
            toonName = toon.getName()
        text = phrase % {'toon': toonName}
        self.cagedToon.setChatAbsolute(text, CFSpeech | CFTimeout)
    else:
        self.cagedToon.clearChat()
def cleanupAttacks(self):
    # The only client-side attack visual to clean up is the gear strafe.
    self.__cleanupStrafe()

def __cleanupStrafe(self):
    if self.strafeInterval:
        # finish() jumps the interval to its end so the gear nodes detach.
        self.strafeInterval.finish()
        self.strafeInterval = None
    return

def doStrafe(self, side, direction):
    # Throw a fan of spinning gears out of door A (side 0) or door B.
    # The gear count and throw rate scale with current boss damage.
    gearRoot = self.rotateNode.attachNewNode('gearRoot')
    if side == 0:
        gearRoot.setPos(0, -7, 3)
        gearRoot.setHpr(180, 0, 0)
        door = self.doorA
    else:
        gearRoot.setPos(0, 7, 3)
        door = self.doorB
    # Tag the gears so collisions can be attributed to this attack.
    gearRoot.setTag('attackCode', str(ToontownGlobals.BossCogStrafeAttack))
    gearModel = self.getGearFrisbee()
    gearModel.setScale(0.1)
    # t in [0, 1]: fraction of max boss damage taken so far.
    t = self.getBossDamage() / 100.0
    gearTrack = Parallel()
    numGears = int(4 + 6 * t + 0.5)
    time = 5.0 - 4.0 * t
    spread = 60 * math.pi / 180.0
    if direction == 1:
        spread = -spread
    dist = 50
    rate = time / numGears
    for i in range(numGears):
        node = gearRoot.attachNewNode(str(i))
        node.hide()
        node.setPos(0, 0, 0)
        gear = gearModel.instanceTo(node)
        # Evenly distribute gears across the (signed) spread angle.
        angle = (float(i) / (numGears - 1) - 0.5) * spread
        x = dist * math.sin(angle)
        y = dist * math.cos(angle)
        h = random.uniform(-720, 720)
        gearTrack.append(Sequence(Wait(i * rate), Func(node.show), Parallel(node.posInterval(1, Point3(x, y, 0), fluid=1), node.hprInterval(1, VBase3(h, 0, 0), fluid=1), Sequence(SoundInterval(self.strafeSfx[i], volume=0.2, node=self), duration=0)), Func(node.detachNode)))
    # Open the door, launch the gears, close the door.
    seq = Sequence(Func(door.request, 'open'), Wait(0.7), gearTrack, Func(door.request, 'close'))
    self.__cleanupStrafe()
    self.strafeInterval = seq
    seq.start()
def __showEasyBarrels(self):
    # Easy-mode extra: slide a rack of gag barrels into the arena.
    barrelNodes = hidden.findAllMatches('**/Distributed*Barrel-*')
    if not barrelNodes or barrelNodes.isEmpty():
        return
    if render.find('barrelsRootNode'):
        # Guard against being called twice without a hide in between.
        self.notify.warning('__showEasyBarrels(): barrelsRootNode already exists')
        return
    self.barrelsRootNode = render.attachNewNode('barrelsRootNode')
    self.barrelsRootNode.setPos(*SellbotBossGlobals.BarrelsStartPos)
    # Face the rack toward whichever side of the arena is in use.
    if self.arenaSide == 0:
        self.barrelsRootNode.setHpr(180, 0, 0)
    else:
        self.barrelsRootNode.setHpr(0, 0, 0)
    for i, barrelNode in enumerate(barrelNodes):
        barrel = base.cr.doId2do.get(int(barrelNode.getNetTag('doId')))
        SellbotBossGlobals.setBarrelAttr(barrel, barrel.entId)
        if hasattr(barrel, 'applyLabel'):
            barrel.applyLabel()
        barrel.setPosHpr(barrel.pos, barrel.hpr)
        barrel.reparentTo(self.barrelsRootNode)
    intervalName = 'MakeBarrelsAppear'
    # Lerp the whole rack from its start position into the arena.
    seq = Sequence(LerpPosInterval(self.barrelsRootNode, 0.5, Vec3(*SellbotBossGlobals.BarrelsFinalPos), blendType='easeInOut'), name=intervalName)
    seq.start()
    self.storeInterval(seq, intervalName)

def __hideEasyBarrels(self):
    # hasattr guard: __showEasyBarrels may have returned early without
    # ever creating the root node.
    if hasattr(self, 'barrelsRootNode'):
        self.barrelsRootNode.removeNode()
    intervalName = 'MakeBarrelsAppear'
    self.clearInterval(intervalName)

def toonPromoted(self, promoted):
    # AI tells us whether the local toon earned a promotion this round.
    self.localToonPromoted = promoted
| {
"content_hash": "2cd5bdec68d0cf3ee6f8f3fc45b927e6",
"timestamp": "",
"source": "github",
"line_count": 1284,
"max_line_length": 325,
"avg_line_length": 42.835669781931465,
"alnum_prop": 0.640733804839912,
"repo_name": "ksmit799/Toontown-Source",
"id": "09c05b132b55184b8e0672c07ecab5541f3cbed3",
"size": "55001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "toontown/suit/DistributedSellbotBoss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1441"
},
{
"name": "PLSQL",
"bytes": "901"
},
{
"name": "Python",
"bytes": "15617225"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import vtk
from vtk.test import Testing
def load_geojson(input_string, feature_properties=None):
    '''Parses input_string with vtkGeoJSONReader, returns vtkPolyData

    feature_properties is a dictionary of name-default_values
    to attach as cell data in the returned vtkPolyData.
    '''
    # Bug fix: the original used a mutable default argument ({}), which is
    # created once and shared across all calls. Use None and build a fresh
    # dict per call instead. Passing None behaves exactly like omitting it.
    if feature_properties is None:
        feature_properties = {}
    reader = vtk.vtkGeoJSONReader()
    #reader.DebugOn()
    reader.StringInputModeOn()
    reader.SetStringInput(input_string)
    for name, default_value in feature_properties.items():
        reader.AddFeatureProperty(name, default_value)
    reader.Update()
    return reader.GetOutput()
if __name__ == '__main__':
    # Use the feature-collection example taken from the GeoJSON spec.
    # Copied from http://geojson.org/geojson-spec.html (October 2014).
    # Features are in/near the island of Sumatra (in western Indonesia).
    input_string = \
        """
        { "type": "FeatureCollection",
          "features": [
            { "type": "Feature",
              "geometry": {"type": "Point", "coordinates": [102.0, 0.5]},
              "properties": {"prop0": "value0"}
            },
            { "type": "Feature",
              "geometry": {
                "type": "LineString",
                "coordinates": [
                  [102.0, 0.0], [103.0, 1.0], [104.0, 0.0], [105.0, 1.0]
                ]
              },
              "properties": {
                "prop0": "value0",
                "prop1": 0.0
              }
            },
            { "type": "Feature",
              "geometry": {
                "type": "Polygon",
                "coordinates": [
                  [ [100.0, 0.0], [101.0, 0.0], [101.0, 1.0],
                    [100.0, 1.0], [100.0, 0.0] ]
                ]
              },
              "properties": {
                "prop0": "value0",
                "prop1": {"this": "that"}
              }
            }
          ]
        }
        """
    # Ask the reader to attach "prop0" as cell data, with a vtkVariant
    # default for features that omit it.
    prop0_default = vtk.vtkVariant('default')
    feature_properties = {'prop0': prop0_default}
    polydata = load_geojson(input_string, feature_properties)
    if polydata is None:
        print('Failed to read input string and return vtkPolyData')
        sys.exit(1)
    num_errors = 0
    # Check cell counts
    expected_verts = 1
    expected_lines = 1
    expected_polys = 1
    num_verts = polydata.GetNumberOfVerts()
    if num_verts != expected_verts:
        print('Wrong number of verts: returned %s, should be %s' % \
            (num_verts, expected_verts))
        num_errors += 1
    num_lines = polydata.GetNumberOfLines()
    if num_lines != expected_lines:
        print('Wrong number of lines: returned %s, should be %s' % \
            (num_lines, expected_lines))
        num_errors += 1
    else:
        # Check number of points in the (first) polyline
        id_list = vtk.vtkIdList()
        polydata.GetLines().GetCell(0, id_list)
        if id_list.GetNumberOfIds() != 4:
            print('Wrong number of points in line 0: returned %s, should be %s' % \
                (id_list.GetNumberOfIds(), 4))
            num_errors += 1
    num_polys = polydata.GetNumberOfPolys()
    if num_polys != expected_polys:
        print('Wrong number of polys: returned %s, should be %s' % \
            (num_polys, expected_polys))
        num_errors += 1
    else:
        # Check number of points in the (first) polygon
        id_list = vtk.vtkIdList()
        polydata.GetPolys().GetCell(0, id_list)
        if id_list.GetNumberOfIds() != 4:
            print('Wrong number of points in poly 0: returned %s, should be %s' % \
                (id_list.GetNumberOfIds(), 4))
            num_errors += 1
    # Check cell data
    cell_data = polydata.GetCellData()
    # All polydata generated from GeoJSON have feature-id array
    feature_id_array = cell_data.GetAbstractArray('feature-id')
    if feature_id_array is None:
        print('feature-id array missing')
        num_errors += 1
    # Test case also specified a prop0 array
    prop0_array = cell_data.GetAbstractArray('prop0')
    if prop0_array is None:
        print('prop0 array missing')
        num_errors += 1
    # The exit status carries the error count; nonzero signals failure.
    print('num_errors:', num_errors)
    sys.exit(num_errors)
| {
"content_hash": "a6bd86a38a53d9fd5ccbcd20f10921e6",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 77,
"avg_line_length": 31.04724409448819,
"alnum_prop": 0.585087496829825,
"repo_name": "hlzz/dotfiles",
"id": "2313c61d532a3c71f5d5111af8cec43746ecd768",
"size": "3943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/IO/GeoJSON/Testing/Python/TestGeoJSONReader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
"""
Base object supporting the storage of custom fields as attributes
"""
from __future__ import unicode_literals
import sys
class TroveboxObject(object):
    """ Base object supporting the storage of custom fields as attributes """
    # Subclasses override this with their API type string (eg. "photo").
    _type = "None"

    def __init__(self, client, json_dict):
        """Store the client and expose json_dict's keys as attributes."""
        self.id = None
        self.name = None
        self._client = client
        self._json_dict = json_dict
        self._set_fields(json_dict)

    def _set_fields(self, json_dict):
        """ Set this object's attributes specified in json_dict """
        # Keys starting with "_" are reserved and never become attributes.
        for key, value in json_dict.items():
            if not key.startswith("_"):
                setattr(self, key, value)

    def _replace_fields(self, json_dict):
        """
        Delete this object's attributes, and replace with
        those in json_dict.
        """
        for key in self._json_dict.keys():
            if not key.startswith("_"):
                delattr(self, key)
        self._json_dict = json_dict
        self._set_fields(json_dict)

    def _delete_fields(self):
        """
        Delete this object's attributes, including name and id
        """
        for key in self._json_dict.keys():
            if not key.startswith("_"):
                delattr(self, key)
        self._json_dict = {}
        self.id = None
        self.name = None

    def __repr__(self):
        # Prefer the name, fall back to the id, then the bare class name.
        if self.name is not None:
            value = "<%s name='%s'>" % (self.__class__.__name__, self.name)
        elif self.id is not None:
            value = "<%s id='%s'>" % (self.__class__.__name__, self.id)
        else:
            value = "<%s>" % (self.__class__.__name__)
        # Bug fix: the original compared sys.version (a string) against '3',
        # which is fragile lexicographic comparison (a hypothetical major
        # version "10" would sort before "3"). Compare the numeric
        # sys.version_info instead. Python 2 requires a bytestring repr.
        if sys.version_info[0] < 3:
            return value.encode('utf-8')
        else:  # pragma: no cover
            return value

    def get_fields(self):
        """ Returns this object's attributes """
        return self._json_dict

    def get_type(self):
        """ Return this object's type (eg. "photo") """
        return self._type
| {
"content_hash": "cf8ce7f9f4ef79510ce3fdd76801f535",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 77,
"avg_line_length": 30.772727272727273,
"alnum_prop": 0.5376661742983752,
"repo_name": "photo/openphoto-python",
"id": "bd525822a4452dd3503fcde00acb63c0852d44e9",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trovebox/objects/trovebox_object.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "193614"
},
{
"name": "Shell",
"bytes": "1188"
}
],
"symlink_target": ""
} |
import requests
from bs4 import BeautifulSoup
import processor
from datetime import datetime
def parse(url):
    """Fetch a Seura article page and normalise it via the processor module.

    Returns the dictionary built by processor.create_dictionary. On a 404
    response or a missing article wrapper, a dictionary with empty fields
    (carrying the HTTP status code) is returned instead.
    """
    response = requests.get(url)
    if response.status_code == 404:
        return processor.create_dictionary('', url, response.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
    response.encoding = 'UTF-8'
    document = BeautifulSoup(response.text, "html.parser")
    wrapper = document.find(class_='content__wrapper')
    if wrapper is None:
        return processor.create_dictionary('', url, response.status_code, [u''], [u''], u'', u'', u'', u'', [u''], [u''])
    processor.decompose_all(wrapper.find_all('script'))
    categories = processor.collect_categories(wrapper.find_all(class_='typography__category'))
    datetime_list = processor.collect_datetime(wrapper.find(class_='meta-content'))
    author = processor.collect_text(wrapper.find(class_='typography__author'))
    title = processor.collect_text(wrapper.find(class_='content__title'))
    ingress = processor.collect_text(wrapper.find(class_='content__intro'))
    body_text = processor.collect_text(wrapper.find(class_='content__body'))
    galleries = wrapper.find_all(class_='content__main-gallery')
    images = processor.collect_images_by_parent(galleries, '')
    # Each gallery anchor carries its caption as HTML in 'data-caption'.
    captions = [
        processor.collect_text(BeautifulSoup(gallery.find('a')['data-caption'], "html.parser"))
        for gallery in galleries
    ]
    return processor.create_dictionary('Seura', url, response.status_code, categories, datetime_list, author, title, ingress, body_text, images, captions)
if __name__ == '__main__':
    # Bug fix: parse() takes a single argument, but the original call also
    # passed an open file handle (via the Python 2-only file() builtin),
    # which would raise a TypeError before any parsing happened. Print the
    # parsed result instead for a manual smoke test.
    print(parse("http://seura.fi/puheenaihe/ajankohtaista/vasemmisto-kehuu-kokoomusta-harjoittavat-rehellisesti-politiikkaa-joka-on-ajanut-suomen-lamaan/?shared=43026-ad87bd06-500"))
| {
"content_hash": "e136510da9fddd4e27f200b7f24babcb",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 196,
"avg_line_length": 47.44736842105263,
"alnum_prop": 0.6860787576261785,
"repo_name": "HIIT/mediacollection",
"id": "805f83fecffdb8d384b92ae1fc2420eb8c1602a5",
"size": "1828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sites/seura.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "197865"
}
],
"symlink_target": ""
} |
"""
AAFigure directive for reStructuredText.
This is open source software under the BSD license. See LICENSE.txt for more
details.
"""
from aafigure import process, render, UnsupportedFormatError, AsciiArtImage
| {
"content_hash": "c74694addec27586e2100fdab74e2449",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 76,
"avg_line_length": 26.625,
"alnum_prop": 0.8028169014084507,
"repo_name": "garyvdm/aafigure",
"id": "81e72dc4fd781476e91a8b5dfe3b185d6f9c469b",
"size": "213",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "aafigure/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "92401"
}
],
"symlink_target": ""
} |
'''
I really hate repeating myself. These are helpers that avoid typing the
whole thing over and over when implementing additional template tags
They help implementing tags of the following forms::
{% tag as var_name %}
{% tag of template_var as var_name %}
{% tag of template_var as var_name arg1,arg2,kwarg3=4 %}
'''
from django import template
def _parse_args(argstr, context=None):
    """Parse a comma-separated tag argument string into a dict.

    'a,b=1' yields {'a': True, 'b': '1'}. When a template context is
    supplied, each value is additionally tried as a template variable and
    resolved against it, falling back to the raw string.
    """
    try:
        parsed = {}
        for chunk in argstr.split(','):
            if '=' not in chunk:
                # Bare tokens act as boolean flags.
                parsed[chunk] = True
                continue
            key, raw = chunk.split('=', 1)
            value = raw
            if context:
                try:
                    value = template.Variable(raw).resolve(context)
                except template.VariableDoesNotExist:
                    value = raw
            parsed[key] = value
        return parsed
    except TypeError:
        raise template.TemplateSyntaxError('Malformed arguments')
def do_simple_node_with_var_and_args_helper(cls):
    """Build a template-tag compiler for ``{% tag of var [args] %}``
    that instantiates *cls*. The args token is optional."""
    def _func(parser, token):
        try:
            tag_name, of_, in_var_name, args = token.contents.split()
        except ValueError:
            try:
                tag_name, of_, in_var_name = token.contents.split()
                args = ''
            except ValueError:
                # Parenthesized raise is valid on both Python 2 and 3;
                # the original "raise E, msg" statement is Python 2 only.
                raise template.TemplateSyntaxError(
                    'Invalid syntax for %s node: %s' % (cls.__name__, token.contents))
        return cls(tag_name, in_var_name, args)
    return _func


class SimpleNodeWithVarAndArgs(template.Node):
    """Node for ``{% tag of var args %}``; subclasses implement
    ``what(instance, args)`` to produce the rendered output."""

    def __init__(self, tag_name, in_var_name, args):
        self.tag_name = tag_name
        self.in_var = template.Variable(in_var_name)
        self.args = args

    def render(self, context):
        self.render_context = context
        try:
            instance = self.in_var.resolve(context)
        except template.VariableDoesNotExist:
            # An unresolvable variable renders as empty output.
            return ''
        return self.what(instance, _parse_args(self.args, context))
def do_simple_node_with_var_helper(cls):
    """Build a template-tag compiler for ``{% tag of var %}`` that
    instantiates *cls*."""
    def _func(parser, token):
        try:
            tag_name, of_, in_var_name = token.contents.split()
        except ValueError:
            # Parenthesized raise is valid on both Python 2 and 3;
            # the original "raise E, msg" statement is Python 2 only.
            raise template.TemplateSyntaxError(
                'Invalid syntax for %s node: %s' % (cls.__name__, token.contents))
        return cls(tag_name, in_var_name)
    return _func


class SimpleNodeWithVar(template.Node):
    """Node for ``{% tag of var %}``; subclasses implement
    ``what(instance)`` to produce the rendered output."""

    def __init__(self, tag_name, in_var_name):
        self.tag_name = tag_name
        self.in_var = template.Variable(in_var_name)

    def render(self, context):
        self.render_context = context
        try:
            instance = self.in_var.resolve(context)
        except template.VariableDoesNotExist:
            # An unresolvable variable renders as empty output.
            return ''
        return self.what(instance)
def do_simple_assignment_node_helper(cls):
    """Build a template-tag compiler for ``{% tag as var_name %}`` that
    instantiates *cls*."""
    def _func(parser, token):
        try:
            tag_name, as_, var_name = token.contents.split()
        except ValueError:
            # Parenthesized raise is valid on both Python 2 and 3;
            # the original "raise E, msg" statement is Python 2 only.
            raise template.TemplateSyntaxError(
                'Invalid syntax for %s node: %s' % (cls.__name__, token.contents))
        return cls(tag_name, var_name)
    return _func


class SimpleAssignmentNode(template.Node):
    """Node for ``{% tag as var_name %}``; stores the result of
    ``what()`` into the context under var_name."""

    def __init__(self, tag_name, var_name):
        self.tag_name = tag_name
        self.var_name = var_name

    def render(self, context):
        self.render_context = context
        context[self.var_name] = self.what()
        # Assignment tags render nothing themselves.
        return ''
def do_simple_assignment_node_with_var_helper(cls):
    """Build a template-tag compiler for ``{% tag of var as var_name %}``
    that instantiates *cls*."""
    def _func(parser, token):
        try:
            tag_name, of_, in_var_name, as_, var_name = token.contents.split()
        except ValueError:
            # Parenthesized raise is valid on both Python 2 and 3;
            # the original "raise E, msg" statement is Python 2 only.
            raise template.TemplateSyntaxError(
                'Invalid syntax for %s node: %s' % (cls.__name__, token.contents))
        return cls(tag_name, in_var_name, var_name)
    return _func


class SimpleAssignmentNodeWithVar(template.Node):
    """Node for ``{% tag of var as var_name %}``; stores the result of
    ``what(instance)`` into the context under var_name."""

    def __init__(self, tag_name, in_var_name, var_name):
        self.tag_name = tag_name
        self.in_var = template.Variable(in_var_name)
        self.var_name = var_name

    def render(self, context):
        self.render_context = context
        try:
            instance = self.in_var.resolve(context)
        except template.VariableDoesNotExist:
            # An unresolvable variable assigns an empty list instead.
            context[self.var_name] = []
            return ''
        context[self.var_name] = self.what(instance)
        return ''
def do_simple_assignment_node_with_var_and_args_helper(cls):
    """Build a template-tag compiler for
    ``{% tag of var as var_name [args] %}`` that instantiates *cls*.
    The args token is optional."""
    def _func(parser, token):
        try:
            tag_name, of_, in_var_name, as_, var_name, args = token.contents.split()
        except ValueError:
            try:
                tag_name, of_, in_var_name, as_, var_name = token.contents.split()
                args = ''
            except ValueError:
                # Parenthesized raise is valid on both Python 2 and 3;
                # the original "raise E, msg" statement is Python 2 only.
                raise template.TemplateSyntaxError(
                    'Invalid syntax for %s node: %s' % (cls.__name__, token.contents))
        return cls(tag_name, in_var_name, var_name, args)
    return _func


class SimpleAssignmentNodeWithVarAndArgs(template.Node):
    """Node for ``{% tag of var as var_name args %}``; stores the result
    of ``what(instance, args)`` into the context under var_name."""

    def __init__(self, tag_name, in_var_name, var_name, args):
        self.tag_name = tag_name
        self.in_var = template.Variable(in_var_name)
        self.var_name = var_name
        self.args = args

    def render(self, context):
        self.render_context = context
        try:
            instance = self.in_var.resolve(context)
        except template.VariableDoesNotExist:
            # An unresolvable variable assigns an empty list instead.
            context[self.var_name] = []
            return ''
        context[self.var_name] = self.what(instance, _parse_args(self.args, context))
        return ''
| {
"content_hash": "8f6c73949d2c16310ca78d16fa32909a",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 117,
"avg_line_length": 31.91860465116279,
"alnum_prop": 0.5876138433515483,
"repo_name": "hgrimelid/feincms",
"id": "2e4a849f9f63dcb2c9a3815ffcc9edc38e49871a",
"size": "5490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "feincms/utils/templatetags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "118395"
},
{
"name": "Python",
"bytes": "352434"
},
{
"name": "Shell",
"bytes": "599"
}
],
"symlink_target": ""
} |
from test import support
import unittest
import xdrlib
class XDRTest(unittest.TestCase):

    def test_xdr(self):
        # Round-trip every Packer type through an Unpacker and verify the
        # values survive, including position save/restore.
        p = xdrlib.Packer()
        s = b'hello world'
        a = [b'what', b'is', b'hapnin', b'doctor']
        p.pack_int(42)
        p.pack_uint(9)
        p.pack_bool(True)
        p.pack_bool(False)
        p.pack_uhyper(45)
        p.pack_float(1.9)
        p.pack_double(1.9)
        p.pack_string(s)
        p.pack_list(range(5), p.pack_uint)
        p.pack_array(a, p.pack_string)
        # now verify
        data = p.get_buffer()
        up = xdrlib.Unpacker(data)
        self.assertEqual(up.get_position(), 0)
        self.assertEqual(up.unpack_int(), 42)
        self.assertEqual(up.unpack_uint(), 9)
        self.assertTrue(up.unpack_bool() is True)
        # remember position
        pos = up.get_position()
        self.assertTrue(up.unpack_bool() is False)
        # rewind and unpack again
        up.set_position(pos)
        self.assertTrue(up.unpack_bool() is False)
        self.assertEqual(up.unpack_uhyper(), 45)
        # Approximate comparison: the float leg is stored as 32-bit XDR,
        # so 1.9 does not round-trip bit-exactly.
        self.assertAlmostEqual(up.unpack_float(), 1.9)
        self.assertAlmostEqual(up.unpack_double(), 1.9)
        self.assertEqual(up.unpack_string(), s)
        self.assertEqual(up.unpack_list(up.unpack_uint), list(range(5)))
        self.assertEqual(up.unpack_array(up.unpack_string), a)
        up.done()
        # After done(), any further unpack must report end-of-buffer.
        self.assertRaises(EOFError, up.unpack_uint)
def test_main():
    # Entry point: run the suite through the test.support helper.
    support.run_unittest(XDRTest)


if __name__ == "__main__":
    test_main()
| {
"content_hash": "89246a97c926ef6db5ed1f49b9f9dd9d",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 27.232142857142858,
"alnum_prop": 0.5875409836065574,
"repo_name": "MalloyPower/parsing-python",
"id": "073448ccee7588bd91c10f369f93126a170c6435",
"size": "1525",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.2/Lib/test/test_xdrlib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
"""
cclib (http://cclib.sf.net) is (c) 2006, the cclib development team
and licensed under the LGPL (http://www.gnu.org/copyleft/lgpl.html).
gmagoon 4/5/10-4/6/10 (this notice added 4/29/10): Gregory Magoon modified this file from cclib 1.0
"""
__revision__ = "$Revision: 863 $"
# These import statements are added for the convenience of users...
# Rather than having to type:
# from cclib.parser.gaussianparser import Gaussian
# they can use:
# from cclib.parser import Gaussian
from adfparser import ADF
from gamessparser import GAMESS
from gamessukparser import GAMESSUK
from gaussianparser import Gaussian
from jaguarparser import Jaguar
from molproparser import Molpro
from orcaparser import ORCA
from mopacparser import Mopac
from mm4parser import MM4
# This allows users to type:
# from cclib.parser import ccopen
from ccopen import ccopen
| {
"content_hash": "0f0a0e1a4ba2b7b52dacd5770cb68c2c",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 99,
"avg_line_length": 31.20689655172414,
"alnum_prop": 0.7325966850828729,
"repo_name": "connie/RMG-Java",
"id": "8ebbfcf82b8b976d59b03768e12ea6087984097a",
"size": "905",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "source/cclib/parser/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6848"
},
{
"name": "CSS",
"bytes": "10846"
},
{
"name": "FORTRAN",
"bytes": "3189505"
},
{
"name": "Groff",
"bytes": "92"
},
{
"name": "HTML",
"bytes": "9622"
},
{
"name": "Java",
"bytes": "3484425"
},
{
"name": "Makefile",
"bytes": "30726"
},
{
"name": "Matlab",
"bytes": "9402"
},
{
"name": "Python",
"bytes": "428569"
},
{
"name": "Shell",
"bytes": "1427"
}
],
"symlink_target": ""
} |
"""
The sequence of triangle numbers is generated by adding the natural numbers.
So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
"""
import math
def getDivisorCount(number):
    """Return the number of positive divisors of ``number``.

    Divisors come in pairs (d, number // d), so only candidates up to and
    including sqrt(number) are tested, adding 2 per hit.  A perfect square
    counts its root twice in that scheme, so 1 is subtracted afterwards.
    """
    divisors = 0
    squareRoot = math.sqrt(number)
    # Bug fix: the upper bound must INCLUDE int(squareRoot).  The original
    # range(1, int(squareRoot)) skipped it, losing divisor pairs such as
    # (3, 4) for 12 and (3, 12) for 36.  (range works on Python 2 and 3.)
    for divisor in range(1, int(squareRoot) + 1):
        if number % divisor == 0:
            divisors += 2
    # If sqrt of the number is a whole number, we counted the root twice
    # in the loop above, so subtract one.
    if squareRoot == int(squareRoot):
        divisors -= 1
    return divisors
# Generate successive triangle numbers until one has more than 500 divisors.
triangleSize = 0
triangleNumber = 0
divisorCount = 0
while divisorCount <= 500:
    triangleSize += 1
    # The n-th triangle number is the running sum 1 + 2 + ... + n.
    triangleNumber += triangleSize
    divisorCount = getDivisorCount(triangleNumber)
# Parenthesized print is valid on both Python 2 and Python 3;
# the original bare print statement was Python-2-only.
print(triangleNumber)
"content_hash": "4db9f75d54dcbabaf407d902d0ba41ed",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 97,
"avg_line_length": 26.508474576271187,
"alnum_prop": 0.5927109974424553,
"repo_name": "hickeroar/project-euler",
"id": "fb9707cb2e3b896dae4dba49648518330b4a84e6",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "010/solution012.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "54504"
}
],
"symlink_target": ""
} |
import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMayaUI as OpenMayaUI
import os
from PySide import QtGui
import cvwrap.bindui
# Widget names double as optionVar keys for persisting option box values.
NAME_WIDGET = 'cvwrap_name'  # node name text field
RADIUS_WIDGET = 'cvwrap_radius'  # sample radius slider
NEW_BIND_MESH_WIDGET = 'cvwrap_newbindmesh'  # "create new bind mesh" checkbox
BIND_FILE_WIDGET = 'cvwrap_bindfile'  # binding file path field
# Menu items created by create_menuitems and removed by destroy_menuitems.
MENU_ITEMS = []
def create_menuitems():
    """Insert cvWrap entries into Maya's Deform menus (Maya 2016+ only).

    Safe to call repeatedly: returns immediately if items already exist.
    """
    global MENU_ITEMS
    if MENU_ITEMS:
        # Already created
        return
    if int(cmds.about(v=True)) < 2016:
        cmds.warning('cvWrap menus only available in Maya 2016 and higher.')
        return
    for menu in ['mainDeformMenu', 'mainRigDeformationsMenu']:
        # Make sure the menu widgets exist first.
        mel.eval('ChaDeformationsMenu MayaWindow|{0};'.format(menu))
        items = cmds.menu(menu, q=True, ia=True)
        for item in items:
            # Divider items carry the section label ("Create", "Edit", ...);
            # track which section we are currently walking through.
            if cmds.menuItem(item, q=True, divider=True):
                section = cmds.menuItem(item, q=True, label=True)
            # NOTE(review): 'section' is read below before assignment if the
            # first menu item is not a divider -- presumably Maya always
            # emits a divider first; confirm.
            menu_label = cmds.menuItem(item, q=True, label=True)
            if menu_label == 'Wrap':
                if section == 'Create':
                    # Insert "cvWrap" (plus its option box) right after the
                    # stock Wrap item.
                    cvwrap_item = cmds.menuItem(label="cvWrap", command=create_cvwrap,
                                                sourceType='python', insertAfter=item, parent=menu)
                    cvwrap_options = cmds.menuItem(command=display_cvwrap_options,
                                                   insertAfter=cvwrap_item, parent=menu,
                                                   optionBox=True)
                    MENU_ITEMS.append(cvwrap_item)
                    MENU_ITEMS.append(cvwrap_options)
                elif section == 'Edit':
                    # Submenu holding the binding edit/import/export actions.
                    submenu = cmds.menuItem(label="cvWrap", subMenu=True, insertAfter=item,
                                            parent=menu)
                    MENU_ITEMS.append(submenu)
                    item = cmds.menuItem(label="Edit Binding", command=edit_binding,
                                         sourceType='python', parent=submenu)
                    MENU_ITEMS.append(item)
                    item = cmds.menuItem(label="Import Binding", command=import_binding,
                                         sourceType='python', parent=submenu)
                    MENU_ITEMS.append(item)
                    item = cmds.menuItem(label="Export Binding", command=export_binding,
                                         sourceType='python', parent=submenu)
                    MENU_ITEMS.append(item)
            elif menu_label == 'Cluster' and section == 'Paint Weights':
                item = cmds.menuItem(label="cvWrap", command=paint_cvwrap_weights,
                                     sourceType='python', insertAfter=item, parent=menu)
                MENU_ITEMS.append(item)
def create_cvwrap(*args, **kwargs):
    """Create a cvWrap deformer from the current selection.

    Requires at least one surface plus one influence object selected; the
    command arguments come from the option box widgets (or saved optionVars).
    Raises RuntimeError when the selection is too small.
    """
    cmds.loadPlugin('cvwrap', qt=True)
    sel = cmds.ls(sl=True)
    if len(sel) >= 2:
        kwargs = get_create_command_kwargs()
        result = cmds.cvWrap(**kwargs)
        # Parenthesized print works on both Python 2 and Python 3; the
        # original bare print statement was Python-2-only.
        print(result)
    else:
        raise RuntimeError("Select at least one surface and one influence object.")
def get_create_command_kwargs():
    """Gets the cvWrap command arguments either from the option box widgets or the saved
    option vars. If the widgets exist, their values will be saved to the option vars.
    @return A dictionary of the kwargs to the cvWrap command."""
    args = {}
    if cmds.textFieldGrp(NAME_WIDGET, exists=True):
        args['name'] = cmds.textFieldGrp(NAME_WIDGET, q=True, text=True)
        cmds.optionVar(sv=(NAME_WIDGET, args['name']))
    else:
        # Option box never opened this session: use the saved preference,
        # falling back to Maya's auto-numbered default name.
        args['name'] = cmds.optionVar(q=NAME_WIDGET) or 'cvWrap#'
    if cmds.floatSliderGrp(RADIUS_WIDGET, exists=True):
        args['radius'] = cmds.floatSliderGrp(RADIUS_WIDGET, q=True, value=True)
        cmds.optionVar(fv=(RADIUS_WIDGET, args['radius']))
    else:
        # NOTE(review): yields 0.0 when the optionVar was never saved.
        args['radius'] = cmds.optionVar(q=RADIUS_WIDGET)
    if cmds.checkBoxGrp(NEW_BIND_MESH_WIDGET, exists=True):
        # Persist the checkbox, but only pass newBindMesh when enabled.
        if cmds.checkBoxGrp(NEW_BIND_MESH_WIDGET, q=True, v1=True):
            args['newBindMesh'] = True
            cmds.optionVar(iv=(NEW_BIND_MESH_WIDGET, 1))
        else:
            cmds.optionVar(iv=(NEW_BIND_MESH_WIDGET, 0))
    else:
        value = cmds.optionVar(q=NEW_BIND_MESH_WIDGET)
        if value:
            args['newBindMesh'] = True
    if cmds.textFieldButtonGrp(BIND_FILE_WIDGET, exists=True):
        bind_file = cmds.textFieldButtonGrp(BIND_FILE_WIDGET, q=True, text=True)
        # Expand environment variables so paths like $PROJECT/data work.
        bind_file = os.path.expandvars(bind_file.strip())
        if bind_file:
            if os.path.exists(bind_file):
                args['binding'] = bind_file
            else:
                cmds.warning('{0} does not exist.'.format(bind_file))
    return args
def display_cvwrap_options(*args, **kwargs):
    """Build and show the cvWrap option box UI inside Maya's shared option box."""
    cmds.loadPlugin('cvwrap', qt=True)
    layout = mel.eval('getOptionBox')
    cmds.setParent(layout)
    cmds.columnLayout(adj=True)
    for widget in [NAME_WIDGET, RADIUS_WIDGET, BIND_FILE_WIDGET, NEW_BIND_MESH_WIDGET]:
        # Delete the widgets so we don't create multiple controls with the same name
        try:
            cmds.deleteUI(widget, control=True)
        except:
            # Widget did not exist yet; nothing to delete.
            pass
    cmds.textFieldGrp(NAME_WIDGET, label='Node name', text='cvWrap#')
    # Seed the slider from the persisted preference.
    radius = cmds.optionVar(q=RADIUS_WIDGET)
    cmds.floatSliderGrp(RADIUS_WIDGET, label='Sample radius', field=True, minValue=0.0,
                        maxValue=100.0, fieldMinValue=0.0, fieldMaxValue=100.0, value=radius,
                        step=0.01, precision=2)
    cmds.textFieldButtonGrp(BIND_FILE_WIDGET, label='Binding file ', text='', buttonLabel='Browse',
                            bc=display_bind_file_dialog)
    use_new_bind_mesh = cmds.optionVar(q=NEW_BIND_MESH_WIDGET)
    cmds.checkBoxGrp(NEW_BIND_MESH_WIDGET, numberOfCheckBoxes=1, label='Create new bind mesh',
                     v1=use_new_bind_mesh)
    mel.eval('setOptionBoxTitle("cvWrap Options");')
    mel.eval('setOptionBoxCommandName("cvWrap");')
    # Rewire the shared option box buttons to this tool's callbacks.
    apply_close_button = mel.eval('getOptionBoxApplyAndCloseBtn;')
    cmds.button(apply_close_button, e=True, command=apply_and_close)
    apply_button = mel.eval('getOptionBoxApplyBtn;')
    cmds.button(apply_button, e=True, command=create_cvwrap)
    reset_button = mel.eval('getOptionBoxResetBtn;')
    # For some reason, the buttons in the menu only accept MEL.
    cmds.button(reset_button, e=True,
                command='python("import cvwrap.menu; cvwrap.menu.reset_to_defaults()");')
    close_button = mel.eval('getOptionBoxCloseBtn;')
    cmds.button(close_button, e=True, command=close_option_box)
    save_button = mel.eval('getOptionBoxSaveBtn;')
    # "Save" just flushes widget values into the optionVars.
    cmds.button(save_button, e=True,
                command='python("import cvwrap.menu; cvwrap.menu.get_create_command_kwargs()");')
    mel.eval('showOptionBox')
def apply_and_close(*args, **kwargs):
    """Create the cvWrap deformer, remember the option box size, then hide it."""
    create_cvwrap()
    mel.eval('saveOptionBoxSize')
    mel.eval('hideOptionBox')  # same as close_option_box()
def close_option_box(*args, **kwargs):
    """Hide Maya's shared option box window."""
    mel.eval('hideOptionBox')
def display_bind_file_dialog(*args, **kwargs):
    """Prompt for a .wrap binding file and store the chosen path in the
    option box's binding-file text field."""
    workspace_root = cmds.workspace(q=True, rootDirectory=True)
    data_dir = os.path.join(workspace_root, 'data')
    selection = cmds.fileDialog2(fileFilter='*.wrap', dialogStyle=2, fileMode=1,
                                 startingDirectory=data_dir)
    if not selection:
        return
    cmds.textFieldButtonGrp(BIND_FILE_WIDGET, e=True, text=selection[0])
def reset_to_defaults(*args, **kwargs):
    """Restore every cvWrap option box widget to its default value."""
    defaults = (
        (cmds.textFieldGrp, NAME_WIDGET, {'text': 'cvWrap#'}),
        (cmds.floatSliderGrp, RADIUS_WIDGET, {'value': 0}),
        (cmds.textFieldButtonGrp, BIND_FILE_WIDGET, {'text': ''}),
        (cmds.checkBoxGrp, NEW_BIND_MESH_WIDGET, {'v1': False}),
    )
    for widget_cmd, widget_name, flags in defaults:
        widget_cmd(widget_name, e=True, **flags)
def edit_binding(*args, **kwargs):
    """Open the cvWrap binding editor UI."""
    cvwrap.bindui.show()
def export_binding(*args, **kwargs):
    """Save the binding of the selected cvWrap node (or deformed mesh) to disk."""
    cmds.loadPlugin('cvwrap', qt=True)
    node = get_wrap_node_from_selected()
    if not node:
        return
    start_dir = os.path.join(cmds.workspace(q=True, rd=True), 'data')
    chosen = cmds.fileDialog2(fileFilter='*.wrap', dialogStyle=2, cap='Export Binding',
                              startingDirectory=start_dir, fm=0)
    if chosen:
        cmds.cvWrap(node, ex=chosen[0])
def import_binding(*args, **kwargs):
    """Load a .wrap binding file onto the selected cvWrap node or mesh."""
    cmds.loadPlugin('cvwrap', qt=True)
    node = get_wrap_node_from_selected()
    if not node:
        return
    start_dir = os.path.join(cmds.workspace(q=True, rd=True), 'data')
    chosen = cmds.fileDialog2(fileFilter='*.wrap', dialogStyle=2, cap='Import Binding',
                              startingDirectory=start_dir, fm=1)
    if chosen:
        cmds.cvWrap(node, im=chosen[0])
def get_wrap_node_from_selected():
    """Return a cvWrap node derived from the current selection.

    Raises RuntimeError when nothing suitable is selected; prompts the
    user when several cvWrap nodes deform the selected mesh.
    """
    selection = cmds.ls(sl=True) or []
    if not selection:
        raise RuntimeError('No cvWrap found on selected.')
    node = selection[0]
    if cmds.nodeType(node) == 'cvWrap':
        return node
    candidates = [n for n in (cmds.listHistory(node, pdo=0) or [])
                  if cmds.nodeType(n) == 'cvWrap']
    if not candidates:
        raise RuntimeError('No cvWrap node found on {0}.'.format(node))
    if len(candidates) == 1:
        return candidates[0]
    # Multiple wrap nodes deform this mesh; let the user pick one.
    return QtGui.QInputDialog.getItem(None, 'Select cvWrap node', 'cvWrap node:', candidates)
def destroy_menuitems():
    """Remove every menu item this module added to Maya's menus."""
    global MENU_ITEMS
    for menu_item in MENU_ITEMS:
        cmds.deleteUI(menu_item, menuItem=True)
    MENU_ITEMS = []
def paint_cvwrap_weights(*args, **kwargs):
    """Activate Maya's attribute paint tool on the selected cvWrap weights."""
    if not cmds.ls(sl=True):
        return
    node = get_wrap_node_from_selected()
    if not node:
        return
    mel.eval('artSetToolAndSelectAttr("artAttrCtx", "cvWrap.{0}.weights");'.format(node))
| {
"content_hash": "fd7081cf6ae9ed9a3538d6dbc3b80a05",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 99,
"avg_line_length": 42.7551867219917,
"alnum_prop": 0.6134510869565217,
"repo_name": "AtonLerin/cvwrap",
"id": "c7678b02e9189edd22fe913feec83ca1e7583f29",
"size": "10304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/cvwrap/menu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9915"
},
{
"name": "C++",
"bytes": "139394"
},
{
"name": "CMake",
"bytes": "1133"
},
{
"name": "Python",
"bytes": "14408"
}
],
"symlink_target": ""
} |
import time
import secret
from datetime import datetime
from evernote.edam.notestore import NoteStore
from evernote.api.client import EvernoteClient
from evernote.edam.limits import constants
class Post(object):
    """Metadata and content for a single blog post.

    Attributes:
        title: Post title string.
        guid: Guid of the Evernote note backing this post.
        tags: List of tag name strings.
        created: Creation time, in milliseconds since the epoch.
        updated: Last-update time, in milliseconds since the epoch.
        content: Post body (ENML/HTML string).
    """
    def __init__(self, title=None, guid=None, tags=None,
                 created=None, updated=None, content=None):
        self.title = title
        self.guid = guid
        # Bug fix: the original default ``tags=[]`` was a mutable default
        # argument shared by every Post created without explicit tags
        # (and the class also kept shared class-level attributes).  Use
        # None as the sentinel and create a fresh list per instance.
        self.tags = [] if tags is None else tags
        self.created = created
        self.updated = updated
        self.content = content

    def to_dic(self):
        """Return a dict representation suitable for template rendering."""
        time_fmt = "%B %d, %Y"
        return dict(
            title=self.title,
            tags=self.tags,
            # ``created`` is stored in milliseconds, UTC.
            created=datetime.utcfromtimestamp(self.created / 1000).strftime(time_fmt),
            # updated=datetime.utcfromtimestamp(self.updated/1000).strftime(time_fmt),
            content=self.content
        )
class EvernoteCache(object):
    """In-memory cache of the published notes in the Evernote "blog" notebook.

    Wraps the Evernote API and keeps posts/tags locally so the web app does
    not have to hit the service on every request.
    Attributes:
        posts: Cached Post objects, newest first.
        tags: Tags in the blog notebook as {"guid": ..., "name": ...} dicts.
        dates: To save archive dates (currently never populated).
        last_update: Unix timestamp of the last completed sync.
        auth_token: Evernote dev token.
        client: Evernote client.
        note_store: Note store handle used for all API calls.
        blog_notebook_guid: The blog notebook's guid in evernote.
        published_tag_guid: The tag Published's guid.
        filter: To filter published notes in blog notebook.
        spec: To get published notes' metadata.
    """
    # NOTE(review): class-level mutable attributes are shared across
    # instances; prepare_store() rebinds posts/tags per instance, but
    # dates is never rebound or filled.
    posts = []
    tags = []
    dates = []
    def __init__(self):
        self.auth_token = secret.token
        self.client = EvernoteClient(token=self.auth_token, sandbox=False)
        self.note_store = self.client.get_note_store()
        # Get notebook: blog's guid.
        # An IndexError here means no notebook named "blog" exists.
        notebooks = self.note_store.listNotebooks(self.auth_token)
        self.blog_notebook_guid = [notebook.guid for notebook in notebooks
                                   if notebook.name == "blog"][0]
        # Get tag: Published's guid (IndexError if the tag is missing).
        all_tags = self.note_store.listTags(self.auth_token)
        self.published_tag_guid = [tag.guid for tag in all_tags
                                   if tag.name == "Published"][0]
        # Create filter: published notes in the blog notebook, descending.
        self.filter = NoteStore.NoteFilter()
        self.filter.ascending = False
        self.filter.tagGuids = [self.published_tag_guid]
        self.filter.notebookGuid = self.blog_notebook_guid
        # Create note metadata result specification.
        self.spec = NoteStore.NotesMetadataResultSpec()
        self.spec.includeTitle = True
        self.spec.includeTagGuids = True
        self.spec.includeUpdated = True
        self.spec.includeCreated = True
        self.spec.includeDeleted = True
        self.spec.includeUpdateSequenceNum = True
        # Init last update time
        self.last_update = 0
        # Init notebook's update sequence num
        self.usn = 0
        self.sync()
    def update_time(self):
        # Record "now" as the time of the last successful sync.
        self.last_update = int(time.time())
    def parse_enml(self, enml):
        """Convert ENML to HTML"""
        # First delete xml declaration
        # TODO: Finish it, no idea
        pass
    def prepare_store(self):
        """Refresh self.tags and self.posts from the Evernote service."""
        # Update tags
        tags = self.note_store.listTagsByNotebook(self.auth_token, self.blog_notebook_guid)
        self.tags = [{"guid": tag.guid, "name": tag.name} for tag in tags]
        notes = self.note_store.findNotesMetadata(self.auth_token, self.filter,
                                                  0, constants.EDAM_USER_NOTES_MAX, self.spec)
        self.posts = []
        for note in notes.notes:
            # One extra API round-trip per note for tags and content.
            tag_names = self.note_store.getNoteTagNames(self.auth_token, note.guid)
            # TODO: Compile ENML
            note_content = self.note_store.getNoteContent(self.auth_token, note.guid)
            post = Post(note.title, note.guid, tag_names, note.created,
                        note.updated, note_content)
            self.posts.append(post)
        # Sort by created time
        self.posts.sort(key=lambda curr_post: curr_post.created, reverse=True)
    def sync(self):
        """Sync between server cache and evernote
        Cache consists of notes, tags.
        """
        time_now = int(time.time())
        # Throttle: skip a sync attempt if the last one was under 10 seconds
        # ago.  NOTE(review): an earlier comment said "15min"; the code uses 10s.
        if time_now - self.last_update < 10:
            return
        # Check if there is new content (update sequence number advanced).
        sync_state = self.note_store.getSyncState(self.auth_token)
        curr_usn = sync_state.updateCount
        if curr_usn <= self.usn:
            return
        self.usn = curr_usn
        self.prepare_store()
        self.update_time()
    def get_posts(self):
        # Dict form of every cached post, for template rendering.
        return [post.to_dic() for post in self.posts]
    def get_tags(self):
        return [tag["name"] for tag in self.tags]
    def get_last_update(self):
        # Local-time human-readable timestamp of the last sync.
        return datetime.fromtimestamp(self.last_update).strftime("%Y-%m-%d %H:%M:%S")
def test():
    """Smoke test: constructing the cache triggers a full sync."""
    EvernoteCache()
# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    test()
| {
"content_hash": "f4a393986abe8ef9afb14d93e7c7d91b",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 94,
"avg_line_length": 31.202380952380953,
"alnum_prop": 0.5940480732544831,
"repo_name": "ccreimondo/sweets",
"id": "a9a6604eece4627319d59ffe49f3a1ba0db5aefc",
"size": "5258",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "backup-to-evernote/blog/memcache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "6736"
}
],
"symlink_target": ""
} |
"""Base Entity for all TelldusLive entities."""
from datetime import datetime
import logging
from tellduslive import BATTERY_LOW, BATTERY_OK, BATTERY_UNKNOWN
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_VIA_DEVICE,
DEVICE_DEFAULT_NAME,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo, Entity
from .const import SIGNAL_UPDATE_ENTITY
_LOGGER = logging.getLogger(__name__)
# Extra state attribute key exposing the device's last-update timestamp.
ATTR_LAST_UPDATED = "time_last_updated"
class TelldusLiveEntity(Entity):
    """Base class for all Telldus Live entities."""
    def __init__(self, client, device_id):
        """Initialize the entity."""
        self._id = device_id
        self._client = client
        self._name = self.device.name
        # Dispatcher unsubscribe callable; set in async_added_to_hass.
        self._async_unsub_dispatcher_connect = None
    async def async_added_to_hass(self):
        """Call when entity is added to hass."""
        _LOGGER.debug("Created device %s", self)
        # Re-render this entity whenever the client signals fresh data.
        self._async_unsub_dispatcher_connect = async_dispatcher_connect(
            self.hass, SIGNAL_UPDATE_ENTITY, self._update_callback
        )
    async def async_will_remove_from_hass(self):
        """Disconnect dispatcher listener when removed."""
        if self._async_unsub_dispatcher_connect:
            self._async_unsub_dispatcher_connect()
    @callback
    def _update_callback(self):
        """Return the property of the device might have changed."""
        # Pick up a possible rename before writing the new state.
        if self.device.name:
            self._name = self.device.name
        self.async_write_ha_state()
    @property
    def device_id(self):
        """Return the id of the device."""
        return self._id
    @property
    def device(self):
        """Return the representation of the device."""
        return self._client.device(self.device_id)
    @property
    def _state(self):
        """Return the state of the device."""
        return self.device.state
    @property
    def should_poll(self):
        """Return the polling state."""
        # Updates arrive via the dispatcher signal, not polling.
        return False
    @property
    def assumed_state(self):
        """Return true if unable to access real state of entity."""
        return True
    @property
    def name(self):
        """Return name of device."""
        return self._name or DEVICE_DEFAULT_NAME
    @property
    def available(self):
        """Return true if device is not offline."""
        return self._client.is_available(self.device_id)
    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        attrs = {}
        if self._battery_level:
            attrs[ATTR_BATTERY_LEVEL] = self._battery_level
        if self._last_updated:
            attrs[ATTR_LAST_UPDATED] = self._last_updated
        return attrs
    @property
    def _battery_level(self):
        """Return the battery level of a device."""
        # Map the library's sentinel values onto percentages:
        # BATTERY_LOW -> 1%, BATTERY_OK -> 100%, unknown -> None.
        if self.device.battery == BATTERY_LOW:
            return 1
        if self.device.battery == BATTERY_UNKNOWN:
            return None
        if self.device.battery == BATTERY_OK:
            return 100
        return self.device.battery  # Percentage
    @property
    def _last_updated(self):
        """Return the last update of a device."""
        return (
            str(datetime.fromtimestamp(self.device.lastUpdated))
            if self.device.lastUpdated
            else None
        )
    @property
    def unique_id(self) -> str:
        """Return a unique ID."""
        return self._id
    @property
    def device_info(self) -> DeviceInfo:
        """Return device info."""
        device = self._client.device_info(self.device.device_id)
        device_info = DeviceInfo(
            identifiers={("tellduslive", self.device.device_id)},
            name=self.device.name,
        )
        # Model/manufacturer/via-device are optional in the library payload.
        if (model := device.get("model")) is not None:
            device_info[ATTR_MODEL] = model.title()
        if (protocol := device.get("protocol")) is not None:
            device_info[ATTR_MANUFACTURER] = protocol.title()
        if (client := device.get("client")) is not None:
            device_info[ATTR_VIA_DEVICE] = ("tellduslive", client)
        return device_info
| {
"content_hash": "b33c9b890927823c2ca7efb947a2d1c7",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 72,
"avg_line_length": 30.528985507246375,
"alnum_prop": 0.6154759079041063,
"repo_name": "home-assistant/home-assistant",
"id": "9e0bf7e96932eb4caa579bfe440c4b21f9661b27",
"size": "4213",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tellduslive/entry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
# Re-export the sample-data downloader at package level.
from sunpy.data._sample import download_sample_data
# Module authorship metadata.
__author__ = "Steven Christe"
__email__ = "steven.christe@nasa.gov"
| {
"content_hash": "b47f2f4c2b6cd5733480881ebb9bea0b",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 23.142857142857142,
"alnum_prop": 0.7222222222222222,
"repo_name": "Alex-Ian-Hamilton/sunpy",
"id": "223a28530bf93c0233b27d89a07bd218765c45f1",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sunpy/data/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "72909"
},
{
"name": "Python",
"bytes": "1505795"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make SiteConfig.favicon and SiteConfig.icon optional (blank/null)."""
    dependencies = [
        ('frog', '0013_siteconfig_squashed_0016_remove_siteconfig_site_url'),
    ]
    operations = [
        # upload_to='' stores files at the MEDIA_ROOT top level.
        migrations.AlterField(
            model_name='siteconfig',
            name='favicon',
            field=models.FileField(blank=True, null=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='siteconfig',
            name='icon',
            field=models.ImageField(blank=True, null=True, upload_to=''),
        ),
    ]
| {
"content_hash": "70e4fc30d8557f4aa9a261f7cc410f6a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 26.652173913043477,
"alnum_prop": 0.5889070146818923,
"repo_name": "theiviaxx/Frog",
"id": "c96d873f21f920b23c194b612c1719cbb168fff8",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frog/migrations/0014_auto_20190922_0810.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "22225"
},
{
"name": "JavaScript",
"bytes": "57292"
},
{
"name": "Python",
"bytes": "215494"
}
],
"symlink_target": ""
} |
import re
import os.path
import _common
from _common import unittest
import helper
from helper import control_stdin
from beets.mediafile import MediaFile
class TestHelper(helper.TestHelper):
    """Shared fixtures and assertions for the convert-plugin tests."""
    def tagged_copy_cmd(self, tag):
        """Return a conversion command that copies files and appends
        `tag` to the copy.
        """
        if re.search('[^a-zA-Z0-9]', tag):
            raise ValueError(u"tag '{0}' must only contain letters and digits"
                             .format(tag))
        # FIXME This is not portable. For windows we need to use our own
        # python script that performs the same task.
        return u'cp $source $dest; printf {0} >> $dest'.format(tag)
    def assertFileTag(self, path, tag):
        """Assert that the path is a file and the files content ends with `tag`.
        """
        self.assertTrue(os.path.isfile(path),
                        u'{0} is not a file'.format(path))
        with open(path) as f:
            # Read only the trailing len(tag) characters of the file.
            # NOTE(review): a relative seek from SEEK_END on a text-mode
            # handle is a Python 2 idiom; Python 3 raises
            # io.UnsupportedOperation for it.
            f.seek(-len(tag), os.SEEK_END)
            self.assertEqual(f.read(), tag,
                             u'{0} is not tagged with {1}'.format(path, tag))
    def assertNoFileTag(self, path, tag):
        """Assert that the path is a file and the files content does not
        end with `tag`.
        """
        self.assertTrue(os.path.isfile(path),
                        u'{0} is not a file'.format(path))
        with open(path) as f:
            f.seek(-len(tag), os.SEEK_END)
            self.assertNotEqual(f.read(), tag,
                                u'{0} is unexpectedly tagged with {1}'
                                .format(path, tag))
class ImportConvertTest(unittest.TestCase, TestHelper):
    """Tests automatic conversion during import (convert.auto = True)."""
    def setUp(self):
        self.setup_beets(disk=True)  # Converter is threaded
        self.importer = self.create_importer()
        self.load_plugins('convert')
        self.config['convert'] = {
            'dest': os.path.join(self.temp_dir, 'convert'),
            'command': self.tagged_copy_cmd('convert'),
            # Enforce running convert
            'max_bitrate': 1,
            'auto': True,
            'quiet': False,
        }
    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
    def test_import_converted(self):
        self.importer.run()
        item = self.lib.items().get()
        self.assertFileTag(item.path, 'convert')
    def test_import_original_on_convert_error(self):
        # `false` exits with non-zero code
        self.config['convert']['command'] = u'false'
        self.importer.run()
        item = self.lib.items().get()
        # The item must still be imported, pointing at the original file.
        self.assertIsNotNone(item)
        self.assertTrue(os.path.isfile(item.path))
class ConvertCliTest(unittest.TestCase, TestHelper):
    """Tests for the `beet convert` command-line interface."""
    def setUp(self):
        self.setup_beets(disk=True)  # Converter is threaded
        self.album = self.add_album_fixture(ext='ogg')
        self.item = self.album.items()[0]
        self.load_plugins('convert')
        self.convert_dest = os.path.join(self.temp_dir, 'convert_dest')
        self.config['convert'] = {
            'dest': self.convert_dest,
            'paths': {'default': 'converted'},
            'format': 'mp3',
            'formats': {
                'mp3': self.tagged_copy_cmd('mp3'),
                'opus': {
                    'command': self.tagged_copy_cmd('opus'),
                    'extension': 'ops',
                }
            }
        }
    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
    def test_convert(self):
        with control_stdin('y'):
            self.run_command('convert', self.item.path)
        converted = os.path.join(self.convert_dest, 'converted.mp3')
        self.assertFileTag(converted, 'mp3')
    def test_convert_with_auto_confirmation(self):
        self.run_command('convert', '--yes', self.item.path)
        converted = os.path.join(self.convert_dest, 'converted.mp3')
        self.assertFileTag(converted, 'mp3')
    # NOTE(review): method name has a typo ("rejecet"); renaming to
    # test_reject_confirmation would still be discovered by unittest.
    def test_rejecet_confirmation(self):
        with control_stdin('n'):
            self.run_command('convert', self.item.path)
        converted = os.path.join(self.convert_dest, 'converted.mp3')
        self.assertFalse(os.path.isfile(converted))
    def test_convert_keep_new(self):
        # --keep-new re-points the library item at the converted file.
        self.assertEqual(os.path.splitext(self.item.path)[1], '.ogg')
        with control_stdin('y'):
            self.run_command('convert', '--keep-new', self.item.path)
        self.item.load()
        self.assertEqual(os.path.splitext(self.item.path)[1], '.mp3')
    def test_format_option(self):
        with control_stdin('y'):
            self.run_command('convert', '--format', 'opus', self.item.path)
        # The custom 'opus' format uses the 'ops' extension configured above.
        converted = os.path.join(self.convert_dest, 'converted.ops')
        self.assertFileTag(converted, 'opus')
    def test_embed_album_art(self):
        self.config['convert']['embed'] = True
        image_path = os.path.join(_common.RSRC, 'image-2x3.jpg')
        self.album.artpath = image_path
        self.album.store()
        with open(os.path.join(image_path)) as f:
            image_data = f.read()
        with control_stdin('y'):
            self.run_command('convert', self.item.path)
        converted = os.path.join(self.convert_dest, 'converted.mp3')
        mediafile = MediaFile(converted)
        self.assertEqual(mediafile.images[0].data, image_data)
    def test_skip_existing(self):
        # Pre-existing destination files must not be overwritten.
        converted = os.path.join(self.convert_dest, 'converted.mp3')
        self.touch(converted, content='XXX')
        self.run_command('convert', '--yes', self.item.path)
        with open(converted, 'r') as f:
            self.assertEqual(f.read(), 'XXX')
class NeverConvertLossyFilesTest(unittest.TestCase, TestHelper):
    """Test the effect of the `never_convert_lossy_files` option.
    """
    def setUp(self):
        self.setup_beets(disk=True)  # Converter is threaded
        self.load_plugins('convert')
        self.convert_dest = os.path.join(self.temp_dir, 'convert_dest')
        self.config['convert'] = {
            'dest': self.convert_dest,
            'paths': {'default': 'converted'},
            'never_convert_lossy_files': True,
            'format': 'mp3',
            'formats': {
                'mp3': self.tagged_copy_cmd('mp3'),
            }
        }
    def tearDown(self):
        self.unload_plugins()
        self.teardown_beets()
    # NOTE(review): method name has a typo ("lossles"); a rename to
    # test_transcode_from_lossless would still be discovered by unittest.
    def test_transcode_from_lossles(self):
        # Lossless sources are always transcoded, even with the option on.
        [item] = self.add_item_fixtures(ext='flac')
        with control_stdin('y'):
            self.run_command('convert', item.path)
        converted = os.path.join(self.convert_dest, 'converted.mp3')
        self.assertFileTag(converted, 'mp3')
    def test_transcode_from_lossy(self):
        self.config['convert']['never_convert_lossy_files'] = False
        [item] = self.add_item_fixtures(ext='ogg')
        with control_stdin('y'):
            self.run_command('convert', item.path)
        converted = os.path.join(self.convert_dest, 'converted.mp3')
        self.assertFileTag(converted, 'mp3')
    def test_transcode_from_lossy_prevented(self):
        # With the option enabled, a lossy source is copied, not transcoded.
        [item] = self.add_item_fixtures(ext='ogg')
        with control_stdin('y'):
            self.run_command('convert', item.path)
        converted = os.path.join(self.convert_dest, 'converted.ogg')
        self.assertNoFileTag(converted, 'mp3')
def suite():
    """Build this module's test suite (entry point for the test runner)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
# Run this module's suite when executed directly.
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| {
"content_hash": "b98d0718e6ece5278a44655965b1ebf2",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 80,
"avg_line_length": 35.2122641509434,
"alnum_prop": 0.5795043536503683,
"repo_name": "bj-yinyan/beets",
"id": "8b3ec279a409334f8e1a1a55864ad371d0b3bb93",
"size": "8113",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_convert.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "404323"
},
{
"name": "JavaScript",
"bytes": "85858"
},
{
"name": "Python",
"bytes": "1278854"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
} |
import os, re, tempfile, subprocess #, sys, datetime, zipfile
# Location of the source file that defines the current version
VERSION_FILE = '../src/com/caverock/androidsvg/SVG.java'
# Version regex
VERSION_RE = '\sVERSION\s*=\s*"([\d.]+)"'
# Source pom file
ORIG_POM_FILE = 'src-pom.xml'
# Regex for finding the place in the pom file to insert the version number
POM_VERSION_RE = '{{VERSION}}'
# The jar file to be deployed
JAR_FILE = '../bin/androidsvg.jar'
# The dummy sources and javadoc jars
SOURCES_JAR_FILE = 'androidsvg-sources.jar'
JAVADOC_JAR_FILE = 'androidsvg-javadoc.jar'
def main():
    """Interactively sign and deploy the AndroidSVG artifacts to Sonatype."""
    # Get the current version number of the library
    libraryVersion = get_current_version()
    go = raw_input('\nDo maven deploy for version '+libraryVersion+'? (y/N): ')
    if not go in ['Y','y']:
        exit()
    # Get GPG passphrase
    #passphrase = raw_input('GPG passphrase: ')
    #if passphrase == '':
    #    print "Exiting: need passphrase."
    #    exit()
    # Create a temporary file to hold the generated pom file
    print 'Creating POM file for this version...'
    # delete=False so the file survives close() for mvn to read.
    # NOTE(review): the temp file is never removed afterwards.
    tempPomFile = tempfile.NamedTemporaryFile(suffix='.pom.xml', delete=False)
    #print tempPomFile.name
    # Write out a new pom file with the version number set to the latest version
    srcPomFile = read(ORIG_POM_FILE)
    tempPomFile.write(re.sub(POM_VERSION_RE, libraryVersion, srcPomFile))
    tempPomFile.close()
    # Sign and deploy the artifact
    print '\nSigning and deploying artifact...'
    basecmd = 'mvn gpg:sign-and-deploy-file'
    basecmd += ' -DpomFile=' + tempPomFile.name
    basecmd += ' -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/'
    basecmd += ' -DrepositoryId=sonatype-nexus-staging'
    #basecmd += ' -Dpassphrase=' + passphrase
    cmd = basecmd
    cmd += ' -Dfile=' + os.path.realpath(JAR_FILE)
    print cmd
    os.system(cmd)
    # Sign and deploy the dummy sources
    print '\nSigning and deploying sources jar...'
    cmd = basecmd
    cmd += ' -Dfile=' + os.path.realpath(SOURCES_JAR_FILE)
    cmd += ' -Dclassifier=sources'
    print cmd
    os.system(cmd)
    # Sign and deploy the dummy javadoc
    print '\nSigning and deploying javadoc jar...'
    cmd = basecmd
    cmd += ' -Dfile=' + os.path.realpath(JAVADOC_JAR_FILE)
    cmd += ' -Dclassifier=javadoc'
    print cmd
    os.system(cmd)
    # Done
    print '\nDone!'
def read(src):
    """Return the entire contents of the file at *src* as a byte string."""
    # A context manager guarantees the handle is closed even on error, and
    # this avoids shadowing the builtins ``file`` and ``str`` as the
    # original implementation did.
    with open(os.path.realpath(src), "rb") as fh:
        return fh.read()
def get_current_version():
    """Extract the library version string from VERSION_FILE ('' if absent)."""
    match = re.search(VERSION_RE, read(VERSION_FILE))
    return match.group(1) if match else ""
def error(msg):
    """Print an error message and terminate the script via SystemExit."""
    # Parenthesized print works on both Python 2 and Python 3; the original
    # bare print statement was Python-2-only.
    print("ERROR: " + msg)
    exit()
# Run the deploy workflow only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| {
"content_hash": "9425cde96fdd858c91a73b40264aa876",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 83,
"avg_line_length": 23.61061946902655,
"alnum_prop": 0.6731634182908546,
"repo_name": "abid-mujtaba/fetchheaders-android",
"id": "baf79944c148c25f2de16f551bbaea90a27ba48a",
"size": "2758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AndroidSVG/maven/maven-release.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45281"
},
{
"name": "Groovy",
"bytes": "6697"
},
{
"name": "Java",
"bytes": "467092"
},
{
"name": "JavaScript",
"bytes": "41163"
},
{
"name": "Python",
"bytes": "2758"
},
{
"name": "Shell",
"bytes": "7394"
}
],
"symlink_target": ""
} |
from PyCA.Core import *
import PyCA.Common as common
import PyCA.Display as display
import numpy as np
import matplotlib.pyplot as plt
import os, errno
def GreedyReg\
    (I0Orig, \
     I1Orig, \
     scales = [1], \
     nIters = [1000], \
     ustep = [0.25], \
     fluidParams = [0.1, 0.1, 0.001], \
     plotEvery = 100):
    """Greedy fluid registration deforming I0Orig toward I1Orig.

    Per scale level (scales/nIters/ustep are parallel lists, coarse to
    fine): repeatedly deform I0 by h, take a gradient step on the image
    mismatch, smooth it with the fluid operator (alpha, beta, gamma =
    fluidParams), and compose the update into h.
    Returns (Idef, h, energy): deformed image, deformation field, and the
    per-iteration sum-of-squares energy history.
    NOTE(review): the list defaults are mutable default arguments -- safe
    only as long as callers never mutate them.
    """
    mType = I0Orig.memType()
    origGrid = I0Orig.grid()
    # allocate vars
    I0 = Image3D(origGrid, mType)
    I1 = Image3D(origGrid, mType)
    h = Field3D(origGrid, mType)
    Idef = Image3D(origGrid, mType)
    diff = Image3D(origGrid, mType)
    gI = Field3D(origGrid, mType)
    gU = Field3D(origGrid, mType)
    scratchI = Image3D(origGrid, mType)
    scratchV = Field3D(origGrid, mType)
    # allocate diffOp (CPU or GPU fluid kernel depending on memory type)
    if mType == MEM_HOST:
        diffOp = FluidKernelFFTCPU()
    else:
        diffOp = FluidKernelFFTGPU()
    # initialize some vars
    zerovec = Vec3Df(0.0, 0.0, 0.0)
    nScales = len(scales)
    scaleManager = MultiscaleManager(origGrid)
    for s in scales:
        scaleManager.addScaleLevel(s)
    # Initalize the thread memory manager (needed for resampler)
    # num pools is 2 (images) + 2*3 (fields)
    ThreadMemoryManager.init(origGrid, mType, 8)
    if mType == MEM_HOST:
        resampler = MultiscaleResamplerGaussCPU(origGrid)
    else:
        resampler = MultiscaleResamplerGaussGPU(origGrid)
    def setScale(scale):
        # Switch every variable/operator to the grid of this scale level,
        # downsampling the images and upsampling the current deformation.
        # NOTE(review): 'global curGrid' creates a module-level global that
        # nothing else visibly reads; presumably legacy -- confirm.
        global curGrid
        scaleManager.set(scale)
        curGrid = scaleManager.getCurGrid()
        # since this is only 2D:
        curGrid.spacing().z = 1.0;
        resampler.setScaleLevel(scaleManager)
        diffOp.setAlpha(fluidParams[0])
        diffOp.setBeta(fluidParams[1])
        diffOp.setGamma(fluidParams[2])
        diffOp.setGrid(curGrid)
        # downsample images
        I0.setGrid(curGrid)
        I1.setGrid(curGrid)
        if scaleManager.isLastScale():
            Copy(I0, I0Orig)
            Copy(I1, I1Orig)
        else:
            resampler.downsampleImage(I0,I0Orig)
            resampler.downsampleImage(I1,I1Orig)
        # initialize / upsample deformation
        if scaleManager.isFirstScale():
            h.setGrid(curGrid)
            SetToIdentity(h)
        else:
            resampler.updateHField(h)
        # set grids
        gI.setGrid(curGrid)
        Idef.setGrid(curGrid)
        diff.setGrid(curGrid)
        gU.setGrid(curGrid)
        scratchI.setGrid(curGrid)
        scratchV.setGrid(curGrid)
    # end function
    # Three energy channels are allocated, but only energy[0] (SSD) is used.
    energy = [[] for _ in xrange(3)]
    for scale in range(len(scales)):
        setScale(scale)
        for it in range(nIters[scale]):
            print 'iter %d'%it
            # compute deformed image
            ApplyH(Idef, I0, h)
            # update gradient
            Gradient(gI, Idef)
            # update u: scale gradient by residual, smooth with the
            # inverse fluid operator, then step by ustep for this scale
            Sub(diff, I1, Idef)
            gI *= diff
            diffOp.applyInverseOperator(gU, gI)
            gU *= ustep[scale]
            # ApplyV(scratchV, h, gU, BACKGROUND_STRATEGY_PARTIAL_ID)
            ComposeHV(scratchV, h, gU)
            h.swap(scratchV)
            # compute energy (sum of squared differences)
            energy[0].append(Sum2(diff))
            if it % plotEvery == 0 or it == nIters[scale]-1:
                # Periodic diagnostics: energy curves plus image/grid plots.
                clrlist = ['r','g','b','m','c','y','k']
                plt.figure('energy')
                for i in range(len(energy)):
                    plt.plot(energy[i],clrlist[i])
                    if i == 0:
                        plt.hold(True)
                plt.hold(False)
                plt.draw()
                plt.figure('results')
                plt.clf()
                plt.subplot(3,2,1)
                display.DispImage(I0, 'I0', newFig=False)
                plt.subplot(3,2,2)
                display.DispImage(I1, 'I1', newFig=False)
                plt.subplot(3,2,3)
                display.DispImage(Idef, 'def', newFig=False)
                plt.subplot(3,2,4)
                display.DispImage(diff, 'diff', newFig=False)
                plt.colorbar()
                plt.subplot(3,2,5)
                display.GridPlot(h, every=4, isVF=False)
                plt.draw()
                plt.show()
            # end plot
        # end iteration
    # end scale
    return (Idef, h, energy)
# end function
if __name__ == '__main__':
    plt.close('all')

    # Use the GPU if one is available, otherwise fall back to the CPU path.
    if GetNumberOfCUDADevices() > 0:
        mType = MEM_DEVICE
    else:
        # print-as-function works on both Python 2 and 3 for a single arg.
        print("No CUDA devices found, running on CPU")
        mType = MEM_HOST

    imagedir = './Images/'

    #
    # Run lena images: register the deformed image back onto the original.
    #
    I0 = common.LoadPNGImage(imagedir + 'lena_deformed.png', mType)
    I1 = common.LoadPNGImage(imagedir + 'lena_orig.png', mType)
    (Idef, h, energy) = GreedyReg(
        I0,
        I1,
        scales=[2, 1],
        nIters=[1000, 1000],
        ustep=[0.1, 0.1],
        fluidParams=[0.5, 0.5, 0.001],
        plotEvery=500)
| {
"content_hash": "a70326915fa623c41cc1c8a26dadb0bd",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 69,
"avg_line_length": 26.689839572192515,
"alnum_prop": 0.5301542776998598,
"repo_name": "rkwitt/quicksilver",
"id": "506005c840065b1eebdc94dad0fb10c76b2bf355",
"size": "5047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "3rd_party_software/pyca/Examples/GreedyReg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1821"
},
{
"name": "C++",
"bytes": "1036003"
},
{
"name": "CMake",
"bytes": "32049"
},
{
"name": "Cuda",
"bytes": "313431"
},
{
"name": "M",
"bytes": "717"
},
{
"name": "Matlab",
"bytes": "758372"
},
{
"name": "Nix",
"bytes": "1414"
},
{
"name": "Python",
"bytes": "345966"
},
{
"name": "Shell",
"bytes": "43849"
},
{
"name": "Smarty",
"bytes": "19997"
}
],
"symlink_target": ""
} |
"""
@brief test log(time=12s)
"""
import os
import unittest
from pyquickhelper.loghelper import fLOG
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
from pyquickhelper.pycode import add_missing_development_version, ExtTestCase
import ensae_teaching_cs
class TestNotebookCov_Session9_1a(ExtTestCase):
    """Executes the 'session9' notebooks from td1a_algo under coverage."""

    def setUp(self):
        # Make sure development versions of the helper packages resolve.
        required = ["pymyinstall", "pyensae", "jyquickhelper"]
        add_missing_development_version(required, __file__, hide=True)

    def test_notebook_session9(self):
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        # Guard against a silently failed import of the package under test.
        self.assertTrue(ensae_teaching_cs is not None)
        here = os.path.dirname(__file__)
        notebook_dir = os.path.join(
            here, "..", "..", "_doc", "notebooks", "td1a_algo")
        test_notebook_execution_coverage(
            __file__, "session9", notebook_dir,
            this_module_name="ensae_teaching_cs", fLOG=fLOG)
# Allow running this test module directly with the unittest runner.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "b6ce587fa46a9720ce53c8c4691a76e0",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 89,
"avg_line_length": 33,
"alnum_prop": 0.5909090909090909,
"repo_name": "sdpython/ensae_teaching_cs",
"id": "4b96470cbc9c948bcf4e7ca803c242656ddd3e96",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_unittests/ut_dnotebooks/test_nb_coverage_session9_1a.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "C#",
"bytes": "26850"
},
{
"name": "CSS",
"bytes": "220769"
},
{
"name": "HTML",
"bytes": "44390"
},
{
"name": "JavaScript",
"bytes": "31077"
},
{
"name": "Jupyter Notebook",
"bytes": "45255629"
},
{
"name": "PostScript",
"bytes": "169142"
},
{
"name": "Python",
"bytes": "1770141"
},
{
"name": "R",
"bytes": "339"
},
{
"name": "Shell",
"bytes": "3675"
},
{
"name": "TeX",
"bytes": "593824"
}
],
"symlink_target": ""
} |
from .statement import Statement
from .signature import Signature
| {
"content_hash": "866cd656d65329cccb20ca76ed67ebf3",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 32,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.835820895522388,
"repo_name": "akfork/ChatterBot",
"id": "f7789a019fa29fd04ceff1809f5374565e70f0a6",
"size": "67",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "chatterbot/conversation/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "71674"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslcertlink(base_resource):
    """Configuration for linked certificate resource.

    Read-only NITRO resource: exposes the certificate key name and the
    name of the CA certificate it is linked to, plus class-level fetch
    and count helpers.
    """

    def __init__(self):
        self._certkeyname = ""
        self._linkcertkeyname = ""
        self.___count = 0

    @property
    def certkeyname(self):
        """Certificate key name."""
        try:
            return self._certkeyname
        except Exception as e:
            raise e

    @property
    def linkcertkeyname(self):
        """Name of the Certificate-Authority."""
        try:
            return self._linkcertkeyname
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        """Convert a raw NITRO response into resource objects.

        Returns the object array for GET requests; raises
        nitro_exception on error responses.
        """
        try:
            parsed = service.payload_formatter.string_to_resource(
                sslcertlink_response, response, self.__class__.__name__)
            if parsed.errorcode != 0:
                # Error code 444 indicates the session expired on the
                # appliance, so the cached session must be dropped.
                if parsed.errorcode == 444:
                    service.clear_session(self)
                if parsed.severity:
                    if parsed.severity == "ERROR":
                        raise nitro_exception(
                            parsed.errorcode, str(parsed.message),
                            str(parsed.severity))
                else:
                    raise nitro_exception(
                        parsed.errorcode, str(parsed.message),
                        str(parsed.severity))
            return parsed.sslcertlink
        except Exception as e:
            raise e

    def _get_object_name(self):
        """Return the value of the object identifier argument."""
        try:
            return 0
        except Exception as e:
            raise e

    @classmethod
    def get(cls, client, name="", option_=""):
        """Fetch all the sslcertlink resources configured on netscaler."""
        try:
            if not name:
                obj = sslcertlink()
                return obj.get_resources(client, option_)
        except Exception as e:
            raise e

    @classmethod
    def get_filtered(cls, client, filter_):
        """Fetch the sslcertlink resources matching ``filter_``.

        The filter string should be in JSON format,
        e.g. "port:80,servicetype:HTTP".
        """
        try:
            obj = sslcertlink()
            opts = options()
            opts.filter = filter_
            return obj.getfiltered(client, opts)
        except Exception as e:
            raise e

    @classmethod
    def count(cls, client):
        """Count the sslcertlink resources configured on NetScaler."""
        try:
            obj = sslcertlink()
            opts = options()
            opts.count = True
            matches = obj.get_resources(client, opts)
            if matches:
                return matches[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e

    @classmethod
    def count_filtered(cls, client, filter_):
        """Count the sslcertlink resources matching ``filter_``.

        The filter string should be in JSON format,
        e.g. "port:80,servicetype:HTTP".
        """
        try:
            obj = sslcertlink()
            opts = options()
            opts.count = True
            opts.filter = filter_
            matches = obj.getfiltered(client, opts)
            if matches:
                return matches[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
class sslcertlink_response(base_response):
    """NITRO response envelope carrying a list of sslcertlink records."""

    def __init__(self, length=1):
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-allocate one empty resource holder per expected record.
        self.sslcertlink = [sslcertlink() for _ in range(length)]
| {
"content_hash": "c2c6ed3649b937e8e698a7f824bc8dba",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 113,
"avg_line_length": 27.143939393939394,
"alnum_prop": 0.6857382082054144,
"repo_name": "benfinke/ns_python",
"id": "0620cfab75b4e5795a2418175de67b5c989f9283",
"size": "4197",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/lib/nssrc/com/citrix/netscaler/nitro/resource/config/ssl/sslcertlink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21836782"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
"""Tests for the creator dashboard and the notifications dashboard."""
from core.controllers import creator_dashboard
from core.domain import event_services
from core.domain import exp_services
from core.domain import feedback_domain
from core.domain import feedback_services
from core.domain import rating_services
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import user_jobs_continuous
from core.domain import user_jobs_continuous_test
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
import feconf
# Storage models and the taskqueue service are resolved through the platform
# registry so this module stays agnostic of the concrete backend in use.
(user_models, stats_models) = models.Registry.import_models(
    [models.NAMES.user, models.NAMES.statistics])
taskqueue_services = models.Registry.import_taskqueue_services()
class HomePageTest(test_utils.GenericTestBase):
    """Checks the home page and notifications dashboard entry points."""

    def test_logged_out_homepage(self):
        """A logged-out visit to '/' redirects to the splash page."""
        response = self.testapp.get('/')

        self.assertEqual(response.status_int, 302)
        self.assertIn('splash', response.headers['location'])

    def test_notifications_dashboard_redirects_for_logged_out_users(self):
        """Logged-out users are bounced to login; half-registered users are
        redirected to finish signup."""
        response = self.testapp.get('/notifications_dashboard')
        self.assertEqual(response.status_int, 302)
        # The login redirect remembers the originally requested page.
        location = response.headers['location']
        self.assertIn('signup', location)
        self.assertIn('notifications_dashboard', location)

        self.login('reader@example.com')
        response = self.testapp.get('/notifications_dashboard')
        # Signup is incomplete, so this redirects the user to finish it.
        self.assertEqual(response.status_int, 302)
        self.logout()

    def test_logged_in_notifications_dashboard(self):
        """A fully signed-up user can load the notifications dashboard."""
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)

        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get('/notifications_dashboard')
        self.assertEqual(response.status_int, 200)
        self.logout()
class CreatorDashboardStatisticsTest(test_utils.GenericTestBase):
    """Tests for the aggregated statistics shown on the creator dashboard.

    Note: the deprecated ``assertEquals`` alias has been replaced with
    ``assertEqual`` throughout.
    """

    OWNER_EMAIL_1 = 'owner1@example.com'
    OWNER_USERNAME_1 = 'owner1'
    OWNER_EMAIL_2 = 'owner2@example.com'
    OWNER_USERNAME_2 = 'owner2'

    EXP_ID_1 = 'exp_id_1'
    EXP_TITLE_1 = 'Exploration title 1'
    EXP_ID_2 = 'exp_id_2'
    EXP_TITLE_2 = 'Exploration title 2'

    EXP_DEFAULT_VERSION = 1

    USER_SESSION_ID = 'session1'
    USER_IMPACT_SCORE_DEFAULT = 0.0

    def setUp(self):
        """Register two owner users and cache their ids."""
        super(CreatorDashboardStatisticsTest, self).setUp()
        self.signup(self.OWNER_EMAIL_1, self.OWNER_USERNAME_1)
        self.signup(self.OWNER_EMAIL_2, self.OWNER_USERNAME_2)

        self.owner_id_1 = self.get_user_id_from_email(self.OWNER_EMAIL_1)
        self.owner_id_2 = self.get_user_id_from_email(self.OWNER_EMAIL_2)

        self.owner_1 = user_services.UserActionsInfo(self.owner_id_1)

    def _record_start(self, exp_id, exp_version, state):
        """Record start event to an exploration.

        Completing the exploration is not necessary here since the total_plays
        are currently being counted taking into account only the # of starts.
        """
        event_services.StartExplorationEventHandler.record(
            exp_id, exp_version, state, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)
        event_services.StatsEventsHandler.record(
            exp_id, exp_version, {
                'num_starts': 1,
                'num_actual_starts': 0,
                'num_completions': 0,
                'state_stats_mapping': {}
            })

    def _rate_exploration(self, exp_id, ratings):
        """Create len(ratings) ratings for exploration with exp_id,
        of values from ratings.
        """
        # Generate unique user ids to rate an exploration. Each user id needs
        # to be unique since each user can only give an exploration one rating.
        user_ids = ['user%d' % i for i in range(len(ratings))]
        self.process_and_flush_pending_tasks()
        for ind, user_id in enumerate(user_ids):
            rating_services.assign_rating_to_exploration(
                user_id, exp_id, ratings[ind])
        self.process_and_flush_pending_tasks()

    def _run_user_stats_aggregator_job(self):
        """Run the test-modified user stats aggregator to completion."""
        (user_jobs_continuous_test.ModifiedUserStatsAggregator.
         start_computation())
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 1)
        self.process_and_flush_pending_tasks()
        self.assertEqual(
            self.count_jobs_in_taskqueue(
                taskqueue_services.QUEUE_NAME_CONTINUOUS_JOBS), 0)
        self.process_and_flush_pending_tasks()

    def test_stats_no_explorations(self):
        """No stats model is created for a user with no explorations."""
        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self._run_user_stats_aggregator_job()
        self.assertIsNone(user_models.UserStatsModel.get(
            self.owner_id_1, strict=False))
        self.logout()

    def test_one_play_for_single_exploration(self):
        """A single start is counted as one play, with no ratings."""
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id = self.EXP_ID_1
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._run_user_stats_aggregator_job()

        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 1)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 0)
        self.assertIsNone(user_model.average_ratings)
        self.logout()

    def test_one_rating_for_single_exploration(self):
        """A single rating is reflected without any recorded plays."""
        self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_id = self.EXP_ID_1
        self._rate_exploration(exp_id, [4])
        self._run_user_stats_aggregator_job()

        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 0)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 1)
        self.assertEqual(user_model.average_ratings, 4)
        self.logout()

    def test_one_play_and_rating_for_single_exploration(self):
        """One play and one rating are both aggregated."""
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_id = self.EXP_ID_1
        exp_version = self.EXP_DEFAULT_VERSION
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._rate_exploration(exp_id, [3])
        self._run_user_stats_aggregator_job()

        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 1)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 1)
        self.assertEqual(user_model.average_ratings, 3)
        self.logout()

    def test_multiple_plays_and_ratings_for_single_exploration(self):
        """Several plays and ratings of one exploration are summed/averaged."""
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id = self.EXP_ID_1
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._rate_exploration(exp_id, [3, 4, 5])
        self._run_user_stats_aggregator_job()

        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 4)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 3)
        self.assertEqual(user_model.average_ratings, 4)
        self.logout()

    def test_one_play_and_rating_for_multiple_explorations(self):
        """Stats from only one of two owned explorations are aggregated."""
        exploration_1 = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id_1 = self.EXP_ID_1
        state_1 = exploration_1.init_state_name

        self._record_start(exp_id_1, exp_version, state_1)
        self._rate_exploration(exp_id_1, [4])
        self._run_user_stats_aggregator_job()

        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 1)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 1)
        self.assertEqual(user_model.average_ratings, 4)
        self.logout()

    def test_multiple_plays_and_ratings_for_multiple_explorations(self):
        """Stats across several owned explorations are combined."""
        exploration_1 = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)
        exploration_2 = self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_version = self.EXP_DEFAULT_VERSION

        exp_id_1 = self.EXP_ID_1
        state_1 = exploration_1.init_state_name
        exp_id_2 = self.EXP_ID_2
        state_2 = exploration_2.init_state_name

        self._record_start(exp_id_1, exp_version, state_1)
        self._record_start(exp_id_2, exp_version, state_2)
        self._record_start(exp_id_2, exp_version, state_2)
        self._rate_exploration(exp_id_1, [4])
        self._rate_exploration(exp_id_2, [3, 3])
        self._run_user_stats_aggregator_job()

        user_model = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(user_model.total_plays, 3)
        self.assertEqual(
            user_model.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model.num_ratings, 3)
        self.assertEqual(user_model.average_ratings, 10 / 3.0)
        self.logout()

    def test_stats_for_single_exploration_with_multiple_owners(self):
        """Both owners of one exploration see identical stats."""
        exploration = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)

        rights_manager.assign_role_for_exploration(
            self.owner_1, self.EXP_ID_1, self.owner_id_2,
            rights_manager.ROLE_OWNER)

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        exp_version = self.EXP_DEFAULT_VERSION
        exp_id = self.EXP_ID_1
        state = exploration.init_state_name

        self._record_start(exp_id, exp_version, state)
        self._record_start(exp_id, exp_version, state)
        self._rate_exploration(exp_id, [3, 4, 5])
        self.logout()

        self.login(self.OWNER_EMAIL_2)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)

        # NOTE: these are the same rater user-ids as above, so the earlier
        # ratings are overwritten and the total stays at 3 ratings.
        self._rate_exploration(exp_id, [3, 4, 5])

        self._run_user_stats_aggregator_job()

        user_model_1 = user_models.UserStatsModel.get(
            self.owner_id_1)
        self.assertEqual(user_model_1.total_plays, 2)
        self.assertEqual(
            user_model_1.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model_1.num_ratings, 3)
        self.assertEqual(user_model_1.average_ratings, 4)

        user_model_2 = user_models.UserStatsModel.get(
            self.owner_id_2)
        self.assertEqual(user_model_2.total_plays, 2)
        self.assertEqual(
            user_model_2.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(user_model_2.num_ratings, 3)
        self.assertEqual(user_model_2.average_ratings, 4)
        self.logout()

    def test_stats_for_multiple_explorations_with_multiple_owners(self):
        """Co-owners of several explorations share the combined stats."""
        exploration_1 = self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)
        exploration_2 = self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)

        rights_manager.assign_role_for_exploration(
            self.owner_1, self.EXP_ID_1, self.owner_id_2,
            rights_manager.ROLE_OWNER)
        rights_manager.assign_role_for_exploration(
            self.owner_1, self.EXP_ID_2, self.owner_id_2,
            rights_manager.ROLE_OWNER)

        self.login(self.OWNER_EMAIL_2)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_version = self.EXP_DEFAULT_VERSION

        exp_id_1 = self.EXP_ID_1
        state_1 = exploration_1.init_state_name
        exp_id_2 = self.EXP_ID_2
        state_2 = exploration_2.init_state_name

        self._record_start(exp_id_1, exp_version, state_1)
        self._record_start(exp_id_1, exp_version, state_1)
        self._record_start(exp_id_2, exp_version, state_2)
        self._record_start(exp_id_2, exp_version, state_2)
        self._record_start(exp_id_2, exp_version, state_2)
        self._rate_exploration(exp_id_1, [5, 3])
        self._rate_exploration(exp_id_2, [5, 5])
        self._run_user_stats_aggregator_job()

        expected_results = {
            'total_plays': 5,
            'num_ratings': 4,
            'average_ratings': 18 / 4.0
        }

        user_model_2 = user_models.UserStatsModel.get(self.owner_id_2)
        self.assertEqual(
            user_model_2.total_plays, expected_results['total_plays'])
        self.assertEqual(
            user_model_2.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(
            user_model_2.num_ratings, expected_results['num_ratings'])
        self.assertEqual(
            user_model_2.average_ratings, expected_results['average_ratings'])
        self.logout()

        self.login(self.OWNER_EMAIL_1)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        user_model_1 = user_models.UserStatsModel.get(self.owner_id_1)
        self.assertEqual(
            user_model_1.total_plays, expected_results['total_plays'])
        self.assertEqual(
            user_model_1.impact_score, self.USER_IMPACT_SCORE_DEFAULT)
        self.assertEqual(
            user_model_1.num_ratings, expected_results['num_ratings'])
        self.assertEqual(
            user_model_1.average_ratings, expected_results['average_ratings'])
        self.logout()
class CreatorDashboardHandlerTest(test_utils.GenericTestBase):
    """Tests for the handler supplying creator dashboard data: which
    explorations each role (owner, collaborator, viewer) can see, plus
    feedback-thread counts and subscriber listings.
    """

    COLLABORATOR_EMAIL = 'collaborator@example.com'
    COLLABORATOR_USERNAME = 'collaborator'

    OWNER_EMAIL_1 = 'owner1@example.com'
    OWNER_USERNAME_1 = 'owner1'
    OWNER_EMAIL_2 = 'owner2@example.com'
    OWNER_USERNAME_2 = 'owner2'

    EXP_ID = 'exp_id'
    EXP_TITLE = 'Exploration title'

    EXP_ID_1 = 'exp_id_1'
    EXP_TITLE_1 = 'Exploration title 1'
    EXP_ID_2 = 'exp_id_2'
    EXP_TITLE_2 = 'Exploration title 2'
    EXP_ID_3 = 'exp_id_3'
    EXP_TITLE_3 = 'Exploration title 3'

    def setUp(self):
        """Register owners, a collaborator and a viewer, and cache ids."""
        super(CreatorDashboardHandlerTest, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup(self.OWNER_EMAIL_1, self.OWNER_USERNAME_1)
        self.signup(self.OWNER_EMAIL_2, self.OWNER_USERNAME_2)
        self.signup(self.COLLABORATOR_EMAIL, self.COLLABORATOR_USERNAME)
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)

        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner_id_1 = self.get_user_id_from_email(self.OWNER_EMAIL_1)
        self.owner_id_2 = self.get_user_id_from_email(self.OWNER_EMAIL_2)
        self.owner = user_services.UserActionsInfo(self.owner_id)
        self.owner_1 = user_services.UserActionsInfo(self.owner_id_1)

        self.collaborator_id = self.get_user_id_from_email(
            self.COLLABORATOR_EMAIL)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

    def test_no_explorations(self):
        """A user with no explorations gets an empty list."""
        self.login(self.OWNER_EMAIL)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self.logout()

    def test_no_explorations_and_visit_dashboard(self):
        """Visiting the dashboard without creating anything is safe."""
        self.login(self.OWNER_EMAIL)
        # Testing that creator only visit dashboard without any exploration
        # created.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 0)
        self.logout()

    def test_create_single_exploration_and_visit_dashboard(self):
        """One created exploration appears on the dashboard."""
        self.login(self.OWNER_EMAIL)
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        # Testing the quantity of exploration created and it should be 1.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.logout()

    def test_create_two_explorations_delete_one_and_visit_dashboard(self):
        """Deleting one of two explorations leaves one listed."""
        self.login(self.OWNER_EMAIL_1)
        self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_1, title=self.EXP_TITLE_1)
        self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_1, title=self.EXP_TITLE_2)
        # Testing the quantity of exploration and it should be 2.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 2)

        exp_services.delete_exploration(self.owner_id_1, self.EXP_ID_1)
        # Testing whether 1 exploration left after deletion of previous one.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.logout()

    def test_create_multiple_explorations_delete_all_and_visit_dashboard(self):
        """Deleting all explorations empties the dashboard again."""
        self.login(self.OWNER_EMAIL_2)
        self.save_new_default_exploration(
            self.EXP_ID_1, self.owner_id_2, title=self.EXP_TITLE_1)
        self.save_new_default_exploration(
            self.EXP_ID_2, self.owner_id_2, title=self.EXP_TITLE_2)
        self.save_new_default_exploration(
            self.EXP_ID_3, self.owner_id_2, title=self.EXP_TITLE_3)
        # Testing for quantity of explorations to be 3.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 3)

        # Testing for deletion of all created previously.
        exp_services.delete_exploration(self.owner_id_2, self.EXP_ID_1)
        exp_services.delete_exploration(self.owner_id_2, self.EXP_ID_2)
        exp_services.delete_exploration(self.owner_id_2, self.EXP_ID_3)
        # All explorations have been deleted, so the dashboard query should not
        # load any explorations.
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 0)
        self.logout()

    def test_managers_can_see_explorations(self):
        """An owner sees their exploration both private and public."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        self.set_admins([self.OWNER_USERNAME])

        self.login(self.OWNER_EMAIL)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PRIVATE)

        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLIC)
        self.logout()

    def test_collaborators_can_see_explorations(self):
        """An editor-role collaborator sees the exploration in any status."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        rights_manager.assign_role_for_exploration(
            self.owner, self.EXP_ID, self.collaborator_id,
            rights_manager.ROLE_EDITOR)
        self.set_admins([self.OWNER_USERNAME])

        self.login(self.COLLABORATOR_EMAIL)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PRIVATE)

        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['status'],
            rights_manager.ACTIVITY_STATUS_PUBLIC)

        self.logout()

    def test_viewer_cannot_see_explorations(self):
        """A viewer-role user never sees the exploration on their dashboard."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)
        rights_manager.assign_role_for_exploration(
            self.owner, self.EXP_ID, self.viewer_id,
            rights_manager.ROLE_VIEWER)
        self.set_admins([self.OWNER_USERNAME])

        self.login(self.VIEWER_EMAIL)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])

        rights_manager.publish_exploration(self.owner, self.EXP_ID)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(response['explorations_list'], [])
        self.logout()

    def test_can_see_feedback_thread_counts(self):
        """Open/total feedback thread counts are surfaced per exploration."""
        self.save_new_default_exploration(
            self.EXP_ID, self.owner_id, title=self.EXP_TITLE)

        self.login(self.OWNER_EMAIL)

        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['explorations_list']), 1)
        self.assertEqual(
            response['explorations_list'][0]['num_open_threads'], 0)
        self.assertEqual(
            response['explorations_list'][0]['num_total_threads'], 0)

        def mock_get_thread_analytics_multi(unused_exploration_ids):
            # Simulates 2 open threads out of 3 total for this exploration.
            return [feedback_domain.FeedbackAnalytics(self.EXP_ID, 2, 3)]

        with self.swap(
            feedback_services, 'get_thread_analytics_multi',
            mock_get_thread_analytics_multi):

            response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
            self.assertEqual(len(response['explorations_list']), 1)
            self.assertEqual(
                response['explorations_list'][0]['num_open_threads'], 2)
            self.assertEqual(
                response['explorations_list'][0]['num_total_threads'], 3)

        self.logout()

    def test_can_see_subscribers(self):
        """Subscribing/unsubscribing updates the subscribers list."""
        self.login(self.OWNER_EMAIL)

        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['subscribers_list']), 0)

        # Subscribe to creator.
        subscription_services.subscribe_to_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['subscribers_list']), 1)
        self.assertEqual(
            response['subscribers_list'][0]['subscriber_username'],
            self.VIEWER_USERNAME)

        # Unsubscribe from creator.
        subscription_services.unsubscribe_from_creator(
            self.viewer_id, self.owner_id)
        response = self.get_json(feconf.CREATOR_DASHBOARD_DATA_URL)
        self.assertEqual(len(response['subscribers_list']), 0)
class NotificationsDashboardHandlerTest(test_utils.GenericTestBase):
    """Tests for the handler backing the notifications dashboard data URL."""

    DASHBOARD_DATA_URL = '/notificationsdashboardhandler/data'

    def setUp(self):
        """Register a viewer whose id is used to author mock updates."""
        super(NotificationsDashboardHandlerTest, self).setUp()
        self.signup(self.VIEWER_EMAIL, self.VIEWER_USERNAME)
        self.viewer_id = self.get_user_id_from_email(self.VIEWER_EMAIL)

    def _get_recent_notifications_mock_by_viewer(self, unused_user_id):
        """Returns a single feedback thread by VIEWER_ID."""
        return (
            100000, [{
                'activity_id': 'exp_id',
                'activity_title': 'exp_title',
                'author_id': self.viewer_id,
                'last_updated_ms': 100000,
                'subject': 'Feedback Message Subject',
                'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            }])

    def _get_recent_notifications_mock_by_anonymous_user(self, unused_user_id):
        """Returns a single feedback thread by an anonymous user."""
        return (
            200000, [{
                'activity_id': 'exp_id',
                'activity_title': 'exp_title',
                'author_id': None,
                'last_updated_ms': 100000,
                'subject': 'Feedback Message Subject',
                'type': feconf.UPDATE_TYPE_FEEDBACK_MESSAGE,
            }])

    def test_author_ids_are_handled_correctly(self):
        """Test that author ids are converted into author usernames
        and that anonymous authors are handled correctly.
        """
        with self.swap(
            user_jobs_continuous.DashboardRecentUpdatesAggregator,
            'get_recent_notifications',
            self._get_recent_notifications_mock_by_viewer):

            self.login(self.VIEWER_EMAIL)
            response = self.get_json(self.DASHBOARD_DATA_URL)
            self.assertEqual(len(response['recent_notifications']), 1)
            self.assertEqual(
                response['recent_notifications'][0]['author_username'],
                self.VIEWER_USERNAME)
            # The raw author id must never be exposed to the frontend.
            self.assertNotIn('author_id', response['recent_notifications'][0])

        with self.swap(
            user_jobs_continuous.DashboardRecentUpdatesAggregator,
            'get_recent_notifications',
            self._get_recent_notifications_mock_by_anonymous_user):

            self.login(self.VIEWER_EMAIL)
            response = self.get_json(self.DASHBOARD_DATA_URL)
            self.assertEqual(len(response['recent_notifications']), 1)
            # Anonymous authors are rendered as an empty username.
            self.assertEqual(
                response['recent_notifications'][0]['author_username'], '')
            self.assertNotIn('author_id', response['recent_notifications'][0])
class CreationButtonsTest(test_utils.GenericTestBase):
    """Tests for creating new explorations from the dashboard buttons."""

    def setUp(self):
        super(CreationButtonsTest, self).setUp()
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)

    def test_new_exploration_ids(self):
        """Test generation of exploration ids."""
        self.login(self.EDITOR_EMAIL)
        response = self.testapp.get(feconf.CREATOR_DASHBOARD_URL)
        self.assertEqual(response.status_int, 200)
        csrf_token = self.get_csrf_token_from_response(response)
        exp_a_id = self.post_json(
            feconf.NEW_EXPLORATION_URL, {}, csrf_token
        )[creator_dashboard.EXPLORATION_ID_KEY]
        # Generated exploration ids are expected to be 12 characters long.
        self.assertEqual(len(exp_a_id), 12)
        self.logout()
| {
"content_hash": "74a9dfe068055d3eb44cce8b2946a800",
"timestamp": "",
"source": "github",
"line_count": 695,
"max_line_length": 79,
"avg_line_length": 41.325179856115106,
"alnum_prop": 0.6455206991400021,
"repo_name": "AllanYangZhou/oppia",
"id": "99f5225395b52110e078a3885395240f8c95cbd4",
"size": "29326",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/controllers/creator_dashboard_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "82690"
},
{
"name": "HTML",
"bytes": "1128088"
},
{
"name": "JavaScript",
"bytes": "3945933"
},
{
"name": "Python",
"bytes": "4888439"
},
{
"name": "Shell",
"bytes": "50051"
}
],
"symlink_target": ""
} |
"""Google Cloud Dataproc Authenticator for Sparkmagic"""
import json
import os
import subprocess
import re
import random
import urllib3.util
from hdijupyterutils.ipythondisplay import IpythonDisplay
import ipyvuetify as v
from google.cloud import dataproc_v1beta2
import google.auth.transport.requests
from google.auth import _cloud_sdk
from google.auth.exceptions import UserAccessTokenError
from google.oauth2.credentials import Credentials
from sparkmagic.auth.customauth import Authenticator
from sparkmagic.livyclientlib.exceptions import BadUserConfigurationException
import googledataprocauthenticator.utils.constants as constants
# Module-level display helper used to surface error messages in the notebook UI.
ipython_display = IpythonDisplay()
def list_credentialed_user_accounts():
    """Load all of user's credentialed accounts with ``gcloud auth list`` command.

    Returns:
        Tuple[Sequence[str], Optional[str]]: the account names that currently
        yield an access token, and the account marked ACTIVE in gcloud (None
        if no usable account is active).

    Raises:
        sparkmagic.livyclientlib.BadUserConfigurationException: if gcloud cannot be invoked
    """
    accounts_json = ""
    if os.name == "nt":
        command = constants.CLOUD_SDK_WINDOWS_COMMAND
    else:
        command = constants.CLOUD_SDK_POSIX_COMMAND
    try:
        command = (command,) + constants.CLOUD_SDK_USER_CREDENTIALED_ACCOUNTS_COMMAND
        # run `gcloud auth list` command
        accounts_json = subprocess.check_output(command, stderr=subprocess.STDOUT)
        account_objects = json.loads(accounts_json)
        credentialed_accounts = list()
        active_account = None
        # convert account dictionaries with status and account keys to a list of accounts
        for account in account_objects:
            try:
                # if the account does not have an access token we don't add it to the account
                # dropdown
                _cloud_sdk.get_auth_access_token(account['account'])
                # service accounts will be added later with 'default-credentials'
                get_credentials_for_account(account['account'])
                if account['status'] == 'ACTIVE':
                    active_account = account['account']
                credentialed_accounts.append(account['account'])
            # when `gcloud auth print-access-token --account=account` fails we don't add it to
            # the credentialed_accounts list that populates the account dropdown widget; this
            # bare except is a deliberate best-effort filter, not an oversight.
            except:
                pass
        return credentialed_accounts, active_account
    except Exception as caught_exc:
        new_exc = BadUserConfigurationException("Gcloud cannot be invoked.")
        raise new_exc from caught_exc
def get_project_id(account):
    """Return the Cloud SDK ``project`` property for *account*.

    Runs ``gcloud config get-value project --account=ACCOUNT`` and returns
    its trimmed stdout, or None if the command fails for any reason.

    Args:
        account (str): The account to get the project ID for.

    Returns:
        Optional[str]: The project ID, or None on failure.
    """
    gcloud = (constants.CLOUD_SDK_WINDOWS_COMMAND if os.name == "nt"
              else constants.CLOUD_SDK_POSIX_COMMAND)
    try:
        raw = subprocess.check_output(
            (gcloud, "config", "get-value", 'project', '--account', account),
            stderr=subprocess.STDOUT,
        )
        return raw.decode("utf-8").rstrip()
    except Exception:
        # Best effort: a missing/misconfigured gcloud simply yields no project.
        return None
def get_credentials_for_account(account, scopes_list=None):
    """Load one account's credentials with the ``gcloud auth describe ACCOUNT`` command.

    Args:
        account (str): user credentialed account to return credentials for
        scopes_list (Sequence[str]): list of scopes to include in the credentials.

    Returns:
        Tuple[google.oauth2.credentials.Credentials, Optional[str]]: the
        constructed credentials and their quota project id.

    Raises:
        google.auth.exceptions.UserAccessTokenError: if credentials could not be found for the
            given account (any underlying failure is wrapped into this).
    """
    if os.name == "nt":
        command = constants.CLOUD_SDK_WINDOWS_COMMAND
    else:
        command = constants.CLOUD_SDK_POSIX_COMMAND
    try:
        describe_account_command = ("auth", "describe", account, '--format', 'json')
        command = (command,) + describe_account_command
        account_json = subprocess.check_output(command, stderr=subprocess.STDOUT)
        account_describe = json.loads(account_json)
        credentials = Credentials.from_authorized_user_info(account_describe, scopes=scopes_list)
        # if quota_project_id is None, we try to infer a project from that account's gcloud
        # configuration
        if credentials.quota_project_id is None:
            credentials = credentials.with_quota_project(get_project_id(account))
        return (credentials, credentials.quota_project_id)
    except Exception as caught_exc:
        new_exc = UserAccessTokenError(f"Could not obtain access token for {account}")
        raise new_exc from caught_exc
def get_component_gateway_url(project_id, region, cluster_name, credentials):
    """Gets the component gateway url for a cluster name, project id, and region.

    Args:
        project_id (str): The project id to use for the url
        region (str): The region to use for the url
        cluster_name (Optional[str]): The cluster name to use for the url; if
            None, a random eligible cluster is picked from the project/region.
        credentials (google.oauth2.credentials.Credentials): The authorization
            credentials to attach to requests.

    Returns:
        Tuple[str, str]: the component gateway Livy endpoint url and the
        cluster name it belongs to.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a retryable error
            and retry attempts failed.
        IndexError: if cluster_name is None and no eligible cluster exists
            (random.choice on an empty pool).
        ValueError: If the parameters are invalid.
    """
    # NOTE: the original wrapped both sections in `try: ... except: raise`,
    # which re-raises unchanged and is a no-op; the wrappers were removed.
    client = dataproc_v1beta2.ClusterControllerClient(
        credentials=credentials,
        client_options={
            "api_endpoint": f"{region}-dataproc.googleapis.com:443"
        }
    )
    # If the caller did not supply a cluster name, pick a random eligible one.
    if cluster_name is None:
        cluster_pool, _ = get_cluster_pool(project_id, region, client)
        cluster_name = random.choice(cluster_pool)
    response = client.get_cluster(project_id=project_id, region=region, cluster_name=cluster_name)
    # Take an arbitrary component-gateway http port entry and rewrite it into
    # the Livy endpoint path.
    url = response.config.endpoint_config.http_ports.popitem()[1]
    parsed_uri = urllib3.util.parse_url(url)
    endpoint_address = f"{parsed_uri.scheme}://{parsed_uri.netloc}/gateway/default/livy/v1"
    return endpoint_address, cluster_name
def get_cluster_pool(project_id, region, client, selected_filters=None):
    """Gets the eligible clusters and label filters for a project and region.

    Only ACTIVE clusters with component gateway enabled and a Livy
    initialization action are considered eligible.

    Args:
        project_id (str): The project id to use
        region (str): The region to use
        client (dataproc_v1beta2.ClusterControllerClient): The client that provides the
            listing clusters method
        selected_filters (Optional[Sequence[str]]): additional Dataproc list
            filters ANDed with ``status.state=ACTIVE``.

    Returns:
        Tuple[Sequence[str], Sequence[str]]: eligible cluster names and the
        set of ``labels.key=value`` filter strings seen on those clusters.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to a retryable error
            and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    cluster_pool = []
    filter_set = set()
    filters = ['status.state=ACTIVE']
    if selected_filters is not None:
        filters.extend(selected_filters)
    filter_str = ' AND '.join(filters)
    # NOTE: the original wrapped this loop in `try: ... except: raise`, which
    # re-raises unchanged and is a no-op; the wrapper was removed.
    for cluster in client.list_clusters(request={'project_id': project_id, 'region': region, 'filter': filter_str}):
        # check component gateway is enabled
        if len(cluster.config.endpoint_config.http_ports.values()) != 0:
            action_list = list()
            for action in cluster.config.initialization_actions:
                # check if livy init action with a region with the regex pattern [a-z0-9-]+
                is_livy_action = re.search("gs://goog-dataproc-initialization-actions-"
                                           "[a-z0-9-]+/livy/livy.sh", action.executable_file) is not None
                if is_livy_action:
                    action_list.append(action.executable_file)
                    cluster_pool.append(cluster.cluster_name)
            for key, value in cluster.labels.items():
                filter_set.add('labels.' + key + '=' + value)
    return cluster_pool, list(filter_set)
def get_regions():
    """Return the static list of GCP regions shown in the region combobox."""
    return [
        'asia-east1', 'asia-east2', 'asia-northeast1', 'asia-northeast2',
        'asia-northeast3', 'asia-south1', 'asia-southeast1', 'asia-southeast2',
        'australia-southeast1', 'europe-north1', 'europe-west1', 'europe-west2',
        'europe-west3', 'europe-west4', 'europe-west5', 'europe-west6',
        'northamerica-northeast1', 'southamerica-east1', 'us-central1',
        'us-central2', 'us-east1', 'us-east2', 'us-east4', 'us-west1',
        'us-west2', 'us-west3', 'us-west4',
    ]
def application_default_credentials_configured():
    """Checks if google application-default credentials are configured.

    Returns:
        bool: True if ``google.auth.default`` yields credentials, else False.
    """
    try:
        credentials, _ = google.auth.default(scopes=['https://www.googleapis.com/auth/'
            'cloud-platform', 'https://www.googleapis.com/auth/userinfo.email'])
    # `except Exception` instead of a bare `except`: a bare except would also
    # swallow SystemExit/KeyboardInterrupt. Any credential-loading failure
    # (e.g. DefaultCredentialsError) means "not configured".
    except Exception:
        return False
    return credentials is not None
class GoogleAuth(Authenticator):
    """Custom Authenticator to use Google OAuth with SparkMagic."""

    def __init__(self, parsed_attributes=None):
        # Reusable transport request object used to refresh tokens in __call__.
        self.callable_request = google.auth.transport.requests.Request()
        self.scopes = ['https://www.googleapis.com/auth/cloud-platform',
                       'https://www.googleapis.com/auth/userinfo.email']
        self.credentialed_accounts, active_user_account = list_credentialed_user_accounts()
        self.default_credentials_configured = application_default_credentials_configured()
        if self.default_credentials_configured:
            # Expose ADC in the account dropdown under a synthetic entry.
            self.credentialed_accounts.append('default-credentials')
        self.active_credentials = None
        if parsed_attributes is not None:
            # An explicit account was requested; it must be credentialed.
            if parsed_attributes.account in self.credentialed_accounts:
                self.active_credentials = parsed_attributes.account
                if self.active_credentials == 'default-credentials' and \
                        self.default_credentials_configured:
                    self.credentials, self.project = google.auth.default(scopes=self.scopes)
                else:
                    self.credentials, self.project = get_credentials_for_account(
                        self.active_credentials, self.scopes
                    )
            else:
                new_exc = BadUserConfigurationException(
                    f"{parsed_attributes.account} is not a credentialed account. Run `gcloud "
                    "auth login` in your command line to authorize gcloud to access the Cloud "
                    "Platform with Google user credentials to authenticate. Run `gcloud auth "
                    "application-default login` to acquire new user credentials to use for "
                    "Application Default Credentials. Run `gcloud auth list` to see your credentialed "
                    "accounts.")
                raise new_exc
        else:
            # No account requested: prefer ADC, then the gcloud ACTIVE account,
            # otherwise leave credentials unset.
            if self.default_credentials_configured:
                self.credentials, self.project = google.auth.default(scopes=self.scopes)
                self.active_credentials = 'default-credentials'
            elif active_user_account is not None:
                self.credentials, self.project = get_credentials_for_account(
                    active_user_account, self.scopes
                )
                self.active_credentials = active_user_account
            else:
                self.credentials, self.project = None, None
        Authenticator.__init__(self, parsed_attributes)
        self.widgets = self.get_widgets(constants.WIDGET_WIDTH)

    def get_widgets(self, widget_width):
        """Creates and returns the account/project/region/filter/cluster widgets.

        Args:
            widget_width (str): The width of all widgets to be created.
                NOTE(review): this parameter is not referenced in the body —
                confirm whether it is still needed by callers.

        Returns:
            Sequence: list of ipyvuetify widgets
        """
        self.project_widget = v.TextField(
            class_='ma-2',
            placeholder=constants.ENTER_PROJECT_MESSAGE,
            label='Project ID *',
            dense=True,
            v_model=self.project,  # will be None if no project can be determined from credentials
            color='primary',
            outlined=True,
        )
        self.account_widget = v.Select(
            class_='ma-2',
            placeholder='No accounts found',
            label='Account *',
            dense=True,
            color='primary',
            hide_selected=False,
            outlined=True,
            v_model=self.active_credentials,
            items=self.credentialed_accounts,
            auto_select_first=True,
            # v_slots allows help message to be displayed if no accounts are found.
            v_slots=[{
                'name':
                    'no-data',
                'children':
                    v.ListItem(children=[
                        v.ListItemContent(children=[
                            v.ListItemTitle(
                                children=[constants.NO_ACCOUNTS_FOUND_HELP_MESSAGE])
                        ])
                    ])
            }],
        )
        self.region_widget = v.Combobox(
            class_='ma-2',
            placeholder=constants.SELECT_REGION_MESSAGE,
            label='Region *',
            dense=True,
            color='primary',
            hide_selected=True,
            outlined=True,
            items=get_regions(),
            v_model=None,
        )
        self.filter_widget = v.Combobox(
            class_='ma-2',
            placeholder=constants.NO_FILTERS_FOUND_MESSAGE,
            multiple=True,
            label='Filter by label',
            chips=True,
            dense=True,
            deletable_chips=True,
            color='primary',
            hide_selected=True,
            outlined=True,
            items=[],
            auto_select_first=True,
            v_model=None,
            v_slots=[{
                'name':
                    'no-data',
                'children':
                    v.ListItem(children=[
                        v.ListItemContent(children=[
                            v.ListItemTitle(
                                children=[constants.NO_FILTERS_FOUND_HELP_MESSAGE])
                        ])
                    ])
            }],
        )
        self.cluster_widget = v.Combobox(
            class_='ma-2',
            placeholder=constants.NO_CLUSTERS_FOUND_MESSAGE,
            label='Cluster',
            dense=True,
            color='primary',
            hide_selected=True,
            outlined=True,
            items=[],
            auto_select_first=True,
            v_model=None,
            v_slots=[{
                'name':
                    'no-data',
                'children':
                    v.ListItem(children=[
                        v.ListItemContent(children=[
                            v.ListItemTitle(
                                children=[constants.NO_CLUSTERS_FOUND_HELP_MESSAGE])
                        ])
                    ])
            }],
        )
        # Wire change events so each dropdown refreshes its dependents.
        self.account_widget.on_event('change', self._update_active_credentials)
        self.project_widget.on_event('change', self._update_project)
        self.region_widget.on_event('change', self._update_cluster_list_on_region)
        self.filter_widget.on_event('change', self._update_cluster_list_on_filter)
        widgets = [self.account_widget, self.project_widget, self.region_widget,
                   self.cluster_widget, self.filter_widget]
        return widgets

    def _update_project(self, _widget, _event, data):
        """Change handler for the project textbox: validates the new project
        and repopulates the cluster/filter dropdowns."""
        if self.account_widget.v_model is not None and self.region_widget.v_model is not None:
            self.initialize_credentials_with_auth_account_selection(self.account_widget.v_model)
            # checks if the project textbox's value is valid
            try:
                _, _ = get_component_gateway_url(data, self.region_widget.v_model, None,
                                                 self.credentials)
                self.project_widget.error = False
                self.region_widget.error = False
                self.project = self.project_widget.v_model
                client = dataproc_v1beta2.ClusterControllerClient(
                    credentials=self.credentials,
                    client_options={
                        "api_endpoint": f"{self.region_widget.v_model}-dataproc.googleapis.com:443"
                    }
                )
                self.cluster_widget.items, self.filter_widget.items = get_cluster_pool(
                    self.project_widget.v_model, self.region_widget.v_model, client
                )
                self._update_widgets_placeholder_text()
            except IndexError:
                # IndexError presumably comes from random.choice on an empty
                # cluster pool: the project/region are valid but have no
                # eligible clusters, so this is not an input error.
                self.project_widget.error = False
                pass
            except Exception:
                self.project_widget.error = True
                ipython_display.send_error("Please make sure you have entered a correct Project "
                                           "ID and Region.")
                self.cluster_widget.placeholder = constants.NO_CLUSTERS_FOUND_MESSAGE
                self.filter_widget.placeholder = constants.NO_FILTERS_FOUND_MESSAGE
                self.cluster_widget.items = []
                self.filter_widget.items = []

    def _update_active_credentials(self, _widget, _event, data):
        """Change handler for the account dropdown: switches credentials and
        resets the dependent project/region/cluster/filter widgets."""
        self.initialize_credentials_with_auth_account_selection(data)
        self.active_credentials = data
        self.project_widget.error = False
        self.region_widget.error = False
        if self.project_widget.v_model != self.project:
            self.project_widget.v_model = self.project
        self.region_widget.v_model = None
        self.cluster_widget.items = []
        self.filter_widget.items = []
        self._update_widgets_placeholder_text()

    def _update_cluster_list_on_region(self, _widget, _event, data):
        """Change handler for the region combobox: validates the region and
        repopulates the cluster/filter dropdowns."""
        if self.account_widget.v_model is not None and self.project_widget.v_model is not None:
            self.initialize_credentials_with_auth_account_selection(self.account_widget.v_model)
            try:
                _, _ = get_component_gateway_url(self.project_widget.v_model, data, None,
                                                 self.credentials)
                self.region_widget.error = False
                self.project_widget.error = False
                client = dataproc_v1beta2.ClusterControllerClient(
                    credentials=self.credentials,
                    client_options={
                        "api_endpoint": f"{data}-dataproc.googleapis.com:443"
                    }
                )
                self.cluster_widget.items, self.filter_widget.items = get_cluster_pool(
                    self.project_widget.v_model, data, client
                )
                self._update_widgets_placeholder_text()
            except IndexError:
                # No eligible clusters in this region; region itself is valid.
                self.region_widget.error = False
                pass
            except:
                self.region_widget.error = True
                ipython_display.send_error("Please make sure you have entered a correct Project "
                                           "ID and Region.")
                self.cluster_widget.placeholder = constants.NO_CLUSTERS_FOUND_MESSAGE
                self.filter_widget.placeholder = constants.NO_FILTERS_FOUND_MESSAGE
                self.cluster_widget.items = []
                self.filter_widget.items = []

    def _update_cluster_list_on_filter(self, _widget, _event, data):
        """Change handler for the label-filter combobox: narrows the cluster
        dropdown to clusters matching the selected label filters."""
        self.initialize_credentials_with_auth_account_selection(self.account_widget.v_model)
        if self.project_widget.v_model != self.project and self.project is not None:
            self.project_widget.v_model = self.project
        # we need to update filters and clusters now
        if self.region_widget.v_model is not None:
            try:
                client = dataproc_v1beta2.ClusterControllerClient(
                    credentials=self.credentials,
                    client_options={
                        "api_endpoint": f"{self.region_widget.v_model}-dataproc.googleapis.com:443"
                    }
                )
                # we update the cluster dropdown
                self.cluster_widget.items, _ = get_cluster_pool(
                    self.project_widget.v_model, self.region_widget.v_model, client, data
                )
                self._update_widgets_placeholder_text()
            except Exception as caught_exc:
                self.cluster_widget.placeholder = constants.NO_CLUSTERS_FOUND_MESSAGE
                self.filter_widget.placeholder = constants.NO_FILTERS_FOUND_MESSAGE
                ipython_display.send_error(f"Failed to create a client with the api_endpoint: "
                                           f"{self.region_widget.v_model}-dataproc.googleapis.com:443 due to an error: "
                                           f"{str(caught_exc)}")

    def _update_widgets_placeholder_text(self):
        """Helper method to update the cluster and filters placeholder text"""
        if len(self.cluster_widget.items) != 0:
            self.cluster_widget.placeholder = constants.SELECT_CLUSTER_MESSAGE
        else:
            self.cluster_widget.placeholder = constants.NO_CLUSTERS_FOUND_MESSAGE
        if len(self.filter_widget.items) != 0:
            self.filter_widget.placeholder = constants.SELECT_FILTER_MESSAGE
        else:
            self.filter_widget.placeholder = constants.NO_FILTERS_FOUND_MESSAGE

    def initialize_credentials_with_auth_account_selection(self, account):
        """Initializes self.credentials with the account selected from the auth dropdown widget"""
        if account != self.active_credentials:
            if account == 'default-credentials':
                self.credentials, self.project = google.auth.default(scopes=self.scopes)
            else:
                self.credentials, self.project = get_credentials_for_account(account, self.scopes)

    def update_with_widget_values(self):
        """Updates url to be the component gateway url of the cluster found with the project,
        region, and cluster textbox widget values"""
        no_credentials_exception = BadUserConfigurationException(
            "Failed to obtain access token. Run `gcloud auth login` in your command line "
            "to authorize gcloud to access the Cloud Platform with Google user credentials to "
            "authenticate. Run `gcloud auth application-default login` acquire new user "
            "credentials to use for Application Default Credentials.")
        if self.credentials is not None:
            try:
                self.initialize_credentials_with_auth_account_selection(self.account_widget.v_model)
                self.url, self.cluster_widget.v_model = get_component_gateway_url(
                    self.project_widget.v_model, self.region_widget.v_model,
                    self.cluster_widget.v_model, self.credentials
                )
            except:
                raise
        else:
            raise no_credentials_exception

    def __call__(self, request):
        """Requests-auth hook: attach a (refreshed if needed) Bearer token."""
        if not self.credentials.valid:
            self.credentials.refresh(self.callable_request)
        request.headers['Authorization'] = f'Bearer {self.credentials.token}'
        return request

    def __hash__(self):
        return hash((self.active_credentials, self.url, self.__class__.__name__))
| {
"content_hash": "6604fd22afe5b778f104e84e523956ea",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 122,
"avg_line_length": 45.00561797752809,
"alnum_prop": 0.605126284691882,
"repo_name": "GoogleCloudDataproc/dataprocmagic",
"id": "09758b3b66adec674566ed6835d1b4e256cfb765",
"size": "24610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "googledataprocauthenticator/google.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "93081"
}
],
"symlink_target": ""
} |
# NOTE: Python 2 script (uses print statements).
import gzip
import protolib
import sys

# Import the packet proto definitions. If they are not found, attempt
# to generate them automatically. This assumes that the script is
# executed from the gem5 root.
try:
    import packet_pb2
except:
    print "Did not find packet proto definitions, attempting to generate"
    from subprocess import call
    error = call(['protoc', '--python_out=util', '--proto_path=src/proto',
                  'src/proto/packet.proto'])
    if not error:
        print "Generated packet proto definitions"

        try:
            import google.protobuf
        except:
            print "Please install Python protobuf module"
            exit(-1)

        import packet_pb2
    else:
        print "Failed to import packet proto definitions"
        exit(-1)
def main():
    """Decodes a gem5 protobuf packet trace into a CSV-like ASCII file.

    Usage: decode_packet_trace.py <protobuf input> <ASCII output>
    """
    if len(sys.argv) != 3:
        print "Usage: ", sys.argv[0], " <protobuf input> <ASCII output>"
        exit(-1)

    try:
        # First see if this file is gzipped
        try:
            # Opening the file works even if it is not a gzip file
            proto_in = gzip.open(sys.argv[1], 'rb')

            # Force a check of the magic number by seeking in the
            # file. If we do not do it here the error will occur when
            # reading the first message.
            proto_in.seek(1)
            proto_in.seek(0)
        except IOError:
            proto_in = open(sys.argv[1], 'rb')
    except IOError:
        print "Failed to open ", sys.argv[1], " for reading"
        exit(-1)

    try:
        ascii_out = open(sys.argv[2], 'w')
    except IOError:
        print "Failed to open ", sys.argv[2], " for writing"
        exit(-1)

    # Read the magic number in 4-byte Little Endian
    magic_number = proto_in.read(4)
    if magic_number != "gem5":
        print "Unrecognized file", sys.argv[1]
        exit(-1)

    print "Parsing packet header"

    # Add the packet header
    header = packet_pb2.PacketHeader()
    protolib.decodeMessage(proto_in, header)

    print "Object id:", header.obj_id
    print "Tick frequency:", header.tick_freq

    print "Parsing packets"

    num_packets = 0
    packet = packet_pb2.Packet()

    # Decode the packet messages until we hit the end of the file
    while protolib.decodeMessage(proto_in, packet):
        num_packets += 1
        # ReadReq is 1 and WriteReq is 4 in src/mem/packet.hh Command enum
        cmd = 'r' if packet.cmd == 1 else ('w' if packet.cmd == 4 else 'u')
        if packet.HasField('pkt_id'):
            ascii_out.write('%s,' % (packet.pkt_id))
        if packet.HasField('flags'):
            ascii_out.write('%s,%s,%s,%s,%s\n' % (cmd, packet.addr, packet.size,
                                                  packet.flags, packet.tick))
        else:
            ascii_out.write('%s,%s,%s,%s\n' % (cmd, packet.addr, packet.size,
                                               packet.tick))

    print "Parsed packets:", num_packets

    # We're done
    ascii_out.close()
    proto_in.close()

if __name__ == "__main__":
    main()
| {
"content_hash": "327c3cc7502d65622129c238639796eb",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 80,
"avg_line_length": 30.474747474747474,
"alnum_prop": 0.5777262180974478,
"repo_name": "bxshi/gem5",
"id": "e6f36c295afa0cc562a37a098b2568bcd5131c9a",
"size": "5582",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "util/decode_packet_trace.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "239729"
},
{
"name": "C",
"bytes": "974633"
},
{
"name": "C++",
"bytes": "11908945"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "JavaScript",
"bytes": "628"
},
{
"name": "Perl",
"bytes": "385922"
},
{
"name": "Python",
"bytes": "3484139"
},
{
"name": "R",
"bytes": "57296"
},
{
"name": "Ruby",
"bytes": "18948"
},
{
"name": "Shell",
"bytes": "2193"
},
{
"name": "TeX",
"bytes": "19361"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
import datetime
from django.db import models
from django.contrib.auth.models import User
from django import forms
from django.conf import settings
def sign(i):
    """Return the sign of *i*: 1 if positive, -1 if negative, 0 if zero."""
    if i == 0:
        return 0
    return 1 if i > 0 else -1
class Userdata(models.Model):
    """Per-user tipping data, attached 1:1 to Django's auth user."""
    user = models.OneToOneField(User)
    team = models.CharField(max_length=10, choices=settings.TEAM_CHOICES)
    # Total points; recomputed by Spiel.save() after each result update.
    punkte = models.IntegerField()
    # Leaderboard rank; recomputed by Spiel.save() (ties share a rank).
    platz = models.IntegerField(default=0)
    friends = models.ManyToManyField('self', blank=True, symmetrical=False)

    def __unicode__(self):
        return self.user.get_full_name()
class Mannschaft(models.Model):
    """A team, identified by a short code and a display name."""
    code = models.CharField(max_length=3)
    name = models.CharField(max_length=50)

    def __unicode__(self):
        return self.name

    class Meta:
        verbose_name_plural = 'mannschaften'
class Runde(models.Model):
    """A tournament round (group stage matchday or knockout round)."""
    NAME_CHOICES = (('V1', 'Vorrunde, Spieltag 1'),
                    ('V2', 'Vorrunde, Spieltag 2'),
                    ('V3', 'Vorrunde, Spieltag 3'),
                    ('AF', 'Achtelfinale'),
                    ('VF', 'Viertelfinale'),
                    ('HF', 'Halbfinale'),
                    # ('3P', 'Spiel um 3. Platz'),
                    ('F', 'Finale'))
    name = models.CharField(max_length=2, choices=NAME_CHOICES, unique=True)
    # Point multiplier applied to tips on games of this round.
    faktor = models.IntegerField(default=1)
    # Release level: 0 = none, 1 = viewing only, 2 = tipping allowed.
    freigabe = models.IntegerField(default=2)

    # NOTE(review): this model defines __str__ while the other models define
    # __unicode__ (Python 2) — confirm whether this inconsistency is intended.
    def __str__(self):
        freigaben = { 0: 'Keine', 1: 'Ansicht', 2: 'Tipp' }
        return self.get_name() + ' (Freigabe: ' + freigaben[self.freigabe] + ')'

    def get_name(self):
        # Map the short code to its human-readable choice label.
        names = dict(Runde.NAME_CHOICES)
        return names[self.name]

    class Meta:
        verbose_name_plural = 'Runden'
class Spiel(models.Model):
    """A single game; saving a result re-scores all tips and the leaderboard."""
    mannschaft1 = models.ForeignKey(Mannschaft, related_name='ms1')
    mannschaft2 = models.ForeignKey(Mannschaft, related_name='ms2')
    # Goals are nullable until a result has been entered.
    tore1 = models.IntegerField(blank=True, null=True)
    tore2 = models.IntegerField(blank=True, null=True)
    datum = models.DateTimeField()
    runde = models.ForeignKey(Runde)

    def __unicode__(self):
        result = self.runde.name + ': ' + self.mannschaft1.name + ' - ' + self.mannschaft2.name
        if self.tore1 != None:
            result += ' (' + str(self.tore1) + ':' + str(self.tore2) +')'
        return result

    def tippbar(self):
        """Returns 1 while the game may still be tipped, else 0.

        Tipping requires the round to be released for tips (freigabe == 2),
        no result entered yet, and kickoff at least one hour in the future.
        """
        if self.runde.freigabe != 2: return 0
        if self.tore1 != None and self.tore2 != None: return 0
        if self.datum - datetime.datetime.now() >= datetime.timedelta(hours=1): return 1
        return 0

    def save(self):
        """Saves the game, then re-scores every tip and rebuilds the leaderboard."""
        models.Model.save(self)
        # tipps updaten: re-grade every tip on this game against the result.
        for t in Tipp.objects.filter(spiel=self.id):
            t.punkte = Tipp.NULL
            if self.tore1 != None and self.tore2 != None:
                # Tendency (winner/draw) correct?
                if sign(self.tore1 - self.tore2) == sign(t.tore1 - t.tore2): t.punkte = Tipp.TENDENZ
                # Correct goal difference (only meaningful for non-draws).
                if self.tore1 - self.tore2 == t.tore1 - t.tore2 and self.tore1 != self.tore2:
                    t.punkte = Tipp.TORDIFFERENZ
                # Exact score.
                if self.tore1 == t.tore1 and self.tore2 == t.tore2:
                    t.punkte = Tipp.ERGEBNIS
            t.save()
        # Recompute every user's total points from all of their tips.
        ulist = Userdata.objects.all()
        for u in ulist:
            u.punkte = 0
            for t in Tipp.objects.filter(user=u.user):
                if t.tore1 != t.tore2:
                    u.punkte += { Tipp.NULL: 0,
                                  Tipp.TENDENZ: 5,
                                  Tipp.TORDIFFERENZ: 7,
                                  Tipp.ERGEBNIS: 9 }[t.punkte] * t.spiel.runde.faktor
                else:
                    # For a tipped draw, an exact hit earns only 7 points
                    # (difference and exact score coincide).
                    u.punkte += { Tipp.NULL: 0,
                                  Tipp.TENDENZ: 5,
                                  Tipp.TORDIFFERENZ: 7,
                                  Tipp.ERGEBNIS: 7 }[t.punkte] * t.spiel.runde.faktor
            u.save()
        # Rebuild ranks; users with equal points share the same rank.
        ulist = Userdata.objects.all().order_by('-punkte')
        vorheriger_platz, punkte_vorheriger_platz = 0, 1000
        for ctr, u in enumerate(ulist):
            if u.punkte == punkte_vorheriger_platz:
                u.platz = vorheriger_platz
            else:
                u.platz = ctr + 1
            vorheriger_platz, punkte_vorheriger_platz = u.platz, u.punkte
            u.save()

    class Meta:
        verbose_name_plural = 'Spiele'
        unique_together = (('mannschaft1', 'mannschaft2', 'datum'),)
class Tipp(models.Model):
    """A user's score prediction for one game."""
    user = models.ForeignKey(User)
    spiel = models.ForeignKey(Spiel)
    tore1 = models.IntegerField()
    tore2 = models.IntegerField()
    # Grading result, one of the constants below (set by Spiel.save()).
    punkte = models.IntegerField()

    # Grading levels, in increasing order of accuracy.
    NULL = 0
    TENDENZ = 1
    TORDIFFERENZ = 2
    ERGEBNIS = 3

    def __unicode__(self):
        return ' '.join([self.user.username, unicode(self.spiel), unicode(self.tore1), unicode(self.tore2) ])

    class Meta:
        unique_together = (('user', 'spiel'),)
| {
"content_hash": "df788920c43ddb0785a0e098eeddc3d7",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 109,
"avg_line_length": 33.12244897959184,
"alnum_prop": 0.5567878414458821,
"repo_name": "ugoertz/tippspiel",
"id": "daa18a4ebc0e12f90f693905aef586e9288bf3b8",
"size": "4869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tipps/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14437"
},
{
"name": "Python",
"bytes": "40934"
}
],
"symlink_target": ""
} |
import contextlib
import dataclasses
import re
import os
from typing import Any, Callable, List, Optional, Sequence, Tuple
from absl.testing import absltest
from absl import logging
import jax
from jax import dtypes
from jax import numpy as jnp
from jax._src import test_util as jtu
from jax import tree_util
from jax.config import config
from jax.experimental import jax2tf
from jax._src import util
from jax._src.lib import xla_bridge
import numpy as np
import tensorflow as tf # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
# Loose alias for a dtype-like value (numpy/TF/JAX dtypes are all accepted).
DType = Any

def _make_tf_input_signature(*tf_args) -> List[tf.TensorSpec]:
    """Builds a TensorSpec signature mirroring the structure of tf_args.

    tf_args can be PyTrees; the signature preserves their nesting.
    """
    def _make_one_array_signature(tf_arg):
        return tf.TensorSpec(np.shape(tf_arg), jax2tf.dtype_of_val(tf_arg))

    return tf.nest.map_structure(_make_one_array_signature, list(tf_args))
def _run_tf_function(func_tf: Callable, *tf_args, mode: str):
if mode == "eager":
return func_tf(*tf_args) # EAGER
elif mode == "graph":
return tf.function(
func_tf,
autograph=False,
input_signature=_make_tf_input_signature(*tf_args))(*tf_args) # GRAPH
elif mode == "compiled":
# Adding an explicit input_signature prevents TF from constant-folding
# the computation eagerly before compilation
return tf.function(
func_tf,
autograph=False,
jit_compile=True,
input_signature=_make_tf_input_signature(*tf_args))(
*tf_args) # COMPILED
else:
assert False, (
f"Expected 'eager', 'graph', or 'compiled' for mode: got '{mode}'")
## Helper functions for matching OpMetadata in TF graphs
@dataclasses.dataclass(order=True, frozen=True)
class OpMetadataGraph:
    """Hashable record of one TF op's type plus its XLA OpMetadata fields."""
    tf_type: str  # The standard Tf.Operation.type
    op_type: str  # The rest are OpMetadata fields from _Xla... attributes
    op_name: str
    source_file: str
    source_line: str
def SaveAndLoadModel(model: tf.Module,
                     save_gradients=True) -> tf.Module:
    """Round-trips a tf.Module through SavedModel on disk and returns the reload.

    Args:
        model: the module to serialize.
        save_gradients: whether to save custom gradients with the model.
    """
    # Roundtrip through saved model on disk. The directory name is keyed on
    # the model's id so repeated calls with different models do not collide.
    model_dir = os.path.join(absltest.get_default_test_tmpdir(), str(id(model)))
    tf.saved_model.save(
        model, model_dir,
        options=tf.saved_model.SaveOptions(experimental_custom_gradients=save_gradients))
    restored_model = tf.saved_model.load(model_dir)
    return restored_model
def SaveAndLoadFunction(f_tf: Callable, *,
                        input_signature: Optional[Sequence[tf.TensorSpec]] = None,
                        input_args: Optional[Sequence[Any]] = None,
                        variables: Sequence[tf.Variable] = (),
                        save_gradients=True) -> Tuple[Callable, tf.train.Checkpoint]:
    """Round-trips a TF callable through SavedModel and returns (f, checkpoint).

    Roundtrip through saved model on disk. Return the Checkpoint also
    for the cases when there are variables. If you don't pass input_signature
    then it is created from the input_args (exactly one of the two must be
    given).
    """
    model = tf.train.Checkpoint()
    if input_signature is None:
        assert input_args is not None
        input_signature = tf.nest.map_structure(lambda a: tf.TensorSpec(a.shape, a.dtype),
                                                input_args)
    else:
        assert input_args is None
    model.f = tf.function(f_tf,
                          autograph=False,
                          input_signature=input_signature)
    # Keep the variables reachable from the checkpoint so they are saved too.
    model.variables = variables
    restored = SaveAndLoadModel(model, save_gradients=save_gradients)
    return restored.f, restored
def TransformJaxVJP(f: Callable, args, res_f_of_args):
  """Builds the VJP of `f`.

  Given `f`, its argument tuple `args`, and `res_f_of_args = f(*args)`,
  returns a pair `(f_vjp, vjp_args)` such that `f_vjp(*vjp_args)` computes
  the vector-Jacobian product of `f` at `args`.
  """
  def one_like_result(r):
    r_dtype = np.result_type(r)
    assert r_dtype != dtypes.float0
    # Cotangents are produced with the same dtype as the primal output.
    # Feeding float0 does not seem to matter for JAX, and avoiding it keeps
    # the TF side simpler.
    return np.ones(np.shape(r), dtype=r_dtype)

  cotangents = tree_util.tree_map(one_like_result, res_f_of_args)

  def f_vjp(args, cts):
    _, pullback = jax.vjp(f, *args)
    return pullback(cts)

  return (f_vjp, (args, cotangents))
def TransformTfValueAndGrad(tf_f: Callable, tf_args,
                            unconnected_gradients=tf.UnconnectedGradients.ZERO):
  """Wraps a TF function so that it returns both its value and its gradient.

  Given `tf_f` and its `tf_args` tuple, returns a pair of the wrapped
  function and the (unchanged) argument tuple.
  """
  def value_and_grad(*inner_args):
    as_vars = tf.nest.map_structure(tf.Variable, inner_args)
    with tf.GradientTape() as tape:
      value = tf_f(*as_vars)
    gradient = tape.gradient(value, as_vars,
                             unconnected_gradients=unconnected_gradients)
    return (value, gradient)

  return value_and_grad, tf_args
def ComputeTfValueAndGrad(tf_f: Callable, tf_args: Sequence,
                          unconnected_gradients=tf.UnconnectedGradients.ZERO):
  """Computes `tf_f(*tf_args)` together with its gradient w.r.t. `tf_args`."""
  assert isinstance(tf_args, Sequence), f"tf_args must be a tuple: {tf_args}"
  wrapped, wrapped_args = TransformTfValueAndGrad(
      tf_f, tf_args, unconnected_gradients=unconnected_gradients)
  return wrapped(*wrapped_args)
@jtu.with_config(jax_numpy_rank_promotion="allow",
                 jax_numpy_dtype_promotion='standard')
class JaxToTfTestCase(jtu.JaxTestCase):
  """Base class for jax2tf tests.

  Provides helpers to run a JAX function through jax2tf in the three TF
  execution modes (eager / graph / compiled), compare results against JAX
  (with per-harness known limitations), check shape polymorphism, and
  inspect the resulting TF graphs and HLO.
  """

  def setUp(self):
    super().setUp()
    # Ensure that all TF ops are created on the proper device (TPU or GPU or CPU)
    tf_preferred_devices = (
        tf.config.list_logical_devices("TPU") +
        tf.config.list_logical_devices("GPU") +
        tf.config.list_logical_devices())
    # First preferred device that exists; falls back to the first device of
    # any kind (CPU) because list_logical_devices() with no arg lists all.
    self.tf_default_device = tf_preferred_devices[0]
    logging.info("Running jax2tf converted code on %s.", self.tf_default_device)
    # We need --config=cuda build flag for TF to see the GPUs
    self.assertEqual(jtu.device_under_test().upper(),
                     self.tf_default_device.device_type)

    # Pin TF ops to the chosen device for the whole test; pop_all() keeps the
    # context alive past this `with`, and addCleanup closes it at teardown.
    with contextlib.ExitStack() as stack:
      stack.enter_context(tf.device(self.tf_default_device))
      self.addCleanup(stack.pop_all().close)

  def assertDtypesMatch(self, x, y, *, canonicalize_dtypes=True):
    """Compares dtypes across JAX and TF dtypes. Overrides super method."""
    def to_numpy_dtype(dt):
      return dt if isinstance(dt, np.dtype) else dt.as_numpy_dtype

    # Without x64, JAX canonicalizes 64-bit dtypes down to 32-bit; apply the
    # same canonicalization to both sides before comparing.
    if not config.x64_enabled and canonicalize_dtypes:
      self.assertEqual(
          dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(x))),
          dtypes.canonicalize_dtype(to_numpy_dtype(jtu._dtype(y))))
    else:
      self.assertEqual(
          to_numpy_dtype(jtu._dtype(x)), to_numpy_dtype(jtu._dtype(y)))

  def ConvertAndCompare(self,
                        func_jax: Callable,
                        *args,
                        enable_xla: bool = True,
                        limitations: Sequence = ()):
    """Compares jax_func(*args) with convert(jax_func)(*args).

    It compares the result of JAX, TF ("eager" mode),
    TF with tf.function ("graph" mode), and TF with
    tf.function(jit_compile=True) ("compiled" mode). In each mode,
    either we expect to encounter a known limitation, or the value should
    match the value from the JAX execution.

    Args:
      func_jax: the function to invoke (``func_jax(*args)``)
      args: the arguments.
      enable_xla: if True, allows the use of XLA ops in jax2tf.convert
        (default: True).
      limitations: the set of limitations for this harness (not yet filtered
        by mode).
    """
    # Run JAX. Should not fail, we assume that the harness has been filtered
    # already by JAX unimplemented primitives.
    result_jax = func_jax(*args)  # JAX
    result_tf = None

    func_tf = jax2tf.convert(func_jax, enable_xla=enable_xla)

    unexpected_successes: List[str] = []
    # Run the "compiled" mode first, it is most important
    for mode in ("compiled", "eager", "graph"):
      def log_message(extra):
        return f"[{self._testMethodName}] mode={mode}: {extra}"

      # Keep only the limitations that apply to the current mode.
      jax2tf_limits = tuple(filter(lambda l: l.filter(mode=mode), limitations))

      skip_tf_run = [l for l in jax2tf_limits if l.skip_tf_run]
      if skip_tf_run:
        logging.info(log_message(f"Skip TF run due to limitations {skip_tf_run}"))
        continue

      try:
        result_tf = _run_tf_function(func_tf, *args, mode=mode)
        tf_exception = None
      except Exception as e:
        tf_exception = e

      expect_tf_error = [l for l in jax2tf_limits if l.expect_tf_error]
      if tf_exception:
        if expect_tf_error:
          logging.info(log_message(
              "Found expected TF error with enabled limitations "
              f"{expect_tf_error}; TF error is {tf_exception}"))
          continue
        else:
          raise tf_exception
      else:
        if expect_tf_error:
          # It is more ergonomic to print all successful modes once
          logging.warning(log_message(
              f"Unexpected success with known limitations {expect_tf_error}"))
          unexpected_successes.append(f"{mode}: {expect_tf_error}")

      if (jtu.device_under_test() == "gpu" and
          "dot_general_preferred" in self._testMethodName):
        logging.info(log_message(f"Arguments are {args}, JAX result is {result_jax}\nand TF result is {result_tf}"))

      skip_comparison = [l for l in jax2tf_limits if l.skip_comparison]
      if skip_comparison:
        logging.warning(log_message(f"Skip result comparison due to {skip_comparison}"))
        continue

      # The loosest tolerance among the applicable limitations, if any.
      max_tol = None
      max_tol_lim = None if not jax2tf_limits else jax2tf_limits[0].get_max_tolerance_limitation(jax2tf_limits)
      if max_tol_lim is not None:
        max_tol = max_tol_lim.tol
        logging.info(log_message(f"Using tol={max_tol} due to {max_tol_lim}"))

      # Convert results to np.arrays
      result_tf = tf.nest.map_structure(lambda t: t.numpy(), result_tf)  # type: ignore

      custom_assert_lim = [l for l in jax2tf_limits if l.custom_assert]
      assert len(custom_assert_lim) <= 1, f"Expecting at most one applicable limitation with custom_assert, found {custom_assert_lim}"

      try:
        err_msg = f"TF mode {mode}."
        log_hlo_on_error = mode == "compiled" or jtu.device_under_test() == "tpu"
        if log_hlo_on_error:
          err_msg += " See the logs for JAX and TF HLO comparisons."
        if custom_assert_lim:
          logging.info(log_message(f"Running custom_assert with tol={max_tol} due to {custom_assert_lim[0]}"))
          custom_assert_lim[0].custom_assert(self, result_jax, result_tf,
                                             args=args, tol=max_tol,
                                             err_msg=err_msg)
        else:
          logging.info(log_message(f"Running default assert with tol={max_tol}"))
          self.assertAllClose(result_jax, result_tf, atol=max_tol, rtol=max_tol,
                              err_msg=err_msg)
      except AssertionError as e:
        # Print the HLO for comparison
        if not log_hlo_on_error:
          print(f"[{self._testMethodName}] Not logging HLO because the "
                f"mode was {mode}")
          raise

        logging.info("[%s] Logging HLO for exception in mode %s: %s",
                     self._testMethodName, mode, e)
        jax_comp = jax.xla_computation(func_jax)(*args)
        jax_hlo = jax_comp.as_hlo_text()
        logging.info("[%s] JAX NON_OPT HLO\n%s",
                     self._testMethodName, jax_hlo)

        tf_args_signature = _make_tf_input_signature(*args)
        # If we give the signature, we cannot pass scalars
        tf_args_no_scalars = tuple(
            map(lambda a, sig: tf.convert_to_tensor(a, dtype=sig.dtype),
                args, tf_args_signature))

        tf_func_compiled = tf.function(
            func_tf,
            autograph=False,
            jit_compile=True,
            input_signature=tf_args_signature)
        tf_hlo = tf_func_compiled.experimental_get_compiler_ir(*tf_args_no_scalars)(
            stage="hlo")
        logging.info("[%s] TF NON OPT HLO\n{%s}", self._testMethodName,
                     tf_hlo)

        backend = xla_bridge.get_backend()
        modules = backend.compile(jax_comp).hlo_modules()
        jax_opt_hlo = modules[0].to_string()
        logging.info("[%s] JAX OPT HLO\n%s", self._testMethodName,
                     jax_opt_hlo)

        tf_opt_hlo = tf_func_compiled.experimental_get_compiler_ir(*tf_args_no_scalars)(
            stage="optimized_hlo")
        logging.info("[%s] TF OPT HLO\n%s", self._testMethodName, tf_opt_hlo)

        raise

    # end "for mode"
    if unexpected_successes:
      msg = (f"[{self._testMethodName}] The following are unexpected "
             "successful modes:\n" + "\n".join(unexpected_successes))
      logging.warning(msg)
      # Uncomment the below if you want to see warnings as failures
      # self.assertEmpty(msg)
    return result_jax, result_tf

  def TransformConvertAndCompare(self, func: Callable, arg,
                                 transform: Optional[str]):
    """Like ConvertAndCompare but first applies a transformation.

    `func` must be a function from one argument to one result. `arg` is
    the argument before the transformation.

    `transform` can be None, "jit", "jvp", "grad", "vmap", "jvp_vmap",
    "grad_vmap"
    """
    if transform is None:
      return self.ConvertAndCompare(func, arg)
    if transform == "jit":
      return self.ConvertAndCompare(jax.jit(func), arg)
    if transform == "jvp":
      t_func = lambda x, xt: jax.jvp(func, (x,), (xt,))
      return self.ConvertAndCompare(t_func, arg, np.full_like(arg, 0.1))
    if transform == "grad":
      return self.ConvertAndCompare(jax.grad(func), arg)
    if transform == "vmap":
      # Batch the argument by stacking 4 copies along a new leading axis.
      t_arg = np.stack([arg] * 4)
      return self.ConvertAndCompare(jax.vmap(func), t_arg)
    if transform == "jvp_vmap":
      jvp_func = lambda x, xt: jax.jvp(jax.vmap(func), (x,), (xt,))
      t_arg = np.stack([arg] * 4)
      return self.ConvertAndCompare(jvp_func, t_arg, np.full_like(t_arg, 0.1))
    if transform == "grad_vmap":
      grad_func = jax.grad(lambda x: jnp.sum(jax.vmap(func)(x)))
      t_arg = np.stack([arg] * 4)
      return self.ConvertAndCompare(grad_func, t_arg)
    assert False, transform

  def CheckShapePolymorphism(self, f_jax: Callable, *,
                             input_signature: Sequence[tf.TensorSpec],
                             polymorphic_shapes: Optional[Sequence[Any]],
                             expected_output_signature: Optional[tf.TensorSpec] = None,
                             enable_xla: bool = True):
    """Converts a function using polymorphic shapes.

    Args:
      f_jax: a JAX function of `n` arguments
      input_signature: used as the input signature for the tf.function.
      polymorphic_shapes: Specifies input shapes to be treated polymorphically
        during conversion.
      expected_output_signature: if given, this function tests whether the
        actual output signature is equal to this one.
      enable_xla: Whether to enable XLA conversion for jax2tf.convert.
    """
    f_tf = jax2tf.convert(f_jax, polymorphic_shapes=polymorphic_shapes,
                          enable_xla=enable_xla)
    f_tf_func = tf.function(
        f_tf, autograph=False, input_signature=input_signature)
    concrete_f_tf = f_tf_func.get_concrete_function(*input_signature)
    if expected_output_signature:
      # Strangely, output_shapes can be a single shape for a function with a
      # single result, or a list/tuple of shapes.
      concrete_output_tf_shape = concrete_f_tf.output_shapes
      if not isinstance(concrete_output_tf_shape, (tuple, list)):  # Single result
        assert not isinstance(expected_output_signature, (tuple, list))
        expected_output_signature = [expected_output_signature]
        concrete_output_tf_shape = [concrete_output_tf_shape]

      for expected, found in util.safe_zip(expected_output_signature,
                                           concrete_output_tf_shape):
        self.assertEqual(tuple(expected.shape), tuple(found))
    return f_tf

  def TfToHlo(self, tf_fun: Callable, *args):
    """Compiles `tf_fun` for `args` and returns its HLO text.

    Useful to inspect for occurrence of substrings. This works whether we use
    native lowering or not.
    """
    tf_function = tf.function(tf_fun, autograph=False, jit_compile=True)
    device_name = f"/device:{jtu.device_under_test().upper()}:0"
    return tf_function.experimental_get_compiler_ir(*args)(stage="hlo",
                                                           device_name=device_name)

  def CountLargeTfConstants(self, tf_fun: Callable, *args,
                            at_least=256):
    """Counts how many "large" constants are embedded in the traced TF graph.

    A hacky heuristic: a constant counts as large when the textual
    representation of its value is at least `at_least` characters long.
    """
    f_tf_graph = tf.function(tf_fun, autograph=False).get_concrete_function(*args).graph.as_graph_def()
    if config.jax2tf_default_experimental_native_lowering:
      # This way of finding constants may be brittle, if the constant representation
      # contains >. It seems to be hex-encoded, so this may be safe.
      large_consts = [m for m in re.findall(r"dense<([^>]+)>", str(f_tf_graph)) if len(m) >= at_least]
    else:
      # We cannot find the constants just with string matching because their
      # representation may contain escaped "
      large_consts = [n for n in f_tf_graph.node if n.op == "Const" and len(str(n)) >= at_least]
    return len(large_consts)

  def CheckOpMetadata(self, jax_fun, x,
                      expected: Sequence[OpMetadataGraph],
                      include_xla_op_metadata=True):
    """Checks that the tf.Graph obtained by converting `jax_fun` for argument
    `x` contains all the given OpMetadata.

    If `not include_xla_op_metadata` then disable the generation of the
    OpMetadata attributes, and check that we don't find any ops with
    metadata.
    """
    f_tf = tf.function(
        jax2tf.convert(jax_fun,
                       include_xla_op_metadata=include_xla_op_metadata),
        autograph=False,
        input_signature=[tf.TensorSpec(x.shape, x.dtype)])
    # Trace the TF function to a graph
    f_tf_concrete = f_tf.get_concrete_function(tf.convert_to_tensor(x))

    found_tf_ops = []
    def iter_nested_graph(graph: tf.Graph):
      for n in graph._nodes_by_id.values():
        try:
          op_metadata = n.get_attr("_XlaOpMetadata")
          op_metadata_proto = xla_data_pb2.OpMetadata()
          op_metadata_proto.ParseFromString(op_metadata)
          found_tf_ops.append(
              OpMetadataGraph(
                  tf_type=n.type,
                  op_name=op_metadata_proto.op_name,
                  op_type=op_metadata_proto.op_type,
                  source_file=op_metadata_proto.source_file,
                  source_line=op_metadata_proto.source_line))
        except ValueError:
          # Ops without a _XlaOpMetadata attribute raise ValueError; skip them.
          continue

        # Look for nested graphs. There probably is a better way!
        if n.type == "StatelessWhile":
          iter_nested_graph(n._body_graph)
          iter_nested_graph(n._cond_graph)
        if n.type == "StatelessCase":
          for idx in range(10):  # How can I tell how many cases there are?
            branch = getattr(n, f"_branch_graph_{idx}", None)
            if branch is None:
              break
            iter_nested_graph(branch)

    iter_nested_graph(f_tf_concrete.graph)
    try:
      if include_xla_op_metadata:
        self.assertContainsSubset(expected, found_tf_ops)
      else:
        self.assertEmpty(found_tf_ops)
    except Exception:
      # Dump what we actually found to make the failure actionable.
      print("Found nodes:\n  ", "\n   ".join([str(md) for md in found_tf_ops]))
      raise
| {
"content_hash": "79cba0f54ef5375bd055f63dcbb50d33",
"timestamp": "",
"source": "github",
"line_count": 470,
"max_line_length": 134,
"avg_line_length": 41.86808510638298,
"alnum_prop": 0.629992885455839,
"repo_name": "google/jax",
"id": "cdf8cc07561598515808f7d2140572b1a5948a19",
"size": "20260",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax/experimental/jax2tf/tests/tf_test_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "25710"
},
{
"name": "C++",
"bytes": "233622"
},
{
"name": "Dockerfile",
"bytes": "1514"
},
{
"name": "Jupyter Notebook",
"bytes": "98807"
},
{
"name": "Python",
"bytes": "7395044"
},
{
"name": "Shell",
"bytes": "17273"
},
{
"name": "Starlark",
"bytes": "88279"
}
],
"symlink_target": ""
} |
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.brain import BrainFileConfiguration
class UtiltyTestClient(TestClient):
    """Test client whose brain is configured with the AIML files next to this module."""

    def __init__(self):
        super(UtiltyTestClient, self).__init__()

    def load_configuration(self, arguments):
        super(UtiltyTestClient, self).load_configuration(arguments)
        aiml_dir = os.path.dirname(__file__)
        files_config = BrainFileConfiguration(aiml_dir, ".aiml", False)
        self.configuration.brain_configuration._aiml_files = files_config
class UtiltyAIMLTests(unittest.TestCase):
    """Exercises the util AIML grammar through a freshly configured test client."""

    def setUp(self):
        # Stored on the class, matching how the other AIML test suites keep
        # a reference to their client.
        UtiltyAIMLTests.test_client = UtiltyTestClient()

    def test_util_function(self):
        client = UtiltyAIMLTests.test_client
        response = client.bot.ask_question("test", "KEITH IS A PROGRAMMER")
        self.assertIsNotNone(response)
        self.assertEqual(response, 'Ok, I will remember KEITH is a PROGRAMMER .')
| {
"content_hash": "a29528714c7b07e49bb3ff43a6f2561a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 126,
"avg_line_length": 35.083333333333336,
"alnum_prop": 0.7268408551068883,
"repo_name": "CHT5/program-y",
"id": "6e6b86427531635c6b6ef299bbcac004a7e2bb17",
"size": "842",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test/aiml_tests/util_tests/test_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1580"
},
{
"name": "Python",
"bytes": "1027605"
},
{
"name": "Shell",
"bytes": "2835"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: adds the boolean `current` column to server_request."""

    def forwards(self, orm):
        # Adding field 'Request.current'
        # Defaults existing rows to True, then drops the column default.
        db.add_column('server_request', 'current',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Request.current'
        db.delete_column('server_request', 'current')

    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'server.computer': {
            'Meta': {'ordering': "['serial']", 'object_name': 'Computer'},
            'computername': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_checkin': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'recovery_key': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'serial': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'server.request': {
            'Meta': {'object_name': 'Request'},
            'approved': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'auth_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_user'", 'null': 'True', 'to': "orm['auth.User']"}),
            'computer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['server.Computer']"}),
            'current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'date_approved': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_requested': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'reason_for_approval': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'reason_for_request': ('django.db.models.fields.TextField', [], {}),
            'requesting_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'requesting_user'", 'to': "orm['auth.User']"})
        }
    }
complete_apps = ['server'] | {
"content_hash": "c69a5feb74a5ca6fde9c66248208b79f",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 182,
"avg_line_length": 67.6219512195122,
"alnum_prop": 0.5538322813345357,
"repo_name": "eahrold/Crypt-Server",
"id": "6c5140991ba95a3d9b2d15e3d214682693795f35",
"size": "5569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/migrations/0008_auto__add_field_request_current.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "59336"
},
{
"name": "Python",
"bytes": "47282"
}
],
"symlink_target": ""
} |
import logging
import os
from pants.backend.codegen.protobuf.subsystems.protoc import Protoc
from pants.base.workunit import WorkUnitLabel
from pants.scm.git import Git
from pants.subsystem.subsystem import Subsystem, SubsystemError
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method
from pants.contrib.go.subsystems.go_distribution import GoDistribution
# Module-level logger for bootstrap progress messages.
logger = logging.getLogger(__name__)
class ProtocGenGo(Subsystem):
    """A compiled protobuf plugin that generates Go code.

    For details, see https://github.com/golang/protobuf
    """

    options_scope = "protoc-gen-go"

    @classmethod
    def register_options(cls, register):
        # Registers the --version option that selects which tag of
        # golang/protobuf to build the plugin from.
        super().register_options(register)
        register(
            "--version",
            default="v1.1.0",
            help="Version of protoc-gen-go plugin to use when generating code",
        )

    @classmethod
    def subsystem_dependencies(cls):
        # Needs protoc (scoped to this subsystem) and a Go distribution to
        # `go install` the plugin.
        return super().subsystem_dependencies() + (Protoc.scoped(cls), GoDistribution,)

    @memoized_method
    def select(self, context):
        """Bootstraps (at most once per run) and returns the path to the protoc-gen-go binary.

        Clones golang/protobuf at the configured version into a
        version-specific workdir and `go install`s the plugin there; if the
        binary already exists on disk, it is reused.
        """
        # NOTE(review): the result of this call is discarded — looks like a
        # leftover (the options are re-read below); confirm whether it can be
        # removed safely.
        self.get_options()
        workdir = os.path.join(
            self.get_options().pants_workdir,
            self.options_scope,
            "versions",
            self.get_options().version,
        )
        tool_path = os.path.join(workdir, "bin/protoc-gen-go")
        if not os.path.exists(tool_path):
            safe_mkdir(workdir, clean=True)
            # Checkout the git repo at a given version. `go get` always gets master.
            repo = Git.clone(
                "https://github.com/golang/protobuf.git",
                os.path.join(workdir, "src/github.com/golang/protobuf"),
            )
            repo.set_state(self.get_options().version)
            go = GoDistribution.global_instance()
            result, go_cmd = go.execute_go_cmd(
                cmd="install",
                gopath=workdir,
                args=["github.com/golang/protobuf/protoc-gen-go"],
                workunit_factory=context.new_workunit,
                workunit_labels=[WorkUnitLabel.BOOTSTRAP],
            )
            if result != 0:
                raise SubsystemError(f"{go_cmd} failed with exit code {result}")
        logger.info(f"Selected {self.options_scope} binary bootstrapped to: {tool_path}")
        return tool_path
| {
"content_hash": "dcf50369db63ee77b3cd23dfd1c97c1f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 89,
"avg_line_length": 33,
"alnum_prop": 0.6209987195902689,
"repo_name": "wisechengyi/pants",
"id": "92a58b14a1633c0f9119a5cfa6af829edd070f4a",
"size": "2475",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/go/src/python/pants/contrib/go/subsystems/protoc_gen_go.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "6634"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "507948"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7608990"
},
{
"name": "Rust",
"bytes": "1005243"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "105217"
},
{
"name": "Starlark",
"bytes": "489739"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import django.test
from django import template
try:
from django.utils import unittest
except ImportError: # Django >= 1.9
import unittest
from ..templatetags import faqtags
from ..models import Topic
class FAQTagsSyntaxTests(unittest.TestCase):
    """
    Tests for the template-tag syntax/compilation functions.

    Broken out here so that they don't have to be django.test.TestCases,
    which are slower.
    """

    def compile(self, tagfunc, token_contents):
        """
        Mock out a call to a template compilation function.

        Assumes the tag doesn't use the parser, so this won't work for
        block tags.
        """
        token = template.base.Token(template.base.TOKEN_BLOCK, token_contents)
        return tagfunc(None, token)

    def test_faqs_for_topic_compile(self):
        node = self.compile(faqtags.faqs_for_topic,
                            "faqs_for_topic 15 'some-slug' as faqs")
        self.assertEqual(node.num.var, "15")
        self.assertEqual(node.topic.var, "'some-slug'")
        self.assertEqual(node.varname, "faqs")

    def test_faqs_for_topic_too_few_arguments(self):
        with self.assertRaises(template.TemplateSyntaxError):
            self.compile(faqtags.faqs_for_topic, "faqs_for_topic 15 'some-slug' as")

    def test_faqs_for_topic_too_many_arguments(self):
        with self.assertRaises(template.TemplateSyntaxError):
            self.compile(faqtags.faqs_for_topic,
                         "faqs_for_topic 15 'some-slug' as varname foobar")

    def test_faqs_for_topic_bad_as(self):
        with self.assertRaises(template.TemplateSyntaxError):
            self.compile(faqtags.faqs_for_topic,
                         "faqs_for_topic 15 'some-slug' blahblah varname")

    def test_faq_list_compile(self):
        node = self.compile(faqtags.faq_list, "faq_list 15 as faqs")
        self.assertEqual(node.num.var, "15")
        self.assertEqual(node.varname, "faqs")

    def test_faq_list_too_few_arguments(self):
        with self.assertRaises(template.TemplateSyntaxError):
            self.compile(faqtags.faq_list, "faq_list 15")

    def test_faq_list_too_many_arguments(self):
        with self.assertRaises(template.TemplateSyntaxError):
            self.compile(faqtags.faq_list, "faq_list 15 as varname foobar")

    def test_faq_list_bad_as(self):
        with self.assertRaises(template.TemplateSyntaxError):
            self.compile(faqtags.faq_list, "faq_list 15 blahblah varname")
class FAQTagsNodeTests(django.test.TestCase):
    """
    Tests for the node classes themselves, and hence the rendering functions.

    Uses `assertIn`/`assertNotIn` instead of the long-deprecated `assert_`
    alias, which was removed in Python 3.12.
    """
    fixtures = ['faq_test_data.json']

    def test_faqs_for_topic_node(self):
        context = template.Context()
        node = faqtags.FaqListNode(num='5', topic='"silly-questions"', varname="faqs")
        content = node.render(context)
        # Rendering emits nothing; the FAQs land in the context variable.
        self.assertEqual(content, "")
        self.assertQuerysetEqual(context['faqs'],
                                 ['<Question: What is your favorite color?>',
                                  '<Question: What is your quest?>'])

    def test_faqs_for_topic_node_variable_arguments(self):
        """
        Test faqs_for_topic with variable arguments.
        """
        context = template.Context({'topic': Topic.objects.get(pk=1),
                                    'number': 1})
        node = faqtags.FaqListNode(num='number', topic='topic', varname="faqs")
        content = node.render(context)
        self.assertEqual(content, "")
        self.assertQuerysetEqual(context['faqs'], ["<Question: What is your favorite color?>"])

    def test_faqs_for_topic_node_invalid_variables(self):
        # Unresolvable variables should fail silently: no output, no context var.
        context = template.Context()
        node = faqtags.FaqListNode(num='number', topic='topic', varname="faqs")
        content = node.render(context)
        self.assertEqual(content, "")
        self.assertNotIn("faqs", context,
                         "faqs variable shouldn't have been added to the context.")

    def test_faq_list_node(self):
        context = template.Context()
        node = faqtags.FaqListNode(num='5', varname="faqs")
        content = node.render(context)
        self.assertEqual(content, "")
        self.assertQuerysetEqual(context['faqs'],
                                 ['<Question: What is your favorite color?>',
                                  '<Question: What is your quest?>',
                                  '<Question: What is Django-fack?>'])

    def test_faq_list_node_variable_arguments(self):
        """
        Test faq_list with variable arguments.
        """
        context = template.Context({'topic': Topic.objects.get(pk=1),
                                    'number': 1})
        node = faqtags.FaqListNode(num='number', varname="faqs")
        content = node.render(context)
        self.assertEqual(content, "")
        self.assertQuerysetEqual(context['faqs'], ["<Question: What is your favorite color?>"])

    def test_faq_list_node_invalid_variables(self):
        context = template.Context()
        node = faqtags.FaqListNode(num='number', varname="faqs")
        content = node.render(context)
        self.assertEqual(content, "")
        self.assertNotIn("faqs", context,
                         "faqs variable shouldn't have been added to the context.")

    def test_faq_topic_list(self):
        context = template.Context()
        node = faqtags.TopicListNode(varname="topic_list")
        content = node.render(context)
        self.assertEqual(content, "")
        self.assertIn("topic_list", context, "topic_list should be in context")
        self.assertEqual(len(context['topic_list']), 2)
| {
"content_hash": "f75a888cccd8cbee6a561e777df2b85e",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 95,
"avg_line_length": 41.16438356164384,
"alnum_prop": 0.5760399334442595,
"repo_name": "revsys/django-fack",
"id": "fc711314dbfb6dbd76b1143abfd8b582b1f7f7a5",
"size": "6010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fack/tests/test_templatetags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3474"
},
{
"name": "Python",
"bytes": "34264"
}
],
"symlink_target": ""
} |
from tools.load import LoadMatrix
from numpy import random
lm=LoadMatrix()
# Fixed seed so the example is reproducible.
random.seed(17)
# The predicted labels are the ground truth scaled by 2, so only class-0
# entries still agree with the true labels — this exercises the evaluator
# on an imperfect prediction.
ground_truth = lm.load_labels('../data/label_train_multiclass.dat')
predicted = lm.load_labels('../data/label_train_multiclass.dat') * 2
# `parameter_list` is the convention the shogun example harness uses to
# drive examples with sample arguments — presumably consumed by the test
# runner; verify against the harness if changing its shape.
parameter_list = [[ground_truth,predicted]]
def evaluation_multiclassaccuracy(ground_truth, predicted):
    """Returns the multiclass accuracy of `predicted` against `ground_truth`."""
    import shogun as sg
    from shogun import MulticlassLabels
    true_labels = MulticlassLabels(ground_truth)
    pred_labels = MulticlassLabels(predicted)
    evaluator = sg.evaluation("MulticlassAccuracy")
    return evaluator.evaluate(pred_labels, true_labels)
if __name__ == '__main__':
    # Run the example once with the module-level sample data.
    print('MulticlassAccuracy')
    evaluation_multiclassaccuracy(*parameter_list[0])
| {
"content_hash": "536807b92c5695f7e51c83aac45579fa",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 68,
"avg_line_length": 27.48148148148148,
"alnum_prop": 0.7735849056603774,
"repo_name": "besser82/shogun",
"id": "aef31f603920f134b5e0a034202ceafa834469d0",
"size": "764",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "examples/undocumented/python/evaluation_multiclassaccuracy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "64"
},
{
"name": "Batchfile",
"bytes": "615"
},
{
"name": "C",
"bytes": "12178"
},
{
"name": "C++",
"bytes": "10261995"
},
{
"name": "CMake",
"bytes": "193647"
},
{
"name": "Dockerfile",
"bytes": "2046"
},
{
"name": "GDB",
"bytes": "89"
},
{
"name": "HTML",
"bytes": "2060"
},
{
"name": "MATLAB",
"bytes": "8755"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "286724"
},
{
"name": "SWIG",
"bytes": "385845"
},
{
"name": "Shell",
"bytes": "7267"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
import argparse
# arguments
examples = """examples:
./zfsdist # show operation latency as a histogram
./zfsdist -p 181 # trace PID 181 only
./zfsdist 1 10 # print 1 second summaries, 10 times
./zfsdist -m 5 # 5s summaries, milliseconds
"""
parser = argparse.ArgumentParser(
description="Summarize ZFS operation latency",
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=examples)
parser.add_argument("-T", "--notimestamp", action="store_true",
help="don't include timestamp on interval output")
parser.add_argument("-m", "--milliseconds", action="store_true",
help="output in milliseconds")
parser.add_argument("-p", "--pid",
help="trace this PID only")
parser.add_argument("interval", nargs="?",
help="output interval, in seconds")
parser.add_argument("count", nargs="?", default=99999999,
help="number of outputs")
parser.add_argument("--ebpf", action="store_true",
help=argparse.SUPPRESS)
args = parser.parse_args()
pid = args.pid
countdown = int(args.count)
if args.milliseconds:
factor = 1000000
label = "msecs"
else:
factor = 1000
label = "usecs"
if args.interval and int(args.interval) == 0:
print("ERROR: interval 0. Exiting.")
exit()
debug = 0
# define BPF program
# NOTE: FACTOR and FILTER_PID are textual placeholders replaced below before
# the program is compiled; do not rename them without updating the .replace()
# calls.
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/fs.h>
#include <linux/sched.h>
#define OP_NAME_LEN 8
typedef struct dist_key {
    char op[OP_NAME_LEN];
    u64 slot;
} dist_key_t;
BPF_HASH(start, u32);
BPF_HISTOGRAM(dist, dist_key_t);
// time operation
int trace_entry(struct pt_regs *ctx)
{
    u32 pid = bpf_get_current_pid_tgid();
    if (FILTER_PID)
        return 0;
    u64 ts = bpf_ktime_get_ns();
    start.update(&pid, &ts);
    return 0;
}
static int trace_return(struct pt_regs *ctx, const char *op)
{
    u64 *tsp;
    u32 pid = bpf_get_current_pid_tgid();
    // fetch timestamp and calculate delta
    tsp = start.lookup(&pid);
    if (tsp == 0) {
        return 0; // missed start or filtered
    }
    u64 delta = (bpf_ktime_get_ns() - *tsp) / FACTOR;
    // store as histogram
    dist_key_t key = {.slot = bpf_log2l(delta)};
    __builtin_memcpy(&key.op, op, sizeof(key.op));
    dist.increment(key);
    start.delete(&pid);
    return 0;
}
int trace_read_return(struct pt_regs *ctx)
{
    char *op = "read";
    return trace_return(ctx, op);
}
int trace_write_return(struct pt_regs *ctx)
{
    char *op = "write";
    return trace_return(ctx, op);
}
int trace_open_return(struct pt_regs *ctx)
{
    char *op = "open";
    return trace_return(ctx, op);
}
int trace_fsync_return(struct pt_regs *ctx)
{
    char *op = "fsync";
    return trace_return(ctx, op);
}
"""
# Substitute the code-generation placeholders: FACTOR scales nanosecond
# deltas to the chosen unit; FILTER_PID becomes a real PID comparison or a
# constant 0 (no filtering).
bpf_text = bpf_text.replace('FACTOR', str(factor))
if args.pid:
    bpf_text = bpf_text.replace('FILTER_PID', 'pid != %s' % pid)
else:
    bpf_text = bpf_text.replace('FILTER_PID', '0')
if debug or args.ebpf:
    print(bpf_text)
    if args.ebpf:
        exit()
# load BPF program
b = BPF(text=bpf_text)
# common file functions
# NOTE(review): different ZFS-on-Linux builds expose different entry points
# (zpl_iter_*, zpl_aio_*, or plain zpl_read/zpl_write), so probe whichever
# set exists on this kernel -- confirm against the loaded zfs module.
if BPF.get_kprobe_functions(b'zpl_iter'):
    b.attach_kprobe(event="zpl_iter_read", fn_name="trace_entry")
    b.attach_kprobe(event="zpl_iter_write", fn_name="trace_entry")
elif BPF.get_kprobe_functions(b'zpl_aio'):
    b.attach_kprobe(event="zpl_aio_read", fn_name="trace_entry")
    b.attach_kprobe(event="zpl_aio_write", fn_name="trace_entry")
else:
    b.attach_kprobe(event="zpl_read", fn_name="trace_entry")
    b.attach_kprobe(event="zpl_write", fn_name="trace_entry")
b.attach_kprobe(event="zpl_open", fn_name="trace_entry")
b.attach_kprobe(event="zpl_fsync", fn_name="trace_entry")
# return probes mirror the entry probes chosen above
if BPF.get_kprobe_functions(b'zpl_iter'):
    b.attach_kretprobe(event="zpl_iter_read", fn_name="trace_read_return")
    b.attach_kretprobe(event="zpl_iter_write", fn_name="trace_write_return")
elif BPF.get_kprobe_functions(b'zpl_aio'):
    b.attach_kretprobe(event="zpl_aio_read", fn_name="trace_read_return")
    b.attach_kretprobe(event="zpl_aio_write", fn_name="trace_write_return")
else:
    b.attach_kretprobe(event="zpl_read", fn_name="trace_read_return")
    b.attach_kretprobe(event="zpl_write", fn_name="trace_write_return")
b.attach_kretprobe(event="zpl_open", fn_name="trace_open_return")
b.attach_kretprobe(event="zpl_fsync", fn_name="trace_fsync_return")
print("Tracing ZFS operation latency... Hit Ctrl-C to end.")
# output
exiting = 0
dist = b.get_table("dist")
while (1):
    try:
        if args.interval:
            sleep(int(args.interval))
        else:
            sleep(99999999)
    except KeyboardInterrupt:
        # Ctrl-C: print one final summary below, then exit.
        exiting = 1
    print()
    if args.interval and (not args.notimestamp):
        print(strftime("%H:%M:%S:"))
    dist.print_log2_hist(label, "operation")
    dist.clear()
    countdown -= 1
    if exiting or countdown == 0:
        exit()
| {
"content_hash": "464684e434885fec0f4c8f520ae93c78",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 76,
"avg_line_length": 28.335260115606935,
"alnum_prop": 0.653406772745818,
"repo_name": "mcaleavya/bcc",
"id": "6b29b99baddf8cee4bdc30dc993b35e34f807aa4",
"size": "5258",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/zfsdist.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "321768"
},
{
"name": "C++",
"bytes": "920975"
},
{
"name": "CMake",
"bytes": "38841"
},
{
"name": "HTML",
"bytes": "2979"
},
{
"name": "LLVM",
"bytes": "4379"
},
{
"name": "Limbo",
"bytes": "6069"
},
{
"name": "Lua",
"bytes": "298149"
},
{
"name": "Makefile",
"bytes": "1481"
},
{
"name": "P4",
"bytes": "9242"
},
{
"name": "Python",
"bytes": "1206933"
},
{
"name": "Shell",
"bytes": "17023"
},
{
"name": "Yacc",
"bytes": "19817"
}
],
"symlink_target": ""
} |
"""Enum backports from standard lib."""
from __future__ import annotations
from enum import Enum
from typing import Any, TypeVar
T = TypeVar("T", bound="StrEnum")

class StrEnum(str, Enum):
    """Minimal stand-in for Python 3.11's StrEnum, covering our basic needs."""

    def __new__(cls: type[T], value: str, *args: Any, **kwargs: Any) -> T:
        """Create a new member, rejecting any non-string value."""
        if isinstance(value, str):
            return super().__new__(cls, value, *args, **kwargs)
        raise TypeError(f"{value!r} is not a string")

    def __str__(self) -> str:
        """Return the member's plain string value."""
        return str(self.value)

    @staticmethod
    def _generate_next_value_(  # pylint: disable=arguments-differ # https://github.com/PyCQA/pylint/issues/5371
        name: str, start: int, count: int, last_values: list[Any]
    ) -> Any:
        """
        Reject ``auto()`` outright.

        Python 3.11's ``StrEnum.auto()`` semantics may still change, so we
        refuse to emulate them until they are stable.
        """
        raise TypeError("auto() is not supported by this implementation")
| {
"content_hash": "78df15421ba00252cf300d1ac943cf29",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 112,
"avg_line_length": 34.09090909090909,
"alnum_prop": 0.6151111111111112,
"repo_name": "GenericStudent/home-assistant",
"id": "21302fe9f7ba4f15e6b306c7b85997b19a77fb12",
"size": "1125",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/backports/enum.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import weakref
import logging
from ..transport import Transport
from ..exceptions import NotFoundError, TransportError
from ..compat import string_types, urlparse
from .indices import IndicesClient
from .cluster import ClusterClient
from .cat import CatClient
from .nodes import NodesClient
from .snapshot import SnapshotClient
from .utils import query_params, _make_path, SKIP_IN_PATH
# Module-level logger for this package; handler/level configuration is left
# to the application.
logger = logging.getLogger('elasticsearch')
def _normalize_hosts(hosts):
"""
Helper function to transform hosts argument to
:class:`~elasticsearch.Elasticsearch` to a list of dicts.
"""
# if hosts are empty, just defer to defaults down the line
if hosts is None:
return [{}]
# passed in just one string
if isinstance(hosts, string_types):
hosts = [hosts]
out = []
# normalize hosts to dicts
for host in hosts:
if isinstance(host, string_types):
if '://' not in host:
host = "//%s" % host
parsed_url = urlparse(host)
h = {"host": parsed_url.hostname}
if parsed_url.port:
h["port"] = parsed_url.port
if parsed_url.scheme == "https":
h['port'] = parsed_url.port or 443
h['use_ssl'] = True
h['scheme'] = 'http'
elif parsed_url.scheme:
h['scheme'] = parsed_url.scheme
if parsed_url.username or parsed_url.password:
h['http_auth'] = '%s:%s' % (parsed_url.username, parsed_url.password)
if parsed_url.path and parsed_url.path != '/':
h['url_prefix'] = parsed_url.path
out.append(h)
else:
out.append(host)
return out
class Elasticsearch(object):
    """
    Elasticsearch low-level client. Provides a straightforward mapping from
    Python to ES REST endpoints.

    The instance has attributes ``cat``, ``cluster``, ``indices``, ``nodes``
    and ``snapshot`` that provide access to instances of
    :class:`~elasticsearch.client.CatClient`,
    :class:`~elasticsearch.client.ClusterClient`,
    :class:`~elasticsearch.client.IndicesClient`,
    :class:`~elasticsearch.client.NodesClient` and
    :class:`~elasticsearch.client.SnapshotClient` respectively. This is the
    preferred (and only supported) way to get access to those classes and their
    methods.

    You can specify your own connection class which should be used by providing
    the ``connection_class`` parameter::

        # create connection to localhost using the ThriftConnection
        es = Elasticsearch(connection_class=ThriftConnection)

    If you want to turn on :ref:`sniffing` you have several options (described
    in :class:`~elasticsearch.Transport`)::

        # create connection that will automatically inspect the cluster to get
        # the list of active nodes. Start with nodes running on 'esnode1' and
        # 'esnode2'
        es = Elasticsearch(
            ['esnode1', 'esnode2'],
            # sniff before doing anything
            sniff_on_start=True,
            # refresh nodes after a node fails to respond
            sniff_on_connection_fail=True,
            # and also every 60 seconds
            sniffer_timeout=60
        )

    Different hosts can have different parameters, use a dictionary per node to
    specify those::

        # connect to localhost directly and another node using SSL on port 443
        # and an url_prefix. Note that ``port`` needs to be an int.
        es = Elasticsearch([
            {'host': 'localhost'},
            {'host': 'othernode', 'port': 443, 'url_prefix': 'es', 'use_ssl': True},
        ])

    If using SSL, there are several parameters that control how we deal with
    certificates (see :class:`~elasticsearch.Urllib3HttpConnection` for
    detailed description of the options)::

        es = Elasticsearch(
            ['localhost:443', 'other_host:443'],
            # turn on SSL
            use_ssl=True,
            # make sure we verify SSL certificates (off by default)
            verify_certs=True,
            # provide a path to CA certs on disk
            ca_certs='/path/to/CA_certs'
        )

    Alternatively you can use RFC-1738 formatted URLs, as long as they are not
    in conflict with other options::

        es = Elasticsearch(
            [
                'http://user:secret@localhost:9200/',
                'https://user:secret@other_host:443/production'
            ],
            verify_certs=True
        )
    """
def __init__(self, hosts=None, transport_class=Transport, **kwargs):
"""
:arg hosts: list of nodes we should connect to. Node should be a
dictionary ({"host": "localhost", "port": 9200}), the entire dictionary
will be passed to the :class:`~elasticsearch.Connection` class as
kwargs, or a string in the format of ``host[:port]`` which will be
translated to a dictionary automatically. If no value is given the
:class:`~elasticsearch.Urllib3HttpConnection` class defaults will be used.
:arg transport_class: :class:`~elasticsearch.Transport` subclass to use.
:arg kwargs: any additional arguments will be passed on to the
:class:`~elasticsearch.Transport` class and, subsequently, to the
:class:`~elasticsearch.Connection` instances.
"""
self.transport = transport_class(_normalize_hosts(hosts), **kwargs)
# namespaced clients for compatibility with API names
# use weakref to make GC's work a little easier
self.indices = IndicesClient(weakref.proxy(self))
self.cluster = ClusterClient(weakref.proxy(self))
self.cat = CatClient(weakref.proxy(self))
self.nodes = NodesClient(weakref.proxy(self))
self.snapshot = SnapshotClient(weakref.proxy(self))
def __repr__(self):
try:
# get a lost of all connections
cons = self.transport.hosts
# truncate to 10 if there are too many
if len(cons) > 5:
cons = cons[:5] + ['...']
return '<Elasticsearch(%r)>' % cons
except:
# probably operating on custom transport and connection_pool, ignore
return super(Elasticsearch, self).__repr__()
def _bulk_body(self, body):
# if not passed in a string, serialize items and join by newline
if not isinstance(body, string_types):
body = '\n'.join(map(self.transport.serializer.dumps, body))
# bulk body must end with a newline
if not body.endswith('\n'):
body += '\n'
return body
@query_params()
def ping(self, params=None):
"""
Returns True if the cluster is up, False otherwise.
`<http://www.elastic.co/guide/>`_
"""
try:
self.transport.perform_request('HEAD', '/', params=params)
except NotFoundError:
return False
return True
@query_params()
def info(self, params=None):
"""
Get the basic info from the current cluster.
`<http://www.elastic.co/guide/>`_
"""
_, data = self.transport.perform_request('GET', '/', params=params)
return data
@query_params('consistency', 'parent', 'refresh', 'routing',
'timeout', 'timestamp', 'ttl', 'version', 'version_type')
def create(self, index, doc_type, body, id=None, params=None):
"""
Adds a typed JSON document in a specific index, making it searchable.
Behind the scenes this method calls index(..., op_type='create')
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg body: The document
:arg id: Document ID
:arg consistency: Explicit write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg parent: ID of the parent document
:arg refresh: Refresh the index after performing the operation
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
return self.index(index, doc_type, body, id=id, params=params, op_type='create')
@query_params('consistency', 'op_type', 'parent', 'refresh', 'replication',
'routing', 'timeout', 'timestamp', 'ttl', 'version', 'version_type')
def index(self, index, doc_type, body, id=None, params=None):
"""
Adds or updates a typed JSON document in a specific index, making it searchable.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg body: The document
:arg id: Document ID
:arg consistency: Explicit write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg parent: ID of the parent document
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type, default 'sync', valid
choices are: 'sync', 'async'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('POST' if id in SKIP_IN_PATH else 'PUT',
_make_path(index, doc_type, id), params=params, body=body)
return data
@query_params('parent', 'preference', 'realtime', 'refresh', 'routing')
def exists(self, index, doc_type, id, params=None):
"""
Returns a boolean indicating whether or not given document exists in Elasticsearch.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document (use `_all` to fetch the first
document matching the ID across all types)
:arg id: The document ID
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
try:
self.transport.perform_request('HEAD', _make_path(index, doc_type,
id), params=params)
except NotFoundError:
return False
return True
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'parent', 'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get(self, index, id, doc_type='_all', params=None):
"""
Get a typed JSON document from the index based on its id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document (use `_all` to fetch the first
document matching the ID across all types)
:arg id: The document ID
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg fields: A comma-separated list of fields to return in the response
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, id), params=params)
return data
@query_params('_source', '_source_exclude', '_source_include', 'parent',
'preference', 'realtime', 'refresh', 'routing', 'version',
'version_type')
def get_source(self, index, doc_type, id, params=None):
"""
Get the source of a document by it's index, type and id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document; use `_all` to fetch the first
document matching the ID across all types
:arg id: The document ID
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_source'), params=params)
return data
@query_params('_source', '_source_exclude', '_source_include', 'fields',
'preference', 'realtime', 'refresh')
def mget(self, body, index=None, doc_type=None, params=None):
"""
Get multiple documents based on an index, type (optional) and ids.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html>`_
:arg body: Document identifiers; can be either `docs` (containing full
document information) or `ids` (when index and type is provided in
the URL.
:arg index: The name of the index
:arg doc_type: The type of the document
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg fields: A comma-separated list of fields to return in the response
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_mget'), params=params, body=body)
return data
@query_params('consistency', 'fields', 'lang', 'parent', 'refresh',
'replication', 'retry_on_conflict', 'routing', 'script', 'script_id',
'scripted_upsert', 'timeout', 'timestamp', 'ttl', 'version',
'version_type')
def update(self, index, doc_type, id, body=None, params=None):
"""
Update a document based on a script or partial data provided.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: Document ID
:arg body: The request definition using either `script` or partial `doc`
:arg consistency: Explicit write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg fields: A comma-separated list of fields to return in the response
:arg lang: The script language (default: groovy)
:arg parent: ID of the parent document. Is is only used for routing and
when for the upsert request
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type, default 'sync', valid
choices are: 'sync', 'async'
:arg retry_on_conflict: Specify how many times should the operation be
retried when a conflict occurs (default: 0)
:arg routing: Specific routing value
:arg script: The URL-encoded script definition (instead of using request
body)
:arg script_id: The id of a stored script
:arg scripted_upsert: True if the script referenced in script or
script_id should be called to perform inserts - defaults to false
:arg timeout: Explicit operation timeout
:arg timestamp: Explicit timestamp for the document
:arg ttl: Expiration time for the document
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('POST', _make_path(index,
doc_type, id, '_update'), params=params, body=body)
return data
@query_params('_source', '_source_exclude', '_source_include',
'allow_no_indices', 'analyze_wildcard', 'analyzer', 'default_operator',
'df', 'expand_wildcards', 'explain', 'fielddata_fields', 'fields',
'from_', 'ignore_unavailable', 'lenient', 'lowercase_expanded_terms',
'preference', 'q', 'request_cache', 'routing', 'scroll', 'search_type',
'size', 'sort', 'stats', 'suggest_field', 'suggest_mode',
'suggest_size', 'suggest_text', 'terminate_after', 'timeout',
'track_scores', 'version')
def search(self, index=None, doc_type=None, body=None, params=None):
"""
Execute a search query and get back search hits that match the query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-search.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search; leave
empty to perform the operation on all types
:arg body: The search definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg explain: Specify whether to return detailed information about score
computation as part of a hit
:arg fielddata_fields: A comma-separated list of fields to return as the
field data representation of a field for each hit
:arg fields: A comma-separated list of fields to return as part of a hit
:arg from_: Starting offset (default: 0)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg request_cache: Specify if request cache should be used for this
request or not, defaults to index level setting
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'dfs_query_then_fetch', 'count', 'scan'
:arg size: Number of hits to return (default: 10)
:arg sort: A comma-separated list of <field>:<direction> pairs
:arg stats: Specific 'tag' of the request for logging and statistical
purposes
:arg suggest_field: Specify which field to use for suggestions
:arg suggest_mode: Specify suggest mode, default 'missing', valid
choices are: 'missing', 'popular', 'always'
:arg suggest_size: How many suggestions to return in response
:arg suggest_text: The source text for which the suggestions should be
returned
:arg terminate_after: The maximum number of documents to collect for
each shard, upon reaching which the query execution will terminate
early.
:arg timeout: Explicit operation timeout
:arg track_scores: Whether to calculate and return scores even if they
are not used for sorting
:arg version: Specify whether to return document version as part of a
hit
"""
# from is a reserved word so it cannot be used, use from_ instead
if 'from_' in params:
params['from'] = params.pop('from_')
if doc_type and not index:
index = '_all'
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_search'), params=params, body=body)
return data
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'local', 'preference', 'routing')
def search_shards(self, index=None, doc_type=None, params=None):
"""
The search shards api returns the indices and shards that a search
request would be executed against. This can give useful feedback for working
out issues or planning optimizations with routing and shard preferences.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-shards.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search; leave
empty to perform the operation on all types
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg local: Return local information, do not retrieve the state from
master node (default: false)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: Specific routing value
"""
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_search_shards'), params=params)
return data
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'preference', 'routing', 'scroll', 'search_type')
def search_template(self, index=None, doc_type=None, body=None, params=None):
"""
A query that accepts a query template and a map of key/value pairs to
fill in template parameters.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg index: A comma-separated list of index names to search; use `_all`
or empty string to perform the operation on all indices
:arg doc_type: A comma-separated list of document types to search; leave
empty to perform the operation on all types
:arg body: The search definition template and its params
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: A comma-separated list of specific routing values
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'query_and_fetch', 'dfs_query_then_fetch',
'dfs_query_and_fetch', 'count', 'scan'
"""
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_search', 'template'), params=params, body=body)
return data
@query_params('_source', '_source_exclude', '_source_include',
'analyze_wildcard', 'analyzer', 'default_operator', 'df', 'fields',
'lenient', 'lowercase_expanded_terms', 'parent', 'preference', 'q',
'routing')
def explain(self, index, doc_type, id, body=None, params=None):
"""
The explain api computes a score explanation for a query and a specific
document. This can give useful feedback whether a document matches or
didn't match a specific query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-explain.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: The document ID
:arg body: The query definition using the Query DSL
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg analyze_wildcard: Specify whether wildcards and prefix queries in
the query string query should be analyzed (default: false)
:arg analyzer: The analyzer for the query string query
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The default field for query string query (default: _all)
:arg fields: A comma-separated list of fields to return in the response
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg parent: The ID of the parent document
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_explain'), params=params, body=body)
return data
@query_params('scroll')
def scroll(self, scroll_id=None, body=None, params=None):
"""
Scroll a search request created by specifying the scroll parameter.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
:arg scroll_id: The scroll ID
:arg body: The scroll ID if not passed by URL or query parameter.
:arg scroll: Specify how long a consistent view of the index should be
maintained for scrolled search
"""
if scroll_id in SKIP_IN_PATH and body in SKIP_IN_PATH:
raise ValueError("You need to supply scroll_id or body.")
elif scroll_id and not body:
body = scroll_id
elif scroll_id:
params['scroll_id'] = scroll_id
_, data = self.transport.perform_request('GET', '/_search/scroll',
params=params, body=body)
return data
@query_params()
def clear_scroll(self, scroll_id=None, body=None, params=None):
"""
Clear the scroll request created by specifying the scroll parameter to
search.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_
:arg scroll_id: A comma-separated list of scroll IDs to clear
:arg body: A comma-separated list of scroll IDs to clear if none was
specified via the scroll_id parameter
"""
_, data = self.transport.perform_request('DELETE', _make_path('_search',
'scroll', scroll_id), params=params, body=body)
return data
@query_params('consistency', 'parent', 'refresh', 'replication', 'routing',
'timeout', 'version', 'version_type')
def delete(self, index, doc_type, id, params=None):
"""
Delete a typed JSON document from a specific index based on its id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document
:arg id: The document ID
:arg consistency: Specific write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg parent: ID of parent document
:arg refresh: Refresh the index after performing the operation
:arg replication: Specific replication type, default 'sync', valid
choices are: 'sync', 'async'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('DELETE', _make_path(index,
doc_type, id), params=params)
return data
@query_params('allow_no_indices', 'analyze_wildcard', 'analyzer',
'default_operator', 'df', 'expand_wildcards', 'ignore_unavailable',
'lenient', 'lowercase_expanded_terms', 'min_score', 'preference', 'q',
'routing')
def count(self, index=None, doc_type=None, body=None, params=None):
"""
Execute a query and get the number of matches for that query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-count.html>`_
:arg index: A comma-separated list of indices to restrict the results
:arg doc_type: A comma-separated list of types to restrict the results
:arg body: A query to restrict the results specified with the Query DSL
(optional)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg min_score: Include only documents with a specific `_score` value in
the result
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
"""
if doc_type and not index:
index = '_all'
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_count'), params=params, body=body)
return data
@query_params('consistency', 'fields', 'refresh', 'replication', 'routing', 'timeout')
def bulk(self, body, index=None, doc_type=None, params=None):
"""
Perform many index/delete operations in a single API call.
See the :func:`~elasticsearch.helpers.bulk` helper function for a more
friendly API.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html>`_
:arg body: The operation definition and data (action-data pairs),
separated by newlines
:arg index: Default index for items which don't provide one
:arg doc_type: Default document type for items which don't provide one
:arg consistency: Explicit write consistency setting for the operation,
valid choices are: 'one', 'quorum', 'all'
:arg fields: Default comma-separated list of fields to return in the
response for updates
:arg refresh: Refresh the index after performing the operation
:arg replication: Explicitely set the replication type, default 'sync',
valid choices are: 'sync', 'async'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
_, data = self.transport.perform_request('POST', _make_path(index,
doc_type, '_bulk'), params=params, body=self._bulk_body(body))
return data
@query_params('search_type')
def msearch(self, body, index=None, doc_type=None, params=None):
"""
Execute several search requests within the same API.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-multi-search.html>`_
:arg body: The request definitions (metadata-search request definition
pairs), separated by newlines
:arg index: A comma-separated list of index names to use as default
:arg doc_type: A comma-separated list of document types to use as
default
:arg search_type: Search operation type, valid choices are:
'query_then_fetch', 'query_and_fetch', 'dfs_query_then_fetch',
'dfs_query_and_fetch', 'count', 'scan'
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_msearch'), params=params, body=self._bulk_body(body))
return data
@query_params('allow_no_indices', 'analyzer', 'consistency',
'default_operator', 'df', 'expand_wildcards', 'ignore_unavailable', 'q',
'replication', 'routing', 'timeout')
def delete_by_query(self, index, doc_type=None, body=None, params=None):
"""
Delete documents from one or more indices and one or more types based on a query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-delete-by-query.html>`_
:arg index: A comma-separated list of indices to restrict the operation;
use `_all` to perform the operation on all indices
:arg doc_type: A comma-separated list of types to restrict the operation
:arg body: A query to restrict the operation specified with the Query
DSL
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyzer: The analyzer to use for the query string
:arg consistency: Specific write consistency setting for the operation
:arg default_operator: The default operator for query string query (AND
or OR), default u'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default u'open'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg q: Query in the Lucene query string syntax
:arg replication: Specific replication type, default 'sync', valid
choices are: 'sync', 'async'
:arg routing: Specific routing value
:arg timeout: Explicit operation timeout
"""
if index in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'index'.")
_, data = self.transport.perform_request('DELETE', _make_path(index, doc_type, '_query'),
params=params, body=body)
return data
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'preference', 'routing')
def suggest(self, body, index=None, params=None):
"""
The suggest feature suggests similar looking terms based on a provided
text by using a suggester.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-suggesters.html>`_
:arg body: The request definition
:arg index: A comma-separated list of index names to restrict the
operation; use `_all` or empty string to perform the operation on
all indices
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: Specific routing value
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
_, data = self.transport.perform_request('POST', _make_path(index,
'_suggest'), params=params, body=body)
return data
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'percolate_format', 'percolate_index', 'percolate_preference',
'percolate_routing', 'percolate_type', 'preference', 'routing',
'version', 'version_type')
def percolate(self, index, doc_type, id=None, body=None, params=None):
"""
The percolator allows to register queries against an index, and then
send percolate requests which include a doc, and getting back the
queries that match on that doc out of the set of registered queries.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html>`_
:arg index: The index of the document being percolated.
:arg doc_type: The type of the document being percolated.
:arg id: Substitute the document in the request body with a document
that is known by the specified id. On top of the id, the index and
type parameter will be used to retrieve the document from within the
cluster.
:arg body: The percolator request definition using the percolate DSL
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg percolate_format: Return an array of matching query IDs instead of
objects, valid choices are: 'ids'
:arg percolate_index: The index to percolate the document into. Defaults
to index.
:arg percolate_preference: Which shard to prefer when executing the
percolate request.
:arg percolate_routing: The routing value to use when percolating the
existing document.
:arg percolate_type: The type to percolate document into. Defaults to
type.
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: A comma-separated list of specific routing values
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_percolate'), params=params, body=body)
return data
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable')
def mpercolate(self, body, index=None, doc_type=None, params=None):
"""
The percolator allows to register queries against an index, and then
send percolate requests which include a doc, and getting back the
queries that match on that doc out of the set of registered queries.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html>`_
:arg body: The percolate request definitions (header & body pair),
separated by newlines
:arg index: The index of the document being count percolated to use as
default
:arg doc_type: The type of the document being percolated to use as
default.
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_mpercolate'), params=params, body=self._bulk_body(body))
return data
@query_params('allow_no_indices', 'expand_wildcards', 'ignore_unavailable',
'percolate_index', 'percolate_type', 'preference', 'routing', 'version',
'version_type')
def count_percolate(self, index, doc_type, id=None, body=None, params=None):
"""
The percolator allows to register queries against an index, and then
send percolate requests which include a doc, and getting back the
queries that match on that doc out of the set of registered queries.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-percolate.html>`_
:arg index: The index of the document being count percolated.
:arg doc_type: The type of the document being count percolated.
:arg id: Substitute the document in the request body with a document
that is known by the specified id. On top of the id, the index and
type parameter will be used to retrieve the document from within the
cluster.
:arg body: The count percolator request definition using the percolate
DSL
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg percolate_index: The index to count percolate the document into.
Defaults to index.
:arg percolate_type: The type to count percolate document into. Defaults
to type.
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg routing: A comma-separated list of specific routing values
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_percolate', 'count'), params=params, body=body)
return data
@query_params('boost_terms', 'include', 'max_doc_freq', 'max_query_terms',
'max_word_length', 'min_doc_freq', 'min_term_freq', 'min_word_length',
'mlt_fields', 'percent_terms_to_match', 'routing', 'search_from',
'search_indices', 'search_query_hint', 'search_scroll', 'search_size',
'search_source', 'search_type', 'search_types', 'stop_words')
def mlt(self, index, doc_type, id, body=None, params=None):
"""
Get documents that are "like" a specified document.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-more-like-this.html>`_
:arg index: The name of the index
:arg doc_type: The type of the document (use `_all` to fetch the first
document matching the ID across all types)
:arg id: The document ID
:arg body: A specific search request definition
:arg boost_terms: The boost factor
:arg include: Whether to include the queried document from the response
:arg max_doc_freq: The word occurrence frequency as count: words with
higher occurrence in the corpus will be ignored
:arg max_query_terms: The maximum query terms to be included in the generated query
:arg max_word_length: The minimum length of the word: longer words will be ignored
:arg min_doc_freq: The word occurrence frequency as count: words with
lower occurrence in the corpus will be ignored
:arg min_term_freq: The term frequency as percent: terms with lower
occurence in the source document will be ignored
:arg min_word_length: The minimum length of the word: shorter words will be ignored
:arg mlt_fields: Specific fields to perform the query against
:arg percent_terms_to_match: How many terms have to match in order to
consider the document a match (default: 0.3)
:arg routing: Specific routing value
:arg search_from: The offset from which to return results
:arg search_indices: A comma-separated list of indices to perform the
query against (default: the index containing the document)
:arg search_query_hint: The search query hint
:arg search_scroll: A scroll search request definition
:arg search_size: The number of documents to return (default: 10)
:arg search_source: A specific search request definition (instead of
using the request body)
:arg search_type: Specific search type (eg. `dfs_then_fetch`, `count`, etc)
:arg search_types: A comma-separated list of types to perform the query
against (default: the same type as the document)
:arg stop_words: A list of stop words to be ignored
"""
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index, doc_type, id, '_mlt'),
params=params, body=body)
return data
@query_params('dfs', 'field_statistics', 'fields', 'offsets', 'parent',
'payloads', 'positions', 'preference', 'realtime', 'routing',
'term_statistics', 'version', 'version_type')
def termvectors(self, index, doc_type, id=None, body=None, params=None):
"""
Returns information and statistics on terms in the fields of a
particular document. The document could be stored in the index or
artificially provided by the user (Added in 1.4). Note that for
documents stored in the index, this is a near realtime API as the term
vectors are not available until the next refresh.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-termvectors.html>`_
:arg index: The index in which the document resides.
:arg doc_type: The type of the document.
:arg id: The id of the document, when not specified a doc param should
be supplied.
:arg body: Define parameters and or supply a document to get termvectors
for. See documentation.
:arg dfs: Specifies if distributed frequencies should be returned
instead shard frequencies., default False
:arg field_statistics: Specifies if document count, sum of document
frequencies and sum of total term frequencies should be returned.,
default True
:arg fields: A comma-separated list of fields to return.
:arg offsets: Specifies if term offsets should be returned., default
True
:arg parent: Parent id of documents.
:arg payloads: Specifies if term payloads should be returned., default
True
:arg positions: Specifies if term positions should be returned., default
True
:arg preference: Specify the node or shard the operation should be
performed on (default: random).
:arg realtime: Specifies if request is real-time as opposed to near-
real-time (default: true).
:arg routing: Specific routing value.
:arg term_statistics: Specifies if total term frequency and document
frequency should be returned., default False
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (index, doc_type):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_termvectors'), params=params, body=body)
return data
@query_params('dfs', 'field_statistics', 'fields', 'offsets', 'parent',
'payloads', 'positions', 'preference', 'realtime', 'routing',
'term_statistics', 'version', 'version_type')
def termvector(self, index, doc_type, id, body=None, params=None):
for param in (index, doc_type, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, id, '_termvector'), params=params, body=body)
return data
termvector.__doc__ = termvectors.__doc__
@query_params('field_statistics', 'fields', 'ids', 'offsets', 'parent',
'payloads', 'positions', 'preference', 'realtime', 'routing',
'term_statistics', 'version', 'version_type')
def mtermvectors(self, index=None, doc_type=None, body=None, params=None):
"""
Multi termvectors API allows to get multiple termvectors based on an
index, type and id.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-termvectors.html>`_
:arg index: The index in which the document resides.
:arg doc_type: The type of the document.
:arg body: Define ids, documents, parameters or a list of parameters per
document here. You must at least provide a list of document ids. See
documentation.
:arg field_statistics: Specifies if document count, sum of document
frequencies and sum of total term frequencies should be returned.
Applies to all returned documents unless otherwise specified in body
"params" or "docs"., default True
:arg fields: A comma-separated list of fields to return. Applies to all
returned documents unless otherwise specified in body "params" or
"docs".
:arg ids: A comma-separated list of documents ids. You must define ids
as parameter or set "ids" or "docs" in the request body
:arg offsets: Specifies if term offsets should be returned. Applies to
all returned documents unless otherwise specified in body "params"
or "docs"., default True
:arg parent: Parent id of documents. Applies to all returned documents
unless otherwise specified in body "params" or "docs".
:arg payloads: Specifies if term payloads should be returned. Applies to
all returned documents unless otherwise specified in body "params"
or "docs"., default True
:arg positions: Specifies if term positions should be returned. Applies
to all returned documents unless otherwise specified in body
"params" or "docs"., default True
:arg preference: Specify the node or shard the operation should be
performed on (default: random) .Applies to all returned documents
unless otherwise specified in body "params" or "docs".
:arg realtime: Specifies if requests are real-time as opposed to near-
real-time (default: true).
:arg routing: Specific routing value. Applies to all returned documents
unless otherwise specified in body "params" or "docs".
:arg term_statistics: Specifies if total term frequency and document
frequency should be returned. Applies to all returned documents
unless otherwise specified in body "params" or "docs"., default
False
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
_, data = self.transport.perform_request('GET', _make_path(index,
doc_type, '_mtermvectors'), params=params, body=body)
return data
@query_params('op_type', 'version', 'version_type')
def put_script(self, lang, id, body, params=None):
"""
Create a script in given language with specified ID.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html>`_
:arg lang: Script language
:arg id: Script ID
:arg body: The document
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (lang, id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('PUT', _make_path('_scripts',
lang, id), params=params, body=body)
return data
@query_params('version', 'version_type')
def get_script(self, lang, id, params=None):
"""
Retrieve a script from the API.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html>`_
:arg lang: Script language
:arg id: Script ID
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (lang, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('GET', _make_path('_scripts',
lang, id), params=params)
return data
@query_params('version', 'version_type')
def delete_script(self, lang, id, params=None):
"""
Remove a stored script from elasticsearch.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-scripting.html>`_
:arg lang: Script language
:arg id: Script ID
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (lang, id):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('DELETE',
_make_path('_scripts', lang, id), params=params)
return data
@query_params('op_type', 'version', 'version_type')
def put_template(self, id, body, params=None):
"""
Create a search template.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: Template ID
:arg body: The document
:arg op_type: Explicit operation type, default 'index', valid choices
are: 'index', 'create'
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
for param in (id, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
_, data = self.transport.perform_request('PUT', _make_path('_search',
'template', id), params=params, body=body)
return data
@query_params('version', 'version_type')
def get_template(self, id, params=None):
"""
Retrieve a search template.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: Template ID
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
if id in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'id'.")
_, data = self.transport.perform_request('GET', _make_path('_search',
'template', id), params=params)
return data
@query_params('version', 'version_type')
def delete_template(self, id=None, params=None):
"""
Delete a search template.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: Template ID
:arg version: Explicit version number for concurrency control
:arg version_type: Specific version type, valid choices are: 'internal',
'external', 'external_gte', 'force'
"""
_, data = self.transport.perform_request('DELETE', _make_path('_search',
'template', id), params=params)
return data
@query_params('allow_no_indices', 'analyze_wildcard', 'analyzer',
'default_operator', 'df', 'expand_wildcards', 'ignore_unavailable',
'lenient', 'lowercase_expanded_terms', 'min_score', 'preference', 'q',
'routing')
def search_exists(self, index=None, doc_type=None, body=None, params=None):
"""
The exists API allows to easily determine if any matching documents
exist for a provided query.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-exists.html>`_
:arg index: A comma-separated list of indices to restrict the results
:arg doc_type: A comma-separated list of types to restrict the results
:arg body: A query to restrict the results specified with the Query DSL
(optional)
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg analyze_wildcard: Specify whether wildcard and prefix queries
should be analyzed (default: false)
:arg analyzer: The analyzer to use for the query string
:arg default_operator: The default operator for query string query (AND
or OR), default 'OR', valid choices are: 'AND', 'OR'
:arg df: The field to use as default where no field prefix is given in
the query string
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg lenient: Specify whether format-based query failures (such as
providing text to a numeric field) should be ignored
:arg lowercase_expanded_terms: Specify whether query terms should be
lowercased
:arg min_score: Include only documents with a specific `_score` value in
the result
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg q: Query in the Lucene query string syntax
:arg routing: Specific routing value
"""
try:
self.transport.perform_request('POST', _make_path(index,
doc_type, '_search', 'exists'), params=params, body=body)
except NotFoundError:
return False
return True
@query_params('allow_no_indices', 'expand_wildcards', 'fields',
'ignore_unavailable', 'level')
def field_stats(self, index=None, body=None, params=None):
"""
The field stats api allows one to find statistical properties of a
field without executing a search, but looking up measurements that are
natively available in the Lucene index.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/search-field-stats.html>`_
:arg index: A comma-separated list of index names; use `_all` or empty
string to perform the operation on all indices
:arg body: Field json objects containing the name and optionally a range
to filter out indices result, that have results outside the defined
bounds
:arg allow_no_indices: Whether to ignore if a wildcard indices
expression resolves into no concrete indices. (This includes `_all`
string or when no indices have been specified)
:arg expand_wildcards: Whether to expand wildcard expression to concrete
indices that are open, closed or both., default 'open', valid
choices are: 'open', 'closed', 'none', 'all'
:arg fields: A comma-separated list of fields for to get field
statistics for (min value, max value, and more)
:arg ignore_unavailable: Whether specified concrete indices should be
ignored when unavailable (missing or closed)
:arg level: Defines if field stats should be returned on a per index
level or on a cluster wide level, default 'cluster', valid choices
are: 'indices', 'cluster'
"""
_, data = self.transport.perform_request('GET', _make_path(index,
'_field_stats'), params=params, body=body)
return data
@query_params()
def render_search_template(self, id=None, body=None, params=None):
"""
`<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-template.html>`_
:arg id: The id of the stored search template
:arg body: The search definition template and its params
"""
_, data = self.transport.perform_request('GET', _make_path('_render',
'template', id), params=params, body=body)
return data
| {
"content_hash": "0d42f74ba16e3f6870bcb599608bf3f5",
"timestamp": "",
"source": "github",
"line_count": 1365,
"max_line_length": 103,
"avg_line_length": 51.372893772893775,
"alnum_prop": 0.6394101876675603,
"repo_name": "control-center/serviced",
"id": "304e11cd71f181e85e497c5bdf4be5829072970c",
"size": "70124",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pkg/elastic-migration/elasticsearch1/client/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7564"
},
{
"name": "CSS",
"bytes": "44431"
},
{
"name": "Dockerfile",
"bytes": "9894"
},
{
"name": "Gherkin",
"bytes": "77635"
},
{
"name": "Go",
"bytes": "4988525"
},
{
"name": "Groovy",
"bytes": "3181"
},
{
"name": "HTML",
"bytes": "144525"
},
{
"name": "Java",
"bytes": "1884"
},
{
"name": "JavaScript",
"bytes": "796469"
},
{
"name": "Makefile",
"bytes": "35554"
},
{
"name": "Python",
"bytes": "354324"
},
{
"name": "Ruby",
"bytes": "127700"
},
{
"name": "Shell",
"bytes": "67747"
}
],
"symlink_target": ""
} |
from slippymapper import *
from layer import SlippyLayer
| {
"content_hash": "3368d75eba3f96f8d8fedf2797ab458b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 29,
"avg_line_length": 28.5,
"alnum_prop": 0.8421052631578947,
"repo_name": "awmartin/spatialpixel",
"id": "6364ca493fb79da7ed0d36a233373955242d7ec8",
"size": "57",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mapping/slippymapper/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Processing",
"bytes": "34428"
},
{
"name": "Python",
"bytes": "114612"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: widen ModuleStat.value from IntegerField to
    DecimalField(max_digits=10, decimal_places=2).

    NOTE(review): this is auto-generated South code; the ``models`` dict is
    introspected by South and should not be hand-edited.
    """
    def forwards(self, orm):
        # Changing field 'ModuleStat.value'
        # IntegerField -> DecimalField(10, 2); existing integer values are
        # converted by the database backend.
        db.alter_column('bigbrother_modulestat', 'value', self.gf('django.db.models.fields.DecimalField')(max_digits=10, decimal_places=2))
    def backwards(self, orm):
        # Changing field 'ModuleStat.value'
        # Reverse migration: back to a plain IntegerField (fractional parts
        # are dropped by the backend's conversion).
        db.alter_column('bigbrother_modulestat', 'value', self.gf('django.db.models.fields.IntegerField')())
    # Frozen ORM snapshot used by South to build the `orm` argument above.
    models = {
        'bigbrother.modulestat': {
            'Meta': {'object_name': 'ModuleStat'},
            'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modulename': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '2'})
        }
    }
    complete_apps = ['bigbrother']
| {
"content_hash": "982be976bb986eefa3ffae2c034a61cb",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 139,
"avg_line_length": 37.06666666666667,
"alnum_prop": 0.6097122302158273,
"repo_name": "anderspetersson/django-bigbrother",
"id": "3521e1d4685ac7fa93c59429a3b887d52ef549ff",
"size": "1130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigbrother/migrations/0002_auto__chg_field_modulestat_value.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4257"
},
{
"name": "Python",
"bytes": "33076"
},
{
"name": "Shell",
"bytes": "5118"
}
],
"symlink_target": ""
} |
__author__ = 'briannelson'
from SID.Utilities import ConfigUtility
from SIDClient.Controllers import SendToSidWatchServerController
# Script entry: load configuration and start the upload controller.
# NOTE(review): these statements run on import as well as on direct
# execution; consider an `if __name__ == '__main__':` guard — TODO confirm
# nothing imports this module.
config = ConfigUtility.load('./Config/sidwatch.cfg')
controller = SendToSidWatchServerController(config)
controller.start()
| {
"content_hash": "4d125318e32e7f28ebd308eba21270a2",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 64,
"avg_line_length": 23.727272727272727,
"alnum_prop": 0.8160919540229885,
"repo_name": "SidWatch/pySIDWatch",
"id": "38abcf3dd2684bcab03a68090450ac61474ed10c",
"size": "261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/SendToSidWatchServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "95"
},
{
"name": "Python",
"bytes": "24524"
}
],
"symlink_target": ""
} |
from google.cloud import aiplatform
# [START aiplatform_sdk_endpoint_predict_sample]
def endpoint_predict_sample(
    project: str, location: str, instances: list, endpoint: str
):
    """Send an online prediction request to a Vertex AI endpoint.

    Initialises the SDK for the given project/location, wraps the endpoint
    resource name in an ``aiplatform.Endpoint``, predicts on ``instances``,
    prints and returns the prediction response.
    """
    aiplatform.init(project=project, location=location)
    target = aiplatform.Endpoint(endpoint)
    response = target.predict(instances=instances)
    print(response)
    return response
# [END aiplatform_sdk_endpoint_predict_sample]
| {
"content_hash": "08bd182c86b276a36b6f257703b984a7",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 25.647058823529413,
"alnum_prop": 0.7522935779816514,
"repo_name": "sasha-gitg/python-aiplatform",
"id": "98b7450c5109f337c77803c3b92caaaf10cab153",
"size": "1013",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samples/model-builder/endpoint_predict_sample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "11216304"
},
{
"name": "Shell",
"bytes": "30838"
}
],
"symlink_target": ""
} |
import datetime
import time
import random
import pickle
from datetime import date
import json
from collections import deque
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from tensorboard_logger import configure, log_value
from loveletter.env import LoveLetterEnv
from loveletter.agents.random import AgentRandom
from loveletter.agents.agent import Agent
from loveletter.agents.a3c import AgentA3C
from loveletter.arena import Arena
from loveletter.trainers.a3c_model import ActorCritic
# Number of evaluation episodes to run before closing the env in
# --evaluate mode.
evaluation_episodes = 100
def test(rank, args, shared_model, dtype):
    """Evaluation loop for the A3C trainer.

    Repeatedly plays episodes with a local copy of `shared_model` against a
    random opponent, logs rewards to tensorboard, and checkpoints the shared
    model whenever a new best episode reward is reached.  Runs forever
    unless `args.evaluate` is set, in which case the env is closed after
    `evaluation_episodes` episodes.
    """
    test_ctr = 0
    torch.manual_seed(args.seed + rank)
    # set up logger
    timestring = str(date.today()) + '_' + \
        time.strftime("%Hh-%Mm-%Ss", time.localtime(time.time()))
    run_name = args.save_name + '_' + timestring
    configure("logs/run_" + run_name, flush_secs=5)
    env = LoveLetterEnv(AgentRandom(args.seed + rank), args.seed + rank)
    env.seed(args.seed + rank)
    state = env.reset()
    # ActorCritic input size is taken from the observation vector length.
    model = ActorCritic(state.shape[0], env.action_space).type(dtype)
    model.eval()
    state = torch.from_numpy(state).type(dtype)
    reward_sum = 0
    max_reward = -99999999
    max_winrate = 0
    # rolling window of the last 100 episode rewards
    rewards_recent = deque([], 100)
    done = True
    start_time = time.time()
    episode_length = 0
    while True:
        episode_length += 1
        # Sync with the shared model
        if done:
            model.load_state_dict(shared_model.state_dict())
            # fresh LSTM hidden state at episode start
            cx = Variable(torch.zeros(1, 256).type(dtype), volatile=True)
            hx = Variable(torch.zeros(1, 256).type(dtype), volatile=True)
        else:
            # carry hidden state across steps (volatile: inference only)
            cx = Variable(cx.data.type(dtype), volatile=True)
            hx = Variable(hx.data.type(dtype), volatile=True)
        value, logit, (hx, cx) = model(
            (Variable(state.unsqueeze(0), volatile=True), (hx, cx)))
        # greedy action: argmax of the policy logits
        prob = F.softmax(logit)
        action = prob.max(1)[1].data.cpu().numpy()
        state, reward, done, _ = env.step(action[0, 0])
        done = done or episode_length >= args.max_episode_length
        reward_sum += reward
        if done:
            rewards_recent.append(reward_sum)
            rewards_recent_avg = sum(rewards_recent) / len(rewards_recent)
            print(
                "{} | Episode Reward {: >4}, Length {: >2} | Avg Reward {:0.2f}".format(
                    time.strftime("%Hh %Mm %Ss",
                                  time.gmtime(time.time() - start_time)),
                    reward_sum, episode_length, rewards_recent_avg))
            # if not stuck or args.evaluate:
            log_value('Reward', reward_sum, test_ctr)
            log_value('Reward Average', rewards_recent_avg, test_ctr)
            log_value('Episode length', episode_length, test_ctr)
            # New best episode reward: checkpoint and benchmark vs random.
            if reward_sum >= max_reward:
                # pickle.dump(shared_model.state_dict(), open(args.save_name + '_max' + '.p', 'wb'))
                path_output = args.save_name + '_max'
                torch.save(shared_model.state_dict(), path_output)
                # also save a timestamped copy of the same weights
                path_now = "{}_{}".format(
                    args.save_name, datetime.datetime.now().isoformat())
                torch.save(shared_model.state_dict(), path_now)
                max_reward = reward_sum
                # measure win rate over 800 games against a random agent
                win_rate_v_random = Arena.compare_agents_float(
                    lambda seed: AgentA3C(path_output, dtype, seed),
                    lambda seed: AgentRandom(seed),
                    800)
                msg = " {} | VsRandom: {: >4}%".format(
                    datetime.datetime.now().strftime("%c"),
                    round(win_rate_v_random * 100, 2)
                )
                print(msg)
                log_value('Win Rate vs Random', win_rate_v_random, test_ctr)
                if win_rate_v_random > max_winrate:
                    print("Found superior model at {}".format(datetime.datetime.now().isoformat()))
                    torch.save(shared_model.state_dict(), "{}_{}_best_{}".format(
                        args.save_name, datetime.datetime.now().isoformat(), win_rate_v_random))
                    max_winrate = win_rate_v_random
            # reset per-episode counters and start a new episode
            reward_sum = 0
            episode_length = 0
            state = env.reset()
            test_ctr += 1
            if test_ctr % 10 == 0 and not args.evaluate:
                # pickle.dump(shared_model.state_dict(), open(args.save_name + '.p', 'wb'))
                torch.save(shared_model.state_dict(), args.save_name)
            if not args.evaluate:
                # throttle evaluation so training threads get CPU time
                time.sleep(60)
            elif test_ctr == evaluation_episodes:
                # Ensure the environment is closed so we can complete the
                # submission
                env.close()
                # gym.upload('monitor/' + run_name, api_key=api_key)
        state = torch.from_numpy(state).type(dtype)
| {
"content_hash": "2351bf4471cf7291c78f4f61e783ab37",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 100,
"avg_line_length": 36.81203007518797,
"alnum_prop": 0.5680147058823529,
"repo_name": "user01/love-letter",
"id": "ab4959e7a6f85e486bef2aa3a05ce55dbcad3209",
"size": "4896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "loveletter/trainers/a3c_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "36"
},
{
"name": "Python",
"bytes": "87235"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import numpy as np
import imutils
import cv2
import sys
def main(passwd):
    """Stream frames from the RTSP camera at 192.168.1.100 and display them.

    :param passwd: password for the camera's `admin` account.

    Press 'q' in the display window to quit.  The loop ends cleanly when
    the stream drops instead of crashing on a None frame.
    """
    cap = cv2.VideoCapture('rtsp://admin:'+passwd+'@192.168.1.100:554')
    cap.set(cv2.CAP_PROP_FPS,10)
    print("video", cap.isOpened())
    while(cap.isOpened()):
        # Capture frame-by-frame
        ret, frame = cap.read()
        if not ret:
            # Bug fix: cap.read() returns (False, None) on a dropped/ended
            # stream; imutils.resize(None) would raise.  Bail out instead.
            break
        #reduce image size for rpi
        frame = imutils.resize(frame, width=640)
        # Our operations on the frame come here (gray is currently an
        # unused placeholder for per-frame processing)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Display the resulting frame
        cv2.imshow('frame',frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Require the camera password as the single command-line argument.
    args = sys.argv
    if len(args) < 2:
        print ("Usage %s <pass>" % args[0])
        sys.exit(1)
    main(args[1])
| {
"content_hash": "864bc216ed53ad9db673a5a8a8e102c0",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.592391304347826,
"repo_name": "squeakus/motiontracker",
"id": "9ca3e8490531fe18dd1f0eb047d7cd151a29a571",
"size": "943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simplevid.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "51103"
},
{
"name": "Shell",
"bytes": "594"
}
],
"symlink_target": ""
} |
"""this is a version of the fileserver that works with wsgi (eg. gunicorn)"""
import os
import os.path
import urllib
import urlparse
import sys
import logging
from random import choice, randint
import string
import fcntl
# Reuse gunicorn's error logger so messages land in the server's log stream.
logger = logging.getLogger("gunicorn.error")
DEBUG=True
# if we're debugging stuff, we log stack traces, otherwise we only log the error message
if DEBUG:
    log_exc=logger.exception
else:
    log_exc=logger.error
# Detect whether the Engage deployment adapter is installed; it decides
# where the file-server key lives.
try:
    import datablox_engage_adapter.file_locator
    using_engage = True
except ImportError:
    using_engage = False
if using_engage:
    # Engage manages the key file location for us.
    engage_file_locator = datablox_engage_adapter.file_locator.FileLocator()
    file_server_keypath = engage_file_locator.get_file_server_key_file()
else:
    # Standalone deployment: key file in the user's home directory.
    file_server_keypath = os.path.expanduser('~/datablox_file_server_key')
from block import decrypt_path
FILESERVER_PORT=4990
# chunk size used when streaming files back to clients
BLOCK_SIZE = 128000
KEY_MESSAGE = "key="
KEY_MESSAGE_LEN = len(KEY_MESSAGE)
def gen_random(length, chars=string.ascii_letters+string.digits):
    """Return a random string of `length` characters drawn from `chars`.

    This is used to generate the file-server access key, so the characters
    come from ``random.SystemRandom`` (os.urandom-backed CSPRNG) rather
    than the default Mersenne Twister, which is predictable.  The default
    alphabet uses ``string.ascii_letters`` instead of the locale-dependent,
    Python-2-only ``string.letters`` (identical under the default locale).
    """
    # local import: keeps the module's top-level imports unchanged
    from random import SystemRandom
    secure_choice = SystemRandom().choice
    return ''.join([secure_choice(chars) for _ in range(length)])
def get_key(path):
    """Get the key for the fileserver. Since there could be
    multiple slaves, we need to use file locking to serialize
    access to the key file. The first slave to try to get the key
    will generate the key and create the file.
    """
    path = os.path.abspath(os.path.expanduser(path))
    # Separate .lock file: the key file itself is created read-only below,
    # so it cannot be reopened for the lock.
    lockfile = path + ".lock"
    with open(lockfile, "w") as lf:
        # Exclusive lock: blocks until any other slave finishes.
        fcntl.lockf(lf, fcntl.LOCK_EX)
        try:
            if not os.path.exists(path):
                # First process here generates the 8-char key.
                k = gen_random(8)
                with open(path, 'w') as f:
                    f.write(k)
                # 0400: owner read-only (Python 2 octal literal).
                os.chmod(path, 0400)
                logger.info("Generated new keyfile at %s" % path)
                return k
            else:
                with open(path, "r") as f:
                    logger.info("Reading existing keyfile at %s" % path)
                    # rstrip tolerates a trailing newline in the key file
                    return f.read().rstrip()
        finally:
            # Always release the lock, even if key creation raised.
            fcntl.lockf(lf, fcntl.LOCK_UN)
# Load (or create) the shared DES key once at import time.
deskey = get_key(file_server_keypath)
# headers used for every error response
error_headers = [("content-type", "text/plain")]
def send_file(path, size):
    """Generator yielding the file at `path` in BLOCK_SIZE chunks.

    `size` is unused here but kept for interface compatibility with the
    caller, which already computed it for the Content-Length header.
    """
    with open(path) as stream:
        # read() returns '' at EOF, which terminates iter()
        for chunk in iter(lambda: stream.read(BLOCK_SIZE), ''):
            yield chunk
def app(environ, start_response):
    """WSGI entry point: serve the file named by the encrypted `key` query
    parameter.

    The query string must contain key=<DES-encrypted path>.  On success the
    file is streamed back with a Content-Length header; on any failure a
    plain-text 404 (bad request / missing file) or 500 (unexpected) is
    returned.  (Python 2 `except E, e` syntax throughout.)
    """
    # path is pre-bound so the except blocks can tell whether decryption
    # already succeeded when the error happened.
    path = None
    try:
        qs = environ.get("QUERY_STRING")
        qdict = urlparse.parse_qs(qs)
        enc_path = qdict["key"][0]
        path = decrypt_path(enc_path, deskey)
        logger.debug("Decrypted path " + path)
        # also validates existence: raises OSError/IOError if missing
        size = os.path.getsize(path)
    except KeyError, e:
        # no `key` parameter in the query string
        log_exc("Invalid request(KeyError): %s, query string was '%s'" %
                (e, qs))
        start_response('404 Page Not Found', error_headers, sys.exc_info())
        return ["Invalid request"]
    except ValueError, e:
        # decryption failed (bad key or corrupted parameter)
        log_exc("Invalid request (ValueError): %s, query string was '%s'" %
                (e, qs))
        if path:
            logger.error("Path was %s" % path)
        start_response('404 Page Not Found', error_headers, sys.exc_info())
        return ["Invalid request"]
    except IOError:
        log_exc("Could not open file at %s" % path)
        start_response('404 Page Not Found', error_headers, sys.exc_info())
        return ["Could not open file at %s" % path]
    except Exception, e:
        # anything else is a server-side problem, not a bad request
        log_exc("Unexpected error %s, query string was '%s'" % (e, qs))
        if path:
            logger.error("Path was %s" % path)
        start_response('500 Internal Server Error', error_headers, sys.exc_info())
        return ["Unexpected error %s" % e]
    start_response("200 OK", [
        ("Content-Length", str(size))
    ])
    # stream the file lazily in BLOCK_SIZE chunks
    return send_file(path, size)
| {
"content_hash": "c25361f8148878b48910bdd1a90a3e6a",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 88,
"avg_line_length": 29.203389830508474,
"alnum_prop": 0.6584445734184562,
"repo_name": "mpi-sws-rse/datablox",
"id": "2cbda0e104b69355967974f1e0368016680b2402",
"size": "3447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datablox_framework/datablox_framework/fileserver_wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7801"
},
{
"name": "Python",
"bytes": "378676"
},
{
"name": "Shell",
"bytes": "7299"
}
],
"symlink_target": ""
} |
"""
display all images in a directory as thumbnail image buttons that display
the full image when clicked; requires PIL for JPEGs and thumbnail image
creation; to do: add scrolling if too many thumbs for window!
"""
import os, sys, math
from Tkinter import *
from PIL import Image # <== required for thumbs
from PIL.ImageTk import PhotoImage # <== required for JPEG display
def makeThumbs(imgdir, size=(100, 100), subdir='thumbs'):
    """
    get thumbnail images for all images in a directory; for each image, create
    and save a new thumb, or load and return an existing thumb; makes thumb
    dir if needed; returns a list of (image filename, thumb image object);
    caller can also run listdir on thumb dir to load; on bad file types the
    entry is skipped with a message; caveat: could also check file timestamps;
    """
    thumbdir = os.path.join(imgdir, subdir)
    if not os.path.exists(thumbdir):
        os.mkdir(thumbdir)
    thumbs = []
    for imgfile in os.listdir(imgdir):
        imgpath = os.path.join(imgdir, imgfile)
        if not os.path.isfile(imgpath):
            # Fix: listdir includes the thumbs subdir itself (and any other
            # directories); previously these were opened, failed, and
            # printed "Skipping" on every run.
            continue
        thumbpath = os.path.join(thumbdir, imgfile)
        if os.path.exists(thumbpath):
            thumbobj = Image.open(thumbpath)                 # use already created
            thumbs.append((imgfile, thumbobj))
        else:
            print('making', thumbpath)
            try:
                imgobj = Image.open(imgpath)                 # make new thumb
                imgobj.thumbnail(size, Image.ANTIALIAS)      # best downsize filter
                imgobj.save(thumbpath)                       # type via ext or passed
                thumbs.append((imgfile, imgobj))
            except Exception:
                # Fix: narrowed from a bare `except:` so KeyboardInterrupt/
                # SystemExit are no longer swallowed; PIL raises more than
                # IOError for bad file types, hence Exception not IOError.
                print("Skipping: ", imgpath)
    return thumbs
class ViewOne(Toplevel):
    """
    Pop-up window that shows one full-size image when constructed.
    The PhotoImage is stored on the instance because Tk erases images
    whose Python object gets garbage-collected.
    """
    def __init__(self, imgdir, imgfile):
        Toplevel.__init__(self)
        self.title(imgfile)
        fullpath = os.path.join(imgdir, imgfile)
        photo = PhotoImage(file=fullpath)
        Label(self, image=photo).pack()
        print(fullpath, photo.width(), photo.height())  # size in pixels
        self.savephoto = photo  # keep reference on me
def viewer(imgdir, kind=Toplevel, cols=None):
    """
    Build a thumbnail-browser window for an image directory: one button per
    image; clicking a thumb opens the full image in a ViewOne pop-up.
    Use kind=Tk to show in the main app window, or a Frame container;
    cols defaults to a roughly square N x N grid.  Each lambda binds its
    own imgfile via a default argument; the PhotoImage objects are returned
    so the caller keeps them alive (Tk erases reclaimed images).
    """
    win = kind()
    win.title('Viewer: ' + imgdir)
    quit_btn = Button(win, text='Quit', command=win.quit, bg='beige')
    quit_btn.pack(fill=X, side=BOTTOM)   # packed first so it is clipped last
    thumbs = makeThumbs(imgdir)
    if not cols:
        cols = int(math.ceil(math.sqrt(len(thumbs))))   # fixed or N x N
    savephotos = []
    # walk the thumb list one row (cols entries) at a time;
    # max(cols, 1) keeps the step valid when the directory is empty
    for start in range(0, len(thumbs), max(cols, 1)):
        row = Frame(win)
        row.pack(fill=BOTH)
        for (imgfile, imgobj) in thumbs[start:start + cols]:
            photo = PhotoImage(imgobj)
            link = Button(row, image=photo)
            handler = lambda savefile=imgfile: ViewOne(imgdir, savefile)
            link.config(command=handler)
            link.pack(side=LEFT, expand=YES)
            savephotos.append(photo)
    return win, savephotos
if __name__ == '__main__':
    # First CLI argument selects the directory; an absent OR empty argument
    # falls back to 'images' (preserves the original truthy-or semantics).
    cli_arg = sys.argv[1] if len(sys.argv) > 1 else ''
    imgdir = cli_arg or 'images'
    main, save = viewer(imgdir, kind=Tk)
    main.mainloop()
| {
"content_hash": "56ad145ad60fc12de02254f07125c4cc",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 82,
"avg_line_length": 43.44318181818182,
"alnum_prop": 0.5932513732670678,
"repo_name": "gmnamra/python-image-utils",
"id": "88c973b4597f6545a49e5837bbe030d0d919a956",
"size": "3823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "viewer_thumbs.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105141"
}
],
"symlink_target": ""
} |
import sys
import numpy as np
import ast
import inspect
import operator
import types as pytypes
from contextlib import contextmanager
from copy import deepcopy
import numba
from numba import njit, stencil
from numba.core.utils import PYVERSION
from numba.core import types, registry
from numba.core.compiler import compile_extra, Flags
from numba.core.cpu import ParallelOptions
from numba.tests.support import tag, skip_parfors_unsupported, _32bit
from numba.core.errors import LoweringError, TypingError
import unittest
# Shorter alias used as a decorator on every test below.
skip_unsupported = skip_parfors_unsupported
# NOTE: the kernels below are consumed by numba's @stencil machinery, which
# interprets the relative-index expressions (a[0, 1] etc.) directly; their
# exact form matters.
# 2D 4-neighbour average (von Neumann neighbourhood).
@stencil
def stencil1_kernel(a):
    return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
# 1D kernel over the 6 trailing elements; needs an explicit neighborhood
# because the loop bounds are not literal indices numba can infer.
@stencil(neighborhood=((-5, 0), ))
def stencil2_kernel(a):
    cum = a[-5]
    for i in range(-4, 1):
        cum += a[i]
    return 0.3 * cum
# Single-offset 2D kernel with a non-zero border constant (cval).
@stencil(cval=1.0)
def stencil3_kernel(a):
    return 0.25 * a[-2, 2]
# Same 4-neighbour average computed over two input arrays.
@stencil
def stencil_multiple_input_kernel(a, b):
    return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] +
                   b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0])
# As above, but the scale factor w is a scalar argument.
@stencil
def stencil_multiple_input_kernel_var(a, b, w):
    return w * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0] +
                b[0, 1] + b[1, 0] + b[0, -1] + b[-1, 0])
# Element-wise select between a and b driven by a boolean array f.
@stencil
def stencil_multiple_input_mixed_types_2d(a, b, f):
    return a[0, 0] if f[0, 0] else b[0, 0]
# b uses standard (absolute) indexing while a uses relative indexing.
@stencil(standard_indexing=("b",))
def stencil_with_standard_indexing_1d(a, b):
    return a[-1] * b[0] + a[0] * b[1]
@stencil(standard_indexing=("b",))
def stencil_with_standard_indexing_2d(a, b):
    return (a[0, 1] * b[0, 1] + a[1, 0] * b[1, 0]
            + a[0, -1] * b[0, -1] + a[-1, 0] * b[-1, 0])
# Trivial jitted helper used by other tests.
@njit
def addone_njit(a):
    return a + 1
if not _32bit: # prevent compilation on unsupported 32bit targets
    @njit(parallel=True)
    def addone_pjit(a):
        return a + 1
@unittest.skipIf(PYVERSION != (3, 7), "Run under 3.7 only, AST unstable")
class TestStencilBase(unittest.TestCase):
    """Shared compilation/checking helpers for the stencil tests."""
    # tell numba's test runner not to parallelise this class itself
    _numba_parallel_test_ = False
    def __init__(self, *args):
        # flags for njit()
        self.cflags = Flags()
        self.cflags.nrt = True
        super(TestStencilBase, self).__init__(*args)
    def _compile_this(self, func, sig, flags):
        # Low-level compile of `func` at signature `sig` with the given flags
        # against the CPU target.
        return compile_extra(registry.cpu_target.typing_context,
                             registry.cpu_target.target_context, func, sig,
                             None, flags, {})
    def compile_parallel(self, func, sig, **kws):
        # Compile with parfors enabled; kws (if any) select which parallel
        # transforms to turn on, otherwise everything is enabled.
        flags = Flags()
        flags.nrt = True
        options = True if not kws else kws
        flags.auto_parallel=ParallelOptions(options)
        return self._compile_this(func, sig, flags)
    def compile_njit(self, func, sig):
        # Plain nopython-mode compile (no parfors).
        return self._compile_this(func, sig, flags=self.cflags)
    def compile_all(self, pyfunc, *args, **kwargs):
        # Infer the signature from concrete argument values and compile both
        # the njit and the parallel variants.
        sig = tuple([numba.typeof(x) for x in args])
        # compile with parallel=True
        cpfunc = self.compile_parallel(pyfunc, sig)
        # compile a standard njit of the original function
        cfunc = self.compile_njit(pyfunc, sig)
        return cfunc, cpfunc
    def check(self, no_stencil_func, pyfunc, *args):
        # Run the pure-python reference, the interpreted stencil, the njit
        # build and the parallel build, and require they all agree.
        cfunc, cpfunc = self.compile_all(pyfunc, *args)
        # results without stencil macro
        expected = no_stencil_func(*args)
        # python result
        py_output = pyfunc(*args)
        # njit result
        njit_output = cfunc.entry_point(*args)
        # parfor result
        parfor_output = cpfunc.entry_point(*args)
        np.testing.assert_almost_equal(py_output, expected, decimal=3)
        np.testing.assert_almost_equal(njit_output, expected, decimal=3)
        np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
        # make sure parfor set up scheduling
        self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
class TestStencil(TestStencilBase):
def __init__(self, *args, **kwargs):
super(TestStencil, self).__init__(*args, **kwargs)
@skip_unsupported
def test_stencil1(self):
"""Tests whether the optional out argument to stencil calls works.
"""
def test_with_out(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
B = stencil1_kernel(A, out=B)
return B
def test_without_out(n):
A = np.arange(n**2).reshape((n, n))
B = stencil1_kernel(A)
return B
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
B[i, j] = 0.25 * (A[i, j + 1] +
A[i + 1, j] + A[i, j - 1] + A[i - 1, j])
return B
n = 100
self.check(test_impl_seq, test_with_out, n)
self.check(test_impl_seq, test_without_out, n)
@skip_unsupported
def test_stencil2(self):
"""Tests whether the optional neighborhood argument to the stencil
decorate works.
"""
def test_seq(n):
A = np.arange(n)
B = stencil2_kernel(A)
return B
def test_impl_seq(n):
A = np.arange(n)
B = np.zeros(n)
for i in range(5, len(A)):
B[i] = 0.3 * sum(A[i - 5:i + 1])
return B
n = 100
self.check(test_impl_seq, test_seq, n)
# variable length neighborhood in numba.stencil call
# only supported in parallel path
def test_seq(n, w):
A = np.arange(n)
def stencil2_kernel(a, w):
cum = a[-w]
for i in range(-w + 1, w + 1):
cum += a[i]
return 0.3 * cum
B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ))(A, w)
return B
def test_impl_seq(n, w):
A = np.arange(n)
B = np.zeros(n)
for i in range(w, len(A) - w):
B[i] = 0.3 * sum(A[i - w:i + w + 1])
return B
n = 100
w = 5
cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp))
expected = test_impl_seq(n, w)
# parfor result
parfor_output = cpfunc.entry_point(n, w)
np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
# test index_offsets
def test_seq(n, w, offset):
A = np.arange(n)
def stencil2_kernel(a, w):
cum = a[-w + 1]
for i in range(-w + 1, w + 1):
cum += a[i + 1]
return 0.3 * cum
B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ),
index_offsets=(-offset, ))(A, w)
return B
offset = 1
cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp,
types.intp))
parfor_output = cpfunc.entry_point(n, w, offset)
np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
# test slice in kernel
def test_seq(n, w, offset):
A = np.arange(n)
def stencil2_kernel(a, w):
return 0.3 * np.sum(a[-w + 1:w + 2])
B = numba.stencil(stencil2_kernel, neighborhood=((-w, w), ),
index_offsets=(-offset, ))(A, w)
return B
offset = 1
cpfunc = self.compile_parallel(test_seq, (types.intp, types.intp,
types.intp))
parfor_output = cpfunc.entry_point(n, w, offset)
np.testing.assert_almost_equal(parfor_output, expected, decimal=3)
self.assertIn('@do_scheduling', cpfunc.library.get_llvm_str())
@skip_unsupported
def test_stencil3(self):
"""Tests whether a non-zero optional cval argument to the stencil
decorator works. Also tests integer result type.
"""
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = stencil3_kernel(A)
return B
test_njit = njit(test_seq)
test_par = njit(test_seq, parallel=True)
n = 5
seq_res = test_seq(n)
njit_res = test_njit(n)
par_res = test_par(n)
self.assertTrue(seq_res[0, 0] == 1.0 and seq_res[4, 4] == 1.0)
self.assertTrue(njit_res[0, 0] == 1.0 and njit_res[4, 4] == 1.0)
self.assertTrue(par_res[0, 0] == 1.0 and par_res[4, 4] == 1.0)
@skip_unsupported
def test_stencil_standard_indexing_1d(self):
"""Tests standard indexing with a 1d array.
"""
def test_seq(n):
A = np.arange(n)
B = [3.0, 7.0]
C = stencil_with_standard_indexing_1d(A, B)
return C
def test_impl_seq(n):
A = np.arange(n)
B = [3.0, 7.0]
C = np.zeros(n)
for i in range(1, n):
C[i] = A[i - 1] * B[0] + A[i] * B[1]
return C
n = 100
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_standard_indexing_2d(self):
"""Tests standard indexing with a 2d array and multiple stencil calls.
"""
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.ones((3, 3))
C = stencil_with_standard_indexing_2d(A, B)
D = stencil_with_standard_indexing_2d(C, B)
return D
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.ones((3, 3))
C = np.zeros(n**2).reshape((n, n))
D = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
C[i, j] = (A[i, j + 1] * B[0, 1] + A[i + 1, j] * B[1, 0] +
A[i, j - 1] * B[0, -1] + A[i - 1, j] * B[-1, 0])
for i in range(1, n - 1):
for j in range(1, n - 1):
D[i, j] = (C[i, j + 1] * B[0, 1] + C[i + 1, j] * B[1, 0] +
C[i, j - 1] * B[0, -1] + C[i - 1, j] * B[-1, 0])
return D
n = 5
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_multiple_inputs(self):
"""Tests whether multiple inputs of the same size work.
"""
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.arange(n**2).reshape((n, n))
C = stencil_multiple_input_kernel(A, B)
return C
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.arange(n**2).reshape((n, n))
C = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
C[i, j] = 0.25 * \
(A[i, j + 1] + A[i + 1, j]
+ A[i, j - 1] + A[i - 1, j]
+ B[i, j + 1] + B[i + 1, j]
+ B[i, j - 1] + B[i - 1, j])
return C
n = 3
self.check(test_impl_seq, test_seq, n)
# test stencil with a non-array input
def test_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.arange(n**2).reshape((n, n))
w = 0.25
C = stencil_multiple_input_kernel_var(A, B, w)
return C
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_mixed_types(self):
def test_impl_seq(n):
A = np.arange(n ** 2).reshape((n, n))
B = n ** 2 - np.arange(n ** 2).reshape((n, n))
S = np.eye(n, dtype=np.bool_)
O = np.zeros((n, n), dtype=A.dtype)
for i in range(0, n):
for j in range(0, n):
O[i, j] = A[i, j] if S[i, j] else B[i, j]
return O
def test_seq(n):
A = np.arange(n ** 2).reshape((n, n))
B = n ** 2 - np.arange(n ** 2).reshape((n, n))
S = np.eye(n, dtype=np.bool_)
O = stencil_multiple_input_mixed_types_2d(A, B, S)
return O
n = 3
self.check(test_impl_seq, test_seq, n)
@skip_unsupported
def test_stencil_call(self):
"""Tests 2D numba.stencil calls.
"""
def test_impl1(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
numba.stencil(lambda a: 0.25 * (a[0, 1] + a[1, 0] + a[0, -1]
+ a[-1, 0]))(A, out=B)
return B
def test_impl2(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
def sf(a):
return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
B = numba.stencil(sf)(A)
return B
def test_impl_seq(n):
A = np.arange(n**2).reshape((n, n))
B = np.zeros(n**2).reshape((n, n))
for i in range(1, n - 1):
for j in range(1, n - 1):
B[i, j] = 0.25 * (A[i, j + 1] + A[i + 1, j]
+ A[i, j - 1] + A[i - 1, j])
return B
n = 100
self.check(test_impl_seq, test_impl1, n)
self.check(test_impl_seq, test_impl2, n)
@skip_unsupported
def test_stencil_call_1D(self):
"""Tests 1D numba.stencil calls.
"""
def test_impl(n):
A = np.arange(n)
B = np.zeros(n)
numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A, out=B)
return B
def test_impl_seq(n):
A = np.arange(n)
B = np.zeros(n)
for i in range(1, n - 1):
B[i] = 0.3 * (A[i - 1] + A[i] + A[i + 1])
return B
n = 100
self.check(test_impl_seq, test_impl, n)
@skip_unsupported
def test_stencil_call_const(self):
"""Tests numba.stencil call that has an index that can be inferred as
constant from a unary expr. Otherwise, this would raise an error since
neighborhood length is not specified.
"""
def test_impl1(n):
A = np.arange(n)
B = np.zeros(n)
c = 1
numba.stencil(lambda a,c : 0.3 * (a[-c] + a[0] + a[c]))(
A, c, out=B)
return B
def test_impl2(n):
A = np.arange(n)
B = np.zeros(n)
c = 2
numba.stencil(lambda a,c : 0.3 * (a[1-c] + a[0] + a[c-1]))(
A, c, out=B)
return B
# recursive expr case
def test_impl3(n):
A = np.arange(n)
B = np.zeros(n)
c = 2
numba.stencil(lambda a,c : 0.3 * (a[-c+1] + a[0] + a[c-1]))(
A, c, out=B)
return B
# multi-constant case
def test_impl4(n):
A = np.arange(n)
B = np.zeros(n)
d = 1
c = 2
numba.stencil(lambda a,c,d : 0.3 * (a[-c+d] + a[0] + a[c-d]))(
A, c, d, out=B)
return B
def test_impl_seq(n):
A = np.arange(n)
B = np.zeros(n)
c = 1
for i in range(1, n - 1):
B[i] = 0.3 * (A[i - c] + A[i] + A[i + c])
return B
n = 100
# constant inference is only possible in parallel path
cpfunc1 = self.compile_parallel(test_impl1, (types.intp,))
cpfunc2 = self.compile_parallel(test_impl2, (types.intp,))
cpfunc3 = self.compile_parallel(test_impl3, (types.intp,))
cpfunc4 = self.compile_parallel(test_impl4, (types.intp,))
expected = test_impl_seq(n)
# parfor result
parfor_output1 = cpfunc1.entry_point(n)
parfor_output2 = cpfunc2.entry_point(n)
parfor_output3 = cpfunc3.entry_point(n)
parfor_output4 = cpfunc4.entry_point(n)
np.testing.assert_almost_equal(parfor_output1, expected, decimal=3)
np.testing.assert_almost_equal(parfor_output2, expected, decimal=3)
np.testing.assert_almost_equal(parfor_output3, expected, decimal=3)
np.testing.assert_almost_equal(parfor_output4, expected, decimal=3)
# check error in regular Python path
with self.assertRaises(ValueError) as e:
test_impl4(4)
self.assertIn("stencil kernel index is not constant, "
"'neighborhood' option required", str(e.exception))
# check error in njit path
# TODO: ValueError should be thrown instead of LoweringError
with self.assertRaises(LoweringError) as e:
njit(test_impl4)(4)
self.assertIn("stencil kernel index is not constant, "
"'neighborhood' option required", str(e.exception))
@skip_unsupported
def test_stencil_parallel_off(self):
"""Tests 1D numba.stencil calls without parallel translation
turned off.
"""
def test_impl(A):
return numba.stencil(lambda a: 0.3 * (a[-1] + a[0] + a[1]))(A)
cpfunc = self.compile_parallel(test_impl, (numba.float64[:],), stencil=False)
self.assertNotIn('@do_scheduling', cpfunc.library.get_llvm_str())
@skip_unsupported
def test_stencil_nested1(self):
    """Checks that a @stencil-decorated kernel defined inside an
    njit(parallel=True) function compiles and matches a sequential
    reference implementation.
    """
    @njit(parallel=True)
    def parallel_impl(arr):
        @stencil
        def kern(a):
            c = 2
            return a[-c + 1]
        return kern(arr)

    def reference_impl(arr):
        out = np.zeros(len(arr), dtype=int)
        for idx in range(1, len(arr)):
            out[idx] = arr[idx - 1]
        return out

    data = np.arange(10)
    np.testing.assert_equal(parallel_impl(data), reference_impl(data))
@skip_unsupported
def test_out_kwarg_w_cval(self):
    """ Issue #3518, out kwarg did not work with cval."""
    # test const value that matches the arg dtype, and one that can be cast
    const_vals = [7, 7.0]

    def kernel(a):
        # forward difference down the first axis
        return (a[0, 0] - a[1, 0])

    for const_val in const_vals:
        stencil_fn = numba.stencil(kernel, cval=const_val)

        # NOTE: this closure captures the current `stencil_fn`; it is
        # compiled below within the same loop iteration, so late binding
        # is not an issue here.
        def wrapped():
            A = np.arange(12).reshape((3, 4))
            ret = np.ones_like(A)
            stencil_fn(A, out=ret)
            return ret

        # stencil function case
        A = np.arange(12).reshape((3, 4))
        # every interior row differs by -4; the last row is the border
        # and must be filled with the cval
        expected = np.full_like(A, -4)
        expected[-1, :] = const_val
        ret = np.ones_like(A)
        stencil_fn(A, out=ret)
        np.testing.assert_almost_equal(ret, expected)

        # wrapped function case, check njit, then njit(parallel=True)
        impls = self.compile_all(wrapped,)
        for impl in impls:
            got = impl.entry_point()
            np.testing.assert_almost_equal(got, expected)

    # now check exceptions for cval dtype mismatch with out kwarg dtype
    stencil_fn = numba.stencil(kernel, cval=1j)

    def wrapped():
        A = np.arange(12).reshape((3, 4))
        ret = np.ones_like(A)
        stencil_fn(A, out=ret)
        return ret

    A = np.arange(12).reshape((3, 4))
    ret = np.ones_like(A)
    with self.assertRaises(ValueError) as e:
        stencil_fn(A, out=ret)
    msg = "cval type does not match stencil return type."
    self.assertIn(msg, str(e.exception))

    # both compilation paths must reject the complex cval; the njit path
    # may surface it as a LoweringError rather than ValueError
    for compiler in [self.compile_njit, self.compile_parallel]:
        try:
            compiler(wrapped,())
        except(ValueError, LoweringError) as e:
            self.assertIn(msg, str(e))
        else:
            raise AssertionError("Expected error was not raised")
class pyStencilGenerator:
"""
Holds the classes and methods needed to generate a python stencil
implementation from a kernel purely using AST transforms.
"""
class Builder:
"""
Provides code generation for the AST manipulation pipeline.
The class methods largely produce AST nodes/trees.
"""
def __init__(self):
self.__state = 0
ids = [chr(ord(v) + x) for v in ['a', 'A'] for x in range(26)]
def varidx(self):
"""
a monotonically increasing index for use in labelling variables.
"""
tmp = self.__state
self.__state = self.__state + 1
return tmp
# builder functions
def gen_alloc_return(self, orig, var, dtype_var, init_val=0):
"""
Generates an AST equivalent to:
`var = np.full(orig.shape, init_val, dtype = dtype_var)`
"""
new = ast.Assign(
targets=[
ast.Name(
id=var,
ctx=ast.Store())],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(
id='np',
ctx=ast.Load()),
attr='full',
ctx=ast.Load()),
args=[
ast.Attribute(
value=ast.Name(
id=orig,
ctx=ast.Load()),
attr='shape',
ctx=ast.Load()),
self.gen_num(init_val)],
keywords=[ast.keyword(arg='dtype',
value=self.gen_call('type', [dtype_var.id]).value)],
starargs=None,
kwargs=None),
)
return new
def gen_assign(self, var, value, index_names):
"""
Generates an AST equivalent to:
`retvar[(*index_names,)] = value[<already present indexing>]`
"""
elts_info = [ast.Name(id=x, ctx=ast.Load()) for x in index_names]
new = ast.Assign(
targets=[
ast.Subscript(
value=ast.Name(
id=var,
ctx=ast.Load()),
slice=ast.Index(
value=ast.Tuple(
elts=elts_info,
ctx=ast.Load())),
ctx=ast.Store())],
value=value)
return new
def gen_loop(self, var, start=0, stop=0, body=None):
    """
    Builds the AST for ``for var in range(start, stop): body``.
    ``start``/``stop`` may be plain ints (wrapped as literals here) or
    pre-built AST nodes, which are used as-is.
    """
    def as_node(bound):
        return ast.Num(n=bound) if isinstance(bound, int) else bound

    range_call = ast.Call(
        func=ast.Name(id='range', ctx=ast.Load()),
        args=[as_node(start), as_node(stop)],
        keywords=[],
        starargs=None, kwargs=None)
    return ast.For(
        target=ast.Name(id=var, ctx=ast.Store()),
        iter=range_call,
        body=body, orelse=[])
def gen_return(self, var):
    """Builds the AST for ``return var``."""
    retval = ast.Name(id=var, ctx=ast.Load())
    return ast.Return(value=retval)
def gen_slice(self, value):
"""Generates an Index with the given value"""
return ast.Index(value=ast.Num(n=value))
def gen_attr(self, name, attr):
    """Builds the AST for the attribute access ``name.attr``."""
    base = ast.Name(id=name, ctx=ast.Load())
    return ast.Attribute(value=base, attr=attr, ctx=ast.Load())
def gen_subscript(self, name, attr, index, offset=None):
"""
Generates an AST equivalent to a subscript, something like:
name.attr[slice(index) +/- offset]
"""
attribute = self.gen_attr(name, attr)
slise = self.gen_slice(index)
ss = ast.Subscript(value=attribute, slice=slise, ctx=ast.Load())
if offset:
pm = ast.Add() if offset >= 0 else ast.Sub()
ss = ast.BinOp(left=ss, op=pm, right=ast.Num(n=abs(offset)))
return ss
def gen_num(self, value):
    """
    Builds an AST literal for *value*.

    Booleans are lowered to ints (``ast.Num`` has no boolean form) and
    negative numbers are emitted as a ``USub`` unary op wrapping the
    positive literal, matching how Python's own parser represents them.
    """
    # pretend bools are ints, ast has no boolean literal support
    if isinstance(value, bool):
        return ast.Num(int(value))
    # BUG FIX: the original tested ``abs(value) >= 0``, which is always
    # true, so the negative branch below was unreachable and negatives
    # were emitted as bare ast.Num nodes.
    if value >= 0:
        return ast.Num(value)
    else:
        return ast.UnaryOp(ast.USub(), ast.Num(-value))
def gen_call(self, call_name, args, kwargs=None):
"""
Generates an AST equivalent to a call, something like:
`call_name(*args, **kwargs)
"""
fixed_args = [ast.Name(id='%s' % x, ctx=ast.Load()) for x in args]
if kwargs is not None:
keywords = [ast.keyword(
arg='%s' %
x, value=ast.parse(str(x)).body[0].value)
for x in kwargs]
else:
keywords = []
func = ast.Name(id=call_name, ctx=ast.Load())
return ast.Expr(value=ast.Call(
func=func, args=fixed_args,
keywords=keywords,
starargs=None, kwargs=None), ctx=ast.Load())
# AST transformers
class FoldConst(ast.NodeTransformer, Builder):
    """
    Constant-folds binary expressions so that constant arithmetic in the
    relative indexes is reduced to a single literal before index rewriting.
    """
    # just support a few for testing purposes
    supported_ops = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
    }

    def visit_BinOp(self, node):
        # fold children first so nested constant expressions collapse
        node = self.generic_visit(node)
        fold = self.supported_ops.get(node.op.__class__)
        lhs = getattr(node, 'left', None)
        rhs = getattr(node, 'right', None)
        if fold and isinstance(lhs, ast.Num) and isinstance(rhs, ast.Num):
            return ast.Num(fold(lhs.n, rhs.n))
        return node
class FixRelIndex(ast.NodeTransformer, Builder):
""" Fixes the relative indexes to be written in as
induction index + relative index
"""
def __init__(self, argnames, const_assigns,
standard_indexing, neighborhood, *args, **kwargs):
ast.NodeTransformer.__init__(self, *args, **kwargs)
pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
self._argnames = argnames
self._const_assigns = const_assigns
self._idx_len = -1
self._mins = None
self._maxes = None
self._imin = np.iinfo(int).min
self._imax = np.iinfo(int).max
self._standard_indexing = standard_indexing \
if standard_indexing else []
self._neighborhood = neighborhood
self._id_pat = '__%sn' if neighborhood else '__%s'
def get_val_from_num(self, node):
    """
    Extracts the literal value from an ``ast.Num`` or from a unary-op
    node wrapping one; raises ValueError for anything else.
    """
    # NOTE(review): assumes any UnaryOp here is a USub; a unary plus
    # would be wrongly negated — confirm callers only produce USub.
    if isinstance(node, ast.Num):
        return node.n
    if isinstance(node, ast.UnaryOp):
        return -node.operand.n
    raise ValueError("get_val_from_num: Unknown indexing operation")
def visit_Subscript(self, node):
"""
Transforms subscripts of the form `a[x]` and `a[x, y, z, ...]`
where `x, y, z` are relative indexes, to forms such as:
`a[x + i]` and `a[x + i, y + j, z + k]` for use in loop induced
indexing.
"""
def handle2dindex(node):
idx = []
for x, val in enumerate(node.slice.value.elts):
useval = self._const_assigns.get(val, val)
idx.append(
ast.BinOp(
left=ast.Name(
id=self._id_pat % self.ids[x],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load()))
if self._idx_len == -1:
self._idx_len = len(idx)
else:
if(self._idx_len != len(idx)):
raise ValueError(
"Relative indexing mismatch detected")
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=ast.Index(
value=ast.Tuple(
elts=idx,
ctx=ast.Load()),
ctx=ast.Load()),
ctx=context)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax] * self._idx_len
self._maxes = [self._imin] * self._idx_len
if not self._neighborhood:
for x, lnode in enumerate(node.slice.value.elts):
if isinstance(lnode, ast.Num) or\
isinstance(lnode, ast.UnaryOp):
relvalue = self.get_val_from_num(lnode)
elif (hasattr(lnode, 'id') and
lnode.id in self._const_assigns):
relvalue = self._const_assigns[lnode.id]
else:
raise ValueError(
"Cannot interpret indexing value")
if relvalue < self._mins[x]:
self._mins[x] = relvalue
if relvalue > self._maxes[x]:
self._maxes[x] = relvalue
else:
for x, lnode in enumerate(self._neighborhood):
self._mins[x] = self._neighborhood[x][0]
self._maxes[x] = self._neighborhood[x][1]
return newnode
def handle1dindex(node):
useval = self._const_assigns.get(
node.slice.value, node.slice.value)
idx = ast.BinOp(left=ast.Name(
id=self._id_pat % self.ids[0],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load())
if self._idx_len == -1:
self._idx_len = 1
else:
if(self._idx_len != 1):
raise ValueError(
"Relative indexing mismatch detected")
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=ast.Index(
value=idx,
ctx=ast.Load()),
ctx=context)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax, ]
self._maxes = [self._imin, ]
if not self._neighborhood:
if isinstance(node.slice.value, ast.Num) or\
isinstance(node.slice.value, ast.UnaryOp):
relvalue = self.get_val_from_num(node.slice.value)
elif (hasattr(node.slice.value, 'id') and
node.slice.value.id in self._const_assigns):
relvalue = self._const_assigns[node.slice.value.id]
else:
raise ValueError("Cannot interpret indexing value")
if relvalue < self._mins[0]:
self._mins[0] = relvalue
if relvalue > self._maxes[0]:
self._maxes[0] = relvalue
else:
self._mins[0] = self._neighborhood[0][0]
self._maxes[0] = self._neighborhood[0][1]
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def computeSlice(i, node):
def gen_idx(val, x):
useval = self._const_assigns.get(val, val)
value = self.get_val_from_num(val)
tmp = ast.BinOp(
left=ast.Name(
id=self._id_pat % self.ids[x],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load())
ast.copy_location(tmp, node)
ast.fix_missing_locations(tmp)
return tmp
newnode = ast.Slice(gen_idx(node.lower, i),
gen_idx(node.upper, i),
node.step)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def computeIndex(i, node):
useval = self._const_assigns.get(node.value, node.value)
idx = ast.BinOp(left=ast.Name(
id=self._id_pat % self.ids[i],
ctx=ast.Load()),
op=ast.Add(),
right=useval,
ctx=ast.Load())
newnode = ast.Index(value=idx, ctx=ast.Load())
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def handleExtSlice(node):
idx = []
for i, val in enumerate(node.slice.dims):
if isinstance(val, ast.Slice):
idx.append(computeSlice(i, val))
if isinstance(val, ast.Index):
idx.append(computeIndex(i, val))
# TODO: handle more node types
if self._idx_len == -1:
self._idx_len = len(node.slice.dims)
else:
if(self._idx_len != len(node.slice.dims)):
raise ValueError(
"Relative indexing mismatch detected")
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=ast.ExtSlice(
dims=idx,
ctx=ast.Load()),
ctx=context
)
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax] * self._idx_len
self._maxes = [self._imin] * self._idx_len
if not self._neighborhood:
for x, anode in enumerate(node.slice.dims):
if isinstance(anode, ast.Slice):
for lnode in [anode.lower, anode.upper]:
if isinstance(lnode, ast.Num) or\
isinstance(lnode, ast.UnaryOp):
relvalue = self.get_val_from_num(lnode)
elif (hasattr(lnode, 'id') and
lnode.id in self._const_assigns):
relvalue = self._const_assigns[lnode.id]
else:
raise ValueError(
"Cannot interpret indexing value")
if relvalue < self._mins[x]:
self._mins[x] = relvalue
if relvalue > self._maxes[x]:
self._maxes[x] = relvalue
else:
val = anode.value
if isinstance(val, ast.Num) or\
isinstance(val, ast.UnaryOp):
relvalue = self.get_val_from_num(val)
elif (hasattr(val, 'id') and
val.id in self._const_assigns):
relvalue = self._const_assigns[val.id]
else:
raise ValueError(
"Cannot interpret indexing value")
if relvalue < self._mins[x]:
self._mins[x] = relvalue
if relvalue > self._maxes[x]:
self._maxes[x] = relvalue
else:
for x, lnode in enumerate(self._neighborhood):
self._mins[x] = self._neighborhood[x][0]
self._maxes[x] = self._neighborhood[x][1]
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
return newnode
def handleSlice(node):
idx = computeSlice(0, node.slice)
idx.ctx=ast.Load()
if isinstance(node.ctx, ast.Store):
msg = ("Assignments to array passed to "
"stencil kernels is not allowed")
raise ValueError(msg)
context = ast.Load()
newnode = ast.Subscript(
value=node.value,
slice=idx,
ctx=context)
ast.copy_location(newnode, node)
ast.fix_missing_locations(newnode)
if self._idx_len == -1:
self._idx_len = 1
else:
if(self._idx_len != 1):
raise ValueError(
"Relative indexing mismatch detected")
# now work out max/min for index ranges i.e. stencil size
if self._mins is None and self._maxes is None:
# first pass
self._mins = [self._imax]
self._maxes = [self._imin]
if not self._neighborhood:
if isinstance(node.slice.value, ast.Num) or\
isinstance(node.slice.value, ast.UnaryOp):
relvalue = self.get_val_from_num(node.slice.value)
elif (hasattr(node.slice.value, 'id') and
node.slice.value.id in self._const_assigns):
relvalue = self._const_assigns[node.slice.value.id]
else:
raise ValueError("Cannot interpret indexing value")
if relvalue < self._mins[0]:
self._mins[0] = relvalue
if relvalue > self._maxes[0]:
self._maxes[0] = relvalue
else:
self._mins[0] = self._neighborhood[0][0]
self._maxes[0] = self._neighborhood[0][1]
return newnode
node = self.generic_visit(node)
if (node.value.id in self._argnames) and (
node.value.id not in self._standard_indexing):
# fancy slice
if isinstance(node.slice, ast.ExtSlice):
return handleExtSlice(node)
# plain slice
if isinstance(node.slice, ast.Slice):
return handleSlice(node)
# 2D index
if isinstance(node.slice.value, ast.Tuple):
return handle2dindex(node)
# 1D index
elif isinstance(node.slice, ast.Index):
return handle1dindex(node)
else: # unknown
raise ValueError("Unhandled subscript")
else:
return node
@property
def idx_len(self):
if self._idx_len == -1:
raise ValueError(
'Transform has not been run/no indexes found')
else:
return self._idx_len
@property
def maxes(self):
return self._maxes
@property
def mins(self):
return self._mins
@property
def id_pattern(self):
return self._id_pat
class TransformReturns(ast.NodeTransformer, Builder):
    """
    Transforms return nodes into assignments.
    """

    def __init__(self, relidx_info, *args, **kwargs):
        # both bases are initialised explicitly; Builder supplies the
        # monotonically increasing variable-index counter (varidx)
        ast.NodeTransformer.__init__(self, *args, **kwargs)
        pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
        # FixRelIndex instance: provides the loop-nest depth (idx_len)
        # and the induction-variable naming pattern (id_pattern)
        self._relidx_info = relidx_info
        self._ret_var_idx = self.varidx()
        # synthesised name of the output array, e.g. '__b0'
        retvar = '__b%s' % self._ret_var_idx
        self._retvarname = retvar

    def visit_Return(self, node):
        self.generic_visit(node)
        nloops = self._relidx_info.idx_len
        var_pattern = self._relidx_info.id_pattern
        # rewrite `return expr` as `__bN[(__a, __b, ...)] = expr`, one
        # induction variable per stencil dimension
        return self.gen_assign(
            self._retvarname, node.value,
            [var_pattern % self.ids[l] for l in range(nloops)])

    @property
    def ret_var_name(self):
        # name of the synthesised stencil output array
        return self._retvarname
class FixFunc(ast.NodeTransformer, Builder):
""" The main function rewriter, takes the body of the kernel and generates:
* checking function calls
* return value allocation
* loop nests
* return site
* Function definition as an entry point
"""
def __init__(self, kprops, relidx_info, ret_info,
cval, standard_indexing, neighborhood, *args, **kwargs):
ast.NodeTransformer.__init__(self, *args, **kwargs)
pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
self._original_kernel = kprops.original_kernel
self._argnames = kprops.argnames
self._retty = kprops.retty
self._relidx_info = relidx_info
self._ret_info = ret_info
self._standard_indexing = standard_indexing \
if standard_indexing else []
self._neighborhood = neighborhood if neighborhood else tuple()
self._relidx_args = [
x for x in self._argnames if x not in self._standard_indexing]
# switch cval to python type
if hasattr(cval, 'dtype'):
self.cval = cval.tolist()
else:
self.cval = cval
self.stencil_arr = self._argnames[0]
def visit_FunctionDef(self, node):
"""
Transforms the kernel function into a function that will perform
the stencil like behaviour on the kernel.
"""
self.generic_visit(node)
# this function validates arguments and is injected into the top
# of the stencil call
def check_stencil_arrays(*args, **kwargs):
# the first has to be an array due to parfors requirements
neighborhood = kwargs.get('neighborhood')
init_shape = args[0].shape
if neighborhood is not None:
if len(init_shape) != len(neighborhood):
raise ValueError("Invalid neighborhood supplied")
for x in args[1:]:
if hasattr(x, 'shape'):
if init_shape != x.shape:
raise ValueError(
"Input stencil arrays do not commute")
checksrc = inspect.getsource(check_stencil_arrays)
check_impl = ast.parse(
checksrc.strip()).body[0] # don't need module
ast.fix_missing_locations(check_impl)
checker_call = self.gen_call(
'check_stencil_arrays',
self._relidx_args,
kwargs=['neighborhood'])
nloops = self._relidx_info.idx_len
def computebound(mins, maxs):
minlim = 0 if mins >= 0 else -mins
maxlim = -maxs if maxs > 0 else 0
return (minlim, maxlim)
var_pattern = self._relidx_info.id_pattern
loop_body = node.body
# create loop nests
loop_count = 0
for l in range(nloops):
minlim, maxlim = computebound(
self._relidx_info.mins[loop_count],
self._relidx_info.maxes[loop_count])
minbound = minlim
maxbound = self.gen_subscript(
self.stencil_arr, 'shape', loop_count, maxlim)
loops = self.gen_loop(
var_pattern % self.ids[loop_count],
minbound, maxbound, body=loop_body)
loop_body = [loops]
loop_count += 1
# patch loop location
ast.copy_location(loops, node)
_rettyname = self._retty.targets[0]
# allocate a return
retvar = self._ret_info.ret_var_name
allocate = self.gen_alloc_return(
self.stencil_arr, retvar, _rettyname, self.cval)
ast.copy_location(allocate, node)
# generate the return
returner = self.gen_return(retvar)
ast.copy_location(returner, node)
add_kwarg = [ast.arg('neighborhood', None)]
defaults = []
newargs = ast.arguments(
args=node.args.args +
add_kwarg,
defaults=defaults,
vararg=None,
kwarg=None,
kwonlyargs=[],
kw_defaults=[],
posonlyargs=[])
new = ast.FunctionDef(
name='__%s' %
node.name,
args=newargs,
body=[
check_impl,
checker_call,
self._original_kernel,
self._retty,
allocate,
loops,
returner],
decorator_list=[])
ast.copy_location(new, node)
return new
class GetKernelProps(ast.NodeVisitor, Builder):
""" Gets the argument names and other properties
of the original kernel.
"""
def __init__(self, *args, **kwargs):
ast.NodeVisitor.__init__(self, *args, **kwargs)
pyStencilGenerator.Builder.__init__(self, *args, **kwargs)
self._argnames = None
self._kwargnames = None
self._retty = None
self._original_kernel = None
self._const_assigns = {}
def visit_FunctionDef(self, node):
if self._argnames is not None or self._kwargnames is not None:
raise RuntimeError("multiple definition of function/args?")
attr = 'arg'
self._argnames = [getattr(x, attr) for x in node.args.args]
if node.args.kwarg:
self._kwargnames = [x.arg for x in node.args.kwarg]
compute_retdtype = self.gen_call(node.name, self._argnames)
self._retty = ast.Assign(targets=[ast.Name(
id='__retdtype',
ctx=ast.Store())], value=compute_retdtype.value)
self._original_kernel = ast.fix_missing_locations(deepcopy(node))
self.generic_visit(node)
def visit_Assign(self, node):
    """
    Records simple constant assignments (``x = 3``, ``x = -3``,
    ``x = +3``) into ``self._const_assigns`` so relative indexes written
    via such variables can be resolved later.
    """
    self.generic_visit(node)
    tgt = node.targets
    if len(tgt) == 1:
        target = tgt[0]
        if isinstance(target, ast.Name):
            if isinstance(node.value, ast.Num):
                self._const_assigns[target.id] = node.value.n
            elif isinstance(node.value, ast.UnaryOp):
                # BUG FIX: the original tested
                # ``isinstance(node.value, ast.UAdd)`` (always False —
                # the node is a UnaryOp) and then read ``node.value.n``,
                # which does not exist on UnaryOp.  Inspect the .op and
                # read the wrapped .operand instead.
                if isinstance(node.value.op, ast.UAdd):
                    self._const_assigns[target.id] = node.value.operand.n
                else:
                    self._const_assigns[target.id] = -node.value.operand.n
@property
def argnames(self):
"""
The names of the arguments to the function
"""
return self._argnames
@property
def const_assigns(self):
"""
A map of variable name to constant for variables that are simple
constant assignments
"""
return self._const_assigns
@property
def retty(self):
"""
The return type
"""
return self._retty
@property
def original_kernel(self):
"""
The original unmutated kernel
"""
return self._original_kernel
class FixCalls(ast.NodeTransformer):
    """ Fixes call sites for astor (in case it is in use) """

    def visit_Call(self, node):
        self.generic_visit(node)
        # rebuild the call with explicit starargs/kwargs attributes so
        # older astor releases can decompile the node
        replacement = ast.Call(
            func=node.func,
            args=node.args,
            keywords=node.keywords,
            starargs=None,
            kwargs=None)
        return replacement
def generate_stencil_tree(
self, func, cval, standard_indexing, neighborhood):
"""
Generates the AST tree for a stencil from:
func - a python stencil kernel
cval, standard_indexing and neighborhood as per the @stencil decorator
"""
src = inspect.getsource(func)
tree = ast.parse(src.strip())
# Prints debugging information if True.
# If astor is installed the decompilation of the AST is also printed
DEBUG = False
if DEBUG:
print("ORIGINAL")
print(ast.dump(tree))
def pipeline(tree):
""" the pipeline of manipulations """
# get the arg names
kernel_props = self.GetKernelProps()
kernel_props.visit(tree)
argnm = kernel_props.argnames
const_asgn = kernel_props.const_assigns
if standard_indexing:
for x in standard_indexing:
if x not in argnm:
msg = ("Non-existent variable "
"specified in standard_indexing")
raise ValueError(msg)
# fold consts
fold_const = self.FoldConst()
fold_const.visit(tree)
# rewrite the relative indices as induced indices
relidx_fixer = self.FixRelIndex(
argnm, const_asgn, standard_indexing, neighborhood)
relidx_fixer.visit(tree)
# switch returns into assigns
return_transformer = self.TransformReturns(relidx_fixer)
return_transformer.visit(tree)
# generate the function body and loop nests and assemble
fixer = self.FixFunc(
kernel_props,
relidx_fixer,
return_transformer,
cval,
standard_indexing,
neighborhood)
fixer.visit(tree)
# fix up the call sites so they work better with astor
callFixer = self.FixCalls()
callFixer.visit(tree)
ast.fix_missing_locations(tree.body[0])
# run the pipeline of transforms on the tree
pipeline(tree)
if DEBUG:
print("\n\n\nNEW")
print(ast.dump(tree, include_attributes=True))
try:
import astor
print(astor.to_source(tree))
except ImportError:
pass
return tree
def pyStencil(func_or_mode='constant', **options):
    """
    A pure python implementation of (a large subset of) stencil
    functionality, equivalent to StencilFunc.

    Can be applied either directly to a kernel,
    ``pyStencil(kernel, **opts)``, or as a decorator factory,
    ``pyStencil('constant', **opts)(kernel)``, mirroring numba.stencil.

    Supported options: cval, standard_indexing, neighborhood.
    """
    if not isinstance(func_or_mode, str):
        mode = 'constant'  # default style
        func = func_or_mode
    else:
        mode = func_or_mode
        func = None

    for option in options:
        if option not in ["cval", "standard_indexing", "neighborhood"]:
            raise ValueError("Unknown stencil option " + option)

    if mode != 'constant':
        raise ValueError("Unsupported mode style " + mode)

    # BUG FIX: in the decorator-factory form (mode string only), `func`
    # is None and the original fell straight through to
    # inspect.getsource(None).  Return a decorator instead.
    if func is None:
        return lambda kernel: pyStencil(kernel, **options)

    cval = options.get('cval', 0)
    standard_indexing = options.get('standard_indexing', None)
    neighborhood = options.get('neighborhood', None)

    # generate a new AST tree from the kernel func
    gen = pyStencilGenerator()
    tree = gen.generate_stencil_tree(func, cval, standard_indexing,
                                     neighborhood)

    # breathe life into the tree
    mod_code = compile(tree, filename="<ast>", mode="exec")
    # NOTE(review): assumes the generated module's first constant is the
    # stencil function's code object — true for the tree built above.
    func_code = mod_code.co_consts[0]
    full_func = pytypes.FunctionType(func_code, globals())
    return full_func
@skip_unsupported
class TestManyStencils(TestStencilBase):
def __init__(self, *args, **kwargs):
super(TestManyStencils, self).__init__(*args, **kwargs)
def check(self, pyfunc, *args, **kwargs):
"""
For a given kernel:
The expected result is computed from a pyStencil version of the
stencil.
The following results are then computed:
* from a pure @stencil decoration of the kernel.
* from the njit of a trivial wrapper function around the pure @stencil
decorated function.
* from the njit(parallel=True) of a trivial wrapper function around
the pure @stencil decorated function.
The results are then compared.
"""
options = kwargs.get('options', dict())
expected_exception = kwargs.get('expected_exception')
# DEBUG print output arrays
DEBUG_OUTPUT = False
# collect fails
should_fail = []
should_not_fail = []
# runner that handles fails
@contextmanager
def errorhandler(exty=None, usecase=None):
try:
yield
except Exception as e:
if exty is not None:
lexty = exty if hasattr(exty, '__iter__') else [exty, ]
found = False
for ex in lexty:
found |= isinstance(e, ex)
if not found:
raise
else:
should_not_fail.append(
(usecase, "%s: %s" %
(type(e), str(e))))
else:
if exty is not None:
should_fail.append(usecase)
if isinstance(expected_exception, dict):
pystencil_ex = expected_exception['pyStencil']
stencil_ex = expected_exception['stencil']
njit_ex = expected_exception['njit']
parfor_ex = expected_exception['parfor']
else:
pystencil_ex = expected_exception
stencil_ex = expected_exception
njit_ex = expected_exception
parfor_ex = expected_exception
stencil_args = {'func_or_mode': pyfunc}
stencil_args.update(options)
expected_present = True
try:
# ast impl
ast_impl = pyStencil(func_or_mode=pyfunc, **options)
expected = ast_impl(
*args, neighborhood=options.get('neighborhood'))
if DEBUG_OUTPUT:
print("\nExpected:\n", expected)
except Exception as ex:
# check exception is expected
with errorhandler(pystencil_ex, "pyStencil"):
raise ex
pyStencil_unhandled_ex = ex
expected_present = False
stencilfunc_output = None
with errorhandler(stencil_ex, "@stencil"):
stencil_func_impl = stencil(**stencil_args)
# stencil result
stencilfunc_output = stencil_func_impl(*args)
# wrapped stencil impl, could this be generated?
if len(args) == 1:
def wrap_stencil(arg0):
return stencil_func_impl(arg0)
elif len(args) == 2:
def wrap_stencil(arg0, arg1):
return stencil_func_impl(arg0, arg1)
elif len(args) == 3:
def wrap_stencil(arg0, arg1, arg2):
return stencil_func_impl(arg0, arg1, arg2)
else:
raise ValueError(
"Up to 3 arguments can be provided, found %s" %
len(args))
sig = tuple([numba.typeof(x) for x in args])
njit_output = None
with errorhandler(njit_ex, "njit"):
wrapped_cfunc = self.compile_njit(wrap_stencil, sig)
# njit result
njit_output = wrapped_cfunc.entry_point(*args)
parfor_output = None
with errorhandler(parfor_ex, "parfors"):
wrapped_cpfunc = self.compile_parallel(wrap_stencil, sig)
# parfor result
parfor_output = wrapped_cpfunc.entry_point(*args)
if DEBUG_OUTPUT:
print("\n@stencil_output:\n", stencilfunc_output)
print("\nnjit_output:\n", njit_output)
print("\nparfor_output:\n", parfor_output)
if expected_present:
try:
if not stencil_ex:
np.testing.assert_almost_equal(
stencilfunc_output, expected, decimal=1)
self.assertEqual(expected.dtype, stencilfunc_output.dtype)
except Exception as e:
should_not_fail.append(
('@stencil', "%s: %s" %
(type(e), str(e))))
print("@stencil failed: %s" % str(e))
try:
if not njit_ex:
np.testing.assert_almost_equal(
njit_output, expected, decimal=1)
self.assertEqual(expected.dtype, njit_output.dtype)
except Exception as e:
should_not_fail.append(('njit', "%s: %s" % (type(e), str(e))))
print("@njit failed: %s" % str(e))
try:
if not parfor_ex:
np.testing.assert_almost_equal(
parfor_output, expected, decimal=1)
self.assertEqual(expected.dtype, parfor_output.dtype)
try:
self.assertIn(
'@do_scheduling',
wrapped_cpfunc.library.get_llvm_str())
except AssertionError:
msg = 'Could not find `@do_scheduling` in LLVM IR'
raise AssertionError(msg)
except Exception as e:
should_not_fail.append(
('parfors', "%s: %s" %
(type(e), str(e))))
print("@njit(parallel=True) failed: %s" % str(e))
if DEBUG_OUTPUT:
print("\n\n")
if should_fail:
msg = ["%s" % x for x in should_fail]
raise RuntimeError(("The following implementations should have "
"raised an exception but did not:\n%s") % msg)
if should_not_fail:
impls = ["%s" % x[0] for x in should_not_fail]
errs = ''.join(["%s: Message: %s\n\n" %
x for x in should_not_fail])
str1 = ("The following implementations should not have raised an "
"exception but did:\n%s\n" % impls)
str2 = "Errors were:\n\n%s" % errs
raise RuntimeError(str1 + str2)
if not expected_present:
if expected_exception is None:
raise RuntimeError(
"pyStencil failed, was not caught/expected",
pyStencil_unhandled_ex)
def exception_dict(self, **kwargs):
    """Builds the expected-exception map used by check(): every
    implementation key ('pyStencil', 'stencil', 'njit', 'parfor')
    defaults to None, overridden by any supplied keyword arguments.
    """
    expected = dict.fromkeys(['pyStencil', 'stencil', 'njit', 'parfor'])
    expected.update(kwargs)
    return expected
def test_basic00(self):
    """rel index"""
    def kernel(a):
        return a[0, 0]
    data = np.arange(12).reshape(3, 4)
    self.check(kernel, data)
def test_basic01(self):
"""rel index add const"""
def kernel(a):
return a[0, 1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a)
def test_basic02(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, -1]
self.check(kernel, a)
def test_basic03(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1, 0]
self.check(kernel, a)
def test_basic04(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 0]
self.check(kernel, a)
def test_basic05(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 1]
self.check(kernel, a)
def test_basic06(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1, -1]
self.check(kernel, a)
def test_basic07(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1, 1]
self.check(kernel, a)
def test_basic08(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, -1]
self.check(kernel, a)
def test_basic09(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-2, 2]
self.check(kernel, a)
def test_basic10(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[1, 0]
self.check(kernel, a)
def test_basic11(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 0] + a[1, 0]
self.check(kernel, a)
def test_basic12(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, 1] + a[1, -1]
self.check(kernel, a)
def test_basic13(self):
"""rel index add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[-1, -1] + a[1, 1]
self.check(kernel, a)
def test_basic14(self):
"""rel index add domain change const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + 1j
self.check(kernel, a)
def test_basic14b(self):
"""rel index add domain change const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
t = 1.j
return a[0, 0] + t
self.check(kernel, a)
def test_basic15(self):
"""two rel index, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[1, 0] + 1.
self.check(kernel, a)
def test_basic16(self):
"""two rel index OOB, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[10, 0] + 1.
# only pyStencil bounds checks
ex = self.exception_dict(pyStencil=IndexError)
self.check(kernel, a, expected_exception=ex)
def test_basic17(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[2, 0] + 1.
self.check(kernel, a)
def test_basic18(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[-2, 0] + 1.
self.check(kernel, a)
def test_basic19(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[0, 3] + 1.
self.check(kernel, a)
def test_basic20(self):
"""two rel index boundary test, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[0, -3] + 1.
self.check(kernel, a)
def test_basic21(self):
"""same rel, add const"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0, 0] + a[0, 0] + 1.
self.check(kernel, a)
def test_basic22(self):
"""rel idx const expr folding, add const"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[1 + 0, 0] + a[0, 0] + 1.
self.check(kernel, a)
def test_basic23(self):
"""rel idx, work in body"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
x = np.sin(10 + a[2, 1])
return a[1 + 0, 0] + a[0, 0] + x
self.check(kernel, a)
def test_basic23a(self):
"""rel idx, dead code should not impact rel idx"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
x = np.sin(10 + a[2, 1])
return a[1 + 0, 0] + a[0, 0]
self.check(kernel, a)
def test_basic24(self):
"""1d idx on 2d arr"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return a[0] + 1.
self.check(kernel, a, expected_exception=[ValueError, TypingError])
def test_basic25(self):
"""no idx on 2d arr"""
a = np.arange(12).reshape(3, 4)
def kernel(a):
return 1.
self.check(kernel, a, expected_exception=[ValueError, LoweringError])
def test_basic26(self):
"""3d arr"""
a = np.arange(64).reshape(4, 8, 2)
def kernel(a):
return a[0, 0, 0] - a[0, 1, 0] + 1.
self.check(kernel, a)
def test_basic27(self):
"""4d arr"""
a = np.arange(128).reshape(4, 8, 2, 2)
def kernel(a):
return a[0, 0, 0, 0] - a[0, 1, 0, -1] + 1.
self.check(kernel, a)
def test_basic28(self):
"""type widen """
a = np.arange(12).reshape(3, 4).astype(np.float32)
def kernel(a):
return a[0, 0] + np.float64(10.)
self.check(kernel, a)
def test_basic29(self):
"""const index from func """
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, int(np.cos(0))]
self.check(kernel, a, expected_exception=[ValueError, LoweringError])
def test_basic30(self):
    """signed zeros"""
    a = np.arange(12.).reshape(3, 4)

    def kernel(a):
        # -0 is the same integer as 0, so this reads the centre element;
        # checks that negative-zero indices do not confuse index handling.
        return a[-0, -0]
    self.check(kernel, a)
def test_basic31(self):
"""does a const propagate? 2D"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
t = 1
return a[t, 0]
self.check(kernel, a)
@unittest.skip("constant folding not implemented")
def test_basic31b(self):
"""does a const propagate?"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
s = 1
t = 1 - s
return a[t, 0]
self.check(kernel, a)
def test_basic31c(self):
"""does a const propagate? 1D"""
a = np.arange(12.)
def kernel(a):
t = 1
return a[t]
self.check(kernel, a)
def test_basic32(self):
"""typed int index"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[np.int8(1), 0]
self.check(kernel, a, expected_exception=[ValueError, LoweringError])
def test_basic33(self):
"""add 0d array"""
a = np.arange(12.).reshape(3, 4)
def kernel(a):
return a[0, 0] + np.array(1)
self.check(kernel, a)
def test_basic34(self):
"""More complex rel index with dependency on addition rel index"""
def kernel(a):
g = 4. + a[0, 1]
return g + (a[0, 1] + a[1, 0] + a[0, -1] + np.sin(a[-2, 0]))
a = np.arange(144).reshape(12, 12)
self.check(kernel, a)
def test_basic35(self):
"""simple cval """
def kernel(a):
return a[0, 1]
a = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(kernel, a, options={'cval': 5}, expected_exception=ex)
def test_basic36(self):
"""more complex with cval"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a, options={'cval': 5.})
def test_basic37(self):
"""cval is expr"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a, options={'cval': 5 + 63.})
def test_basic38(self):
"""cval is complex"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(kernel, a, options={'cval': 1.j}, expected_exception=ex)
def test_basic39(self):
"""cval is func expr"""
def kernel(a):
return a[0, 1] + a[0, -1] + a[1, -1] + a[1, -1]
a = np.arange(12.).reshape(3, 4)
self.check(kernel, a, options={'cval': np.sin(3.) + np.cos(2)})
def test_basic40(self):
"""2 args!"""
def kernel(a, b):
return a[0, 1] + b[0, -2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b)
def test_basic41(self):
"""2 args! rel arrays wildly not same size!"""
def kernel(a, b):
return a[0, 1] + b[0, -2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(1.).reshape(1, 1)
self.check(
kernel, a, b, expected_exception=[
ValueError, AssertionError])
def test_basic42(self):
"""2 args! rel arrays very close in size"""
def kernel(a, b):
return a[0, 1] + b[0, -2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(9.).reshape(3, 3)
self.check(
kernel, a, b, expected_exception=[
ValueError, AssertionError])
def test_basic43(self):
"""2 args more complexity"""
def kernel(a, b):
return a[0, 1] + a[1, 2] + b[-2, 0] + b[0, -1]
a = np.arange(30.).reshape(5, 6)
b = np.arange(30.).reshape(5, 6)
self.check(kernel, a, b)
def test_basic44(self):
"""2 args, has assignment before use"""
def kernel(a, b):
a[0, 1] = 12
return a[0, 1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, expected_exception=[
ValueError, LoweringError])
def test_basic45(self):
"""2 args, has assignment and then cross dependency"""
def kernel(a, b):
a[0, 1] = 12
return a[0, 1] + a[1, 0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, expected_exception=[
ValueError, LoweringError])
def test_basic46(self):
"""2 args, has cross relidx assignment"""
def kernel(a, b):
a[0, 1] = b[1, 2]
return a[0, 1] + a[1, 0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, expected_exception=[
ValueError, LoweringError])
def test_basic47(self):
"""3 args"""
def kernel(a, b, c):
return a[0, 1] + b[1, 0] + c[-1, 0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
c = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, c)
# matches pyStencil, but all ought to fail
# probably hard to detect?
def test_basic48(self):
    """2 args, has assignment before use via memory alias"""
    def kernel(a):
        # c aliases a's memory through the transpose; the broadcast
        # write below therefore mutates a before the relative read.
        c = a.T
        c[:, :] = 10
        return a[0, 1]
    a = np.arange(12.).reshape(3, 4)
    self.check(kernel, a)
def test_basic49(self):
"""2 args, standard_indexing on second"""
def kernel(a, b):
return a[0, 1] + b[0, 3]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
@unittest.skip("dynamic range checking not implemented")
def test_basic50(self):
"""2 args, standard_indexing OOB"""
def kernel(a, b):
return a[0, 1] + b[0, 15]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'b'},
expected_exception=IndexError)
def test_basic51(self):
"""2 args, standard_indexing, no relidx"""
def kernel(a, b):
return a[0, 1] + b[0, 2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel, a, b, options={
'standard_indexing': [
'a', 'b']}, expected_exception=[
ValueError, LoweringError])
def test_basic52(self):
"""3 args, standard_indexing on middle arg """
def kernel(a, b, c):
return a[0, 1] + b[0, 1] + c[1, 2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(4.).reshape(2, 2)
c = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, c, options={'standard_indexing': 'b'})
def test_basic53(self):
"""2 args, standard_indexing on variable that does not exist"""
def kernel(a, b):
return a[0, 1] + b[0, 2]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=Exception,
parfor=ValueError,
njit=Exception)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'c'},
expected_exception=ex)
def test_basic54(self):
"""2 args, standard_indexing, index from var"""
def kernel(a, b):
t = 2
return a[0, 1] + b[0, t]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic55(self):
"""2 args, standard_indexing, index from more complex var"""
def kernel(a, b):
s = 1
t = 2 - s
return a[0, 1] + b[0, t]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic56(self):
"""2 args, standard_indexing, added complexity """
def kernel(a, b):
s = 1
acc = 0
for k in b[0, :]:
acc += k
t = 2 - s - 1
return a[0, 1] + b[0, t] + acc
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic57(self):
"""2 args, standard_indexing, split index operation """
def kernel(a, b):
c = b[0]
return a[0, 1] + c[1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic58(self):
"""2 args, standard_indexing, split index with broadcast mutation """
def kernel(a, b):
c = b[0] + 1
return a[0, 1] + c[1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(kernel, a, b, options={'standard_indexing': 'b'})
def test_basic59(self):
"""3 args, mix of array, relative and standard indexing and const"""
def kernel(a, b, c):
return a[0, 1] + b[1, 1] + c
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
c = 10
self.check(kernel, a, b, c, options={'standard_indexing': ['b', 'c']})
def test_basic60(self):
"""3 args, mix of array, relative and standard indexing,
tuple pass through"""
def kernel(a, b, c):
return a[0, 1] + b[1, 1] + c[0]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
c = (10,)
# parfors does not support tuple args for stencil kernels
ex = self.exception_dict(parfor=ValueError)
self.check(
kernel, a, b, c, options={
'standard_indexing': [
'b', 'c']}, expected_exception=ex)
def test_basic61(self):
"""2 args, standard_indexing on first"""
def kernel(a, b):
return a[0, 1] + b[1, 1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'a'},
expected_exception=Exception)
def test_basic62(self):
"""2 args, standard_indexing and cval"""
def kernel(a, b):
return a[0, 1] + b[1, 1]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'b',
'cval': 10.})
def test_basic63(self):
"""2 args, standard_indexing applied to relative, should fail,
non-const idx"""
def kernel(a, b):
return a[0, b[0, 1]]
a = np.arange(12.).reshape(3, 4)
b = np.arange(12).reshape(3, 4)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(
kernel,
a,
b,
options={
'standard_indexing': 'b'},
expected_exception=ex)
# stencil, njit, parfors all fail. Does this make sense?
def test_basic64(self):
"""1 arg that uses standard_indexing"""
def kernel(a):
return a[0, 0]
a = np.arange(12.).reshape(3, 4)
self.check(
kernel,
a,
options={
'standard_indexing': 'a'},
expected_exception=[
ValueError,
LoweringError])
def test_basic65(self):
    """basic induced neighborhood test"""
    def kernel(a):
        # 30-point trailing moving average over a[-29]..a[0]; the loop
        # bounds match the declared neighborhood exactly.
        cumul = 0
        for i in range(-29, 1):
            cumul += a[i]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
# Should this work? a[0] is out of neighborhood?
def test_basic66(self):
    """basic const neighborhood test"""
    def kernel(a):
        # Loop bounds mirror the declared neighborhood, but only the
        # constant index a[0] is ever read inside the loop.
        cumul = 0
        for i in range(-29, 1):
            cumul += a[0]
        return cumul / 30
    a = np.arange(60.)
    self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
def test_basic67(self):
"""basic 2d induced neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-5, 1):
for j in range(-10, 1):
cumul += a[i, j]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
def test_basic67b(self):
"""basic 2d induced 1D neighborhood"""
def kernel(a):
cumul = 0
for j in range(-10, 1):
cumul += a[0, j]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(
kernel,
a,
options={
'neighborhood': (
(-10,
0),
)},
expected_exception=[
TypingError,
ValueError])
# Should this work or is it UB? a[i, 0] is out of neighborhood?
def test_basic68(self):
"""basic 2d one induced, one cost neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-5, 1):
for j in range(-10, 1):
cumul += a[i, 0]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
# Should this work or is it UB? a[0, 0] is out of neighborhood?
def test_basic69(self):
"""basic 2d two cost neighborhood test"""
def kernel(a):
cumul = 0
for i in range(-5, 1):
for j in range(-10, 1):
cumul += a[0, 0]
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
def test_basic70(self):
"""neighborhood adding complexity"""
def kernel(a):
cumul = 0
zz = 12.
for i in range(-5, 1):
t = zz + i
for j in range(-10, 1):
cumul += a[i, j] + t
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-5, 0), (-10, 0),)})
def test_basic71(self):
"""neighborhood, type change"""
def kernel(a):
cumul = 0
for i in range(-29, 1):
k = 0.
if i > -15:
k = 1j
cumul += a[i] + k
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
def test_basic72(self):
"""neighborhood, narrower range than specified"""
def kernel(a):
cumul = 0
for i in range(-19, -3):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-29, 0),)})
def test_basic73(self):
"""neighborhood, +ve range"""
def kernel(a):
cumul = 0
for i in range(5, 11):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((5, 10),)})
def test_basic73b(self):
"""neighborhood, -ve range"""
def kernel(a):
cumul = 0
for i in range(-10, -4):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-10, -5),)})
def test_basic74(self):
"""neighborhood, -ve->+ve range span"""
def kernel(a):
cumul = 0
for i in range(-5, 11):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-5, 10),)})
def test_basic75(self):
"""neighborhood, -ve->-ve range span"""
def kernel(a):
cumul = 0
for i in range(-10, -1):
cumul += a[i]
return cumul / 30
a = np.arange(60.)
self.check(kernel, a, options={'neighborhood': ((-10, -2),)})
def test_basic76(self):
"""neighborhood, mixed range span"""
def kernel(a):
cumul = 0
zz = 12.
for i in range(-3, 0):
t = zz + i
for j in range(-3, 4):
cumul += a[i, j] + t
return cumul / (10 * 5)
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'neighborhood': ((-3, -1), (-3, 3),)})
def test_basic77(self):
""" neighborhood, two args """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, b, options={'neighborhood': ((-3, 0), (-3, 0),)})
def test_basic78(self):
""" neighborhood, two args, -ve range, -ve range """
def kernel(a, b):
cumul = 0
for i in range(-6, -2):
for j in range(-7, -1):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(15. * 20.).reshape(15, 20)
b = np.arange(15. * 20.).reshape(15, 20)
self.check(
kernel, a, b, options={
'neighborhood': (
(-6, -3), (-7, -2),)})
def test_basic78b(self):
""" neighborhood, two args, -ve range, +ve range """
def kernel(a, b):
cumul = 0
for i in range(-6, -2):
for j in range(2, 10):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(15. * 20.).reshape(15, 20)
b = np.arange(15. * 20.).reshape(15, 20)
self.check(kernel, a, b, options={'neighborhood': ((-6, -3), (2, 9),)})
def test_basic79(self):
""" neighborhood, two incompatible args """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[i, j]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = np.arange(10. * 20.).reshape(10, 10, 2)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=TypingError,
parfor=TypingError,
njit=TypingError)
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0),)}, expected_exception=ex)
def test_basic80(self):
""" neighborhood, type change """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = 12.j
self.check(kernel, a, b, options={'neighborhood': ((-3, 0), (-3, 0))})
def test_basic81(self):
""" neighborhood, dimensionally incompatible arrays """
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[i]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = a[0].copy()
ex = self.exception_dict(
pyStencil=ValueError,
stencil=TypingError,
parfor=AssertionError,
njit=TypingError)
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0))}, expected_exception=ex)
def test_basic82(self):
""" neighborhood, with standard_indexing"""
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[1, 3]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = a.copy()
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0)), 'standard_indexing': 'b'})
def test_basic83(self):
""" neighborhood, with standard_indexing and cval"""
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[1, 3]
return cumul / (9.)
a = np.arange(10. * 20.).reshape(10, 20)
b = a.copy()
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0)), 'standard_indexing': 'b', 'cval': 1.5})
def test_basic84(self):
""" kernel calls njit """
def kernel(a):
return a[0, 0] + addone_njit(a[0, 1])
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic85(self):
""" kernel calls njit(parallel=True)"""
def kernel(a):
return a[0, 0] + addone_pjit(a[0, 1])
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
# njit/parfors fail correctly, but the error message isn't very informative
def test_basic86(self):
""" bad kwarg """
def kernel(a):
return a[0, 0]
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a, options={'bad': 10},
expected_exception=[ValueError, TypingError])
def test_basic87(self):
""" reserved arg name in use """
def kernel(__sentinel__):
return __sentinel__[0, 0]
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic88(self):
""" use of reserved word """
def kernel(a, out):
return out * a[0, 1]
a = np.arange(12.).reshape(3, 4)
ex = self.exception_dict(
pyStencil=ValueError,
stencil=ValueError,
parfor=ValueError,
njit=LoweringError)
self.check(
kernel,
a,
1.0,
options={},
expected_exception=ex)
def test_basic89(self):
""" basic multiple return"""
def kernel(a):
if a[0, 1] > 10:
return 10.
elif a[0, 3] < 8:
return a[0, 0]
else:
return 7.
a = np.arange(10. * 20.).reshape(10, 20)
self.check(kernel, a)
def test_basic90(self):
""" neighborhood, with standard_indexing and cval, multiple returns"""
def kernel(a, b):
cumul = 0
for i in range(-3, 1):
for j in range(-3, 1):
cumul += a[i, j] + b[1, 3]
res = cumul / (9.)
if res > 200.0:
return res + 1.0
else:
return res
a = np.arange(10. * 20.).reshape(10, 20)
b = a.copy()
self.check(
kernel, a, b, options={
'neighborhood': (
(-3, 0), (-3, 0)), 'standard_indexing': 'b', 'cval': 1.5})
def test_basic91(self):
    """ Issue #3454, const(int) == const(int) evaluating incorrectly. """
    def kernel(a):
        b = 0
        # Deliberately-false constant comparison: regression test for
        # issue #3454 where const(int) == const(int) folded incorrectly
        # (which would have made b == 2).
        if(2 == 0):
            b = 2
        return a[0, 0] + b
    a = np.arange(10. * 20.).reshape(10, 20)
    self.check(kernel, a)
def test_basic92(self):
""" Issue #3497, bool return type evaluating incorrectly. """
def kernel(a):
return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^
a[0, -1] ^ a[0, 0] ^ a[0, 1] ^
a[1, -1] ^ a[1, 0] ^ a[1, 1])
A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_)
self.check(kernel, A)
def test_basic93(self):
""" Issue #3497, bool return type evaluating incorrectly. """
def kernel(a):
return (a[-1, -1] ^ a[-1, 0] ^ a[-1, 1] ^
a[0, -1] ^ a[0, 0] ^ a[0, 1] ^
a[1, -1] ^ a[1, 0] ^ a[1, 1])
A = np.array(np.arange(20) % 2).reshape(4, 5).astype(np.bool_)
self.check(kernel, A, options={'cval': True})
def test_basic94(self):
    """ Issue #3528. Support for slices. """
    def kernel(a):
        # Relative 3x3 window expressed as slices; the neighborhood is
        # supplied explicitly (inference from slices is not yet
        # supported -- see the skipped test_basic95).
        return np.median(a[-1:2, -1:2])
    a = np.arange(20, dtype=np.uint32).reshape(4, 5)
    self.check(kernel, a, options={'neighborhood': ((-1, 1), (-1, 1),)})
@unittest.skip("not yet supported")
def test_basic95(self):
""" Slice, calculate neighborhood. """
def kernel(a):
return np.median(a[-1:2, -3:4])
a = np.arange(20, dtype=np.uint32).reshape(4, 5)
self.check(kernel, a)
def test_basic96(self):
""" 1D slice. """
def kernel(a):
return np.median(a[-1:2])
a = np.arange(20, dtype=np.uint32)
self.check(kernel, a, options={'neighborhood': ((-1, 1),)})
@unittest.skip("not yet supported")
def test_basic97(self):
""" 2D slice and index. """
def kernel(a):
return np.median(a[-1:2, 3])
a = np.arange(20, dtype=np.uint32).reshape(4, 5)
self.check(kernel, a)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "3b45cfaeb1b607c4930ba9f456afeb73",
"timestamp": "",
"source": "github",
"line_count": 2793,
"max_line_length": 94,
"avg_line_length": 34.58575008950949,
"alnum_prop": 0.4722250978281124,
"repo_name": "stuartarchibald/numba",
"id": "264508ce5debdedaea5f8db0102406b0eeb698f2",
"size": "96683",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "numba/tests/test_stencils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6768"
},
{
"name": "C",
"bytes": "625527"
},
{
"name": "C++",
"bytes": "87110"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8342308"
},
{
"name": "Shell",
"bytes": "9062"
}
],
"symlink_target": ""
} |
"""
Demos for basic time encoding and decoding algorithms that use
IAF neurons.
"""
# Copyright (c) 2009-2015, Lev Givon
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
import sys
import numpy as np
# Set matplotlib backend so that plots can be generated without a
# display:
import matplotlib
matplotlib.use('AGG')
from bionet.utils.misc import func_timer
import bionet.utils.band_limited as bl
import bionet.utils.plotting as pl
import bionet.ted.iaf as iaf
# For determining output plot file names:
output_name = 'iaf_demo_'
output_count = 0
output_ext = '.png'
# Define algorithm parameters and input signal:
dur = 0.1
dt = 1e-6
f = 32
bw = 2*np.pi*f
t = np.arange(0, dur, dt)
np.random.seed(0)
noise_power = None
if noise_power == None:
fig_title = 'IAF Input Signal with no Noise';
else:
fig_title = 'IAF Input Signal with %d dB of Noise' % noise_power;
print fig_title
u = func_timer(bl.gen_band_limited)(dur, dt, f, noise_power)
pl.plot_signal(t, u, fig_title,
output_name + str(output_count) + output_ext)
b = 3.5 # bias
d = 0.7 # threshold
R = 10.0 # resistance
C = 0.01 # capacitance
try:
iaf.iaf_recoverable(u, bw, b, d, R, C)
except ValueError('reconstruction condition not satisfied'):
sys.exit()
M = 5 # number of bins for fast decoding algorithm
L = 5 # number of recursions for recursive decoding algorithm
# Test leaky algorithms:
output_count += 1
fig_title = 'Signal Encoded Using Leaky IAF Encoder'
print fig_title
s = func_timer(iaf.iaf_encode)(u, dt, b, d, R, C)
pl.plot_encoded(t, u, s, fig_title,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Leaky IAF Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode)(s, dur, dt, bw, b, d, R, C)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Leaky Fast IAF Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode_fast)(s, dur, dt, bw, M, b, d, R, C)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
# Test ideal algorithms:
R = np.inf
output_count += 1
fig_title = 'Signal Encoded Using Ideal IAF Encoder'
print fig_title
s = func_timer(iaf.iaf_encode)(u, dt, b, d, R, C)
pl.plot_encoded(t, u, s, fig_title,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Ideal IAF Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode)(s, dur, dt, bw, b, d, R, C)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
output_count += 1
fig_title = 'Signal Decoded Using Ideal Fast IAF Decoder'
print fig_title
u_rec = func_timer(iaf.iaf_decode_fast)(s, dur, dt, bw, M, b, d, R, C)
pl.plot_compare(t, u, u_rec, fig_title,
output_name + str(output_count) + output_ext)
| {
"content_hash": "e5cffd6c17d6a9a93d18169fda308453",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 70,
"avg_line_length": 27.97222222222222,
"alnum_prop": 0.6752730883813307,
"repo_name": "bionet/ted.python",
"id": "4375265d350c92780a14c399f48722995444397c",
"size": "3044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/iaf_demo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "655"
},
{
"name": "Python",
"bytes": "374643"
}
],
"symlink_target": ""
} |
"""
tests.integration.test_index
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Integration tests for the search index.
"""
import copy
from datetime import timedelta
import pytest
from jobber.core.models import Location, Company, Job
from jobber.core.search import Index
@pytest.fixture(scope='function')
def location():
    """Build a fresh ``Location`` for each test."""
    city = u'Lïmásѕ߀ɭ'
    return Location(city=city, country_code='CYP')
@pytest.fixture(scope='function')
def company():
    """Build a fresh ``Company`` for each test."""
    name = u'remedica'
    return Company(name=name)
@pytest.fixture(scope='function')
def job(company, location):
    """Build an unsaved ``Job`` wired to the company/location fixtures."""
    attributes = dict(
        title=u'testfoo',
        description=u'testfoo',
        contact_method=1,
        remote_work=False,
        company=company,
        location=location,
        job_type=1,
        recruiter_name=u'jon',
        recruiter_email=u'doe',
    )
    return Job(**attributes)
class TestSearchIndex(object):
    """Integration tests for ``Index`` add/update/delete/search behaviour.

    Each test persists a ``Job`` through the SQLAlchemy ``session``
    fixture, then drives the search index directly.
    """

    def test_add_document(self, session, index, job):
        """Adding a committed job's document makes it searchable by title."""
        session.add(job)
        session.commit()
        # NOTE(review): the `index` fixture argument is immediately
        # shadowed by a fresh Index(); presumably the fixture only sets
        # up/tears down index storage -- confirm against conftest.
        index = Index()
        index.add_document(job.to_document())
        hits = index.search(job.title)
        assert len(hits) == 1
        assert int(hits[0]['id']) == job.id

    def test_update_document(self, session, index, job):
        """Updating a document re-indexes it under the new field value."""
        session.add(job)
        session.commit()
        doc = job.to_document()
        index = Index()
        index.add_document(doc)
        doc['job_type'] = u'updated'
        index.update_document(doc)
        hits = index.search(u'updated')
        assert len(hits) == 1
        assert int(hits[0]['id']) == job.id

    def test_delete_document(self, session, index, job):
        """Deleting a document by id removes it from search results."""
        session.add(job)
        session.commit()
        doc = job.to_document()
        index = Index()
        index.add_document(doc)
        hits = index.search(job.title)
        assert len(hits) == 1
        index.delete_document(doc['id'])
        hits = index.search(job.title)
        assert len(hits) == 0

    def test_search_limit(self, session, index, job):
        """Bulk-added documents come back ordered per the ``sort`` option."""
        doc = job.to_document()
        timestamp = doc['created']
        # Build 15 copies with distinct ids; id i is i days older than
        # the newest, so ids and timestamps are inversely ordered.
        bulk = []
        for i in range(15):
            doc = copy.deepcopy(doc)
            doc['id'] = unicode(i)
            doc['created'] = timestamp - timedelta(days=i)
            bulk.append(doc)
        index = Index()
        index.add_document_bulk(bulk)
        # Search with ascending sort, should return the ids in reverse order.
        hits = index.search(job.title, sort=('created', 'asc'))
        assert [int(hit['id']) for hit in hits] == range(15)[::-1]
        # Search with descending sort.
        hits = index.search(job.title, sort=('created', 'desc'))
        assert [int(hit['id']) for hit in hits] == range(15)
| {
"content_hash": "ca17457f1f26f22058dd4b7b34cbc0c2",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 77,
"avg_line_length": 25.339622641509433,
"alnum_prop": 0.5722263588979896,
"repo_name": "hackcyprus/jobber",
"id": "30369f3121b784a7d783dd8135c4d911d0393d1a",
"size": "2719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_index.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "41661"
},
{
"name": "JavaScript",
"bytes": "6949"
},
{
"name": "Python",
"bytes": "117838"
},
{
"name": "Ruby",
"bytes": "748"
},
{
"name": "Shell",
"bytes": "269"
}
],
"symlink_target": ""
} |
# Bootstrap setuptools before importing it, then declare the package.
from ez_setup import use_setuptools
use_setuptools()

from setuptools import setup, find_packages

setup(
    # Identity and metadata for mpass, a hash-based password manager.
    name='mpass',
    version='0.0.2',
    description='Hash-based password management',
    license='Apache License (2.0)',
    url='https://github.com/Pringley/mpass-python',
    author='Ben Pringle',
    author_email='ben.pringle@gmail.com',
    # Contents: every discoverable package plus the CLI entry script.
    packages=find_packages(),
    scripts=['scripts/mpass'],
)
| {
"content_hash": "f789c58fbadfc1ebd0693bd46c9dcce6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 53,
"avg_line_length": 25.705882352941178,
"alnum_prop": 0.6498855835240275,
"repo_name": "Pringley/mpass-python",
"id": "ec0fe3e9955c061b329bf0868ac3c6153bd1ccf6",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13029"
}
],
"symlink_target": ""
} |
class Rectangle:
    """A lesson class modeling a rectangle.

    Demonstrates ``__init__`` -- one of Python's double-underscore
    "magic methods" -- which initializes a new instance, and a regular
    instance method.  ``self`` is only a naming convention: whatever the
    first parameter is called, it refers to the current instance.
    """

    def __init__(self, length, width):
        # Store the dimensions as member variables on the instance.
        self.length = length
        self.width = width

    def area(self):
        """Return the rectangle's area (length times width)."""
        return self.length * self.width
# Create an instance and exercise it.  Printing the instance shows the
# default object repr (Rectangle defines no __repr__/__str__ here);
# printing area() shows the computed value 6.
rect = Rectangle(2, 3)
print(rect)
print(rect.area())
| {
"content_hash": "a207597209491fdf94833df472cadf84",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 116,
"avg_line_length": 43.588235294117645,
"alnum_prop": 0.7031039136302294,
"repo_name": "JonTheBurger/python_class",
"id": "4fe566cf3ca2b75417119434e311f609b660b49c",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chapter 4/lessons/classes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28910"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
import django_cassandra_engine as meta
DESCRIPTION = 'Django Cassandra Engine - the Cassandra backend for Django'

# Use the README as the long description when building from a source
# checkout; silently fall back to None when the file is absent (this
# preserves the original best-effort intent).
LONG_DESCRIPTION = None
try:
    # FIX: read the README through a context manager so the file handle
    # is closed deterministically (it was previously left open).
    with open('README.rst') as readme:
        LONG_DESCRIPTION = readme.read()
except IOError:
    pass

setup(
    name='django-cassandra-engine',
    # Version/author metadata is sourced from the package itself.
    version='.'.join(map(str, meta.__version__)),
    author=meta.__author__,
    author_email=meta.__contact__,
    url=meta.__homepage__,
    keywords='django cassandra engine backend driver wrapper database nonrel '
             'cqlengine',
    download_url='http://github.com/r4fek/django-cassandra-engine/tarball/master',
    license='2-clause BSD',
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    install_requires=[
        'Django<1.9',
        'cassandra-driver==2.6.0'
    ],
    packages=find_packages(
        exclude=['tests', 'tests.*', 'testproject', 'testproject.*']),
    test_suite='testproject.runtests.main',
    tests_require=['mock==1.0.1'],
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        "Environment :: Plugins",
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Database',
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| {
"content_hash": "e5f19e395e60fffee26b81908e54ce70",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 82,
"avg_line_length": 33.26,
"alnum_prop": 0.6265784726398076,
"repo_name": "slurms/django-cassandra-engine",
"id": "dd5965f93e37f9236ebfdd23301719f7ead69f4a",
"size": "1663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "24562"
},
{
"name": "HTML",
"bytes": "6685"
},
{
"name": "JavaScript",
"bytes": "1413"
},
{
"name": "Python",
"bytes": "51102"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.azurestackhci import AzureStackHCIClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-azurestackhci
# USAGE
python delete_cluster.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Delete the sample HCI cluster and print the operation result."""
    credential = DefaultAzureCredential()
    hci_client = AzureStackHCIClient(
        credential=credential,
        subscription_id="fd3c3665-1729-4b7b-9a38-238e83b0f98b",
    )
    # begin_delete returns a poller; block until the LRO completes.
    poller = hci_client.clusters.begin_delete(
        resource_group_name="test-rg",
        cluster_name="myCluster",
    )
    print(poller.result())


# x-ms-original-file: specification/azurestackhci/resource-manager/Microsoft.AzureStackHCI/stable/2022-10-01/examples/DeleteCluster.json
if __name__ == "__main__":
    main()
| {
"content_hash": "43b6e28aadb410253323c7088e0c5fad",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 136,
"avg_line_length": 33.24242424242424,
"alnum_prop": 0.7329079307201458,
"repo_name": "Azure/azure-sdk-for-python",
"id": "2d965236d7f95fbaf7ca509c434e7efaacf2b82c",
"size": "1565",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/azurestackhci/azure-mgmt-azurestackhci/generated_samples/delete_cluster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""A module for the Runner base class."""
import cr
class Runner(cr.Action, cr.Plugin.Type):
  """Base class for implementing target runners.

  Runner implementations must implement the Kill, Run and Test methods.
  """

  # Plugin-selection metadata: the active runner is picked with --runner
  # on the command line or through the CR_RUNNER environment variable.
  SELECTOR_ARG = '--runner'
  SELECTOR = 'CR_RUNNER'
  SELECTOR_HELP = 'Sets the runner to use to execute the target.'

  @classmethod
  def AddArguments(cls, command, parser):
    # Adds the runner-related command line arguments to the parser.
    parser.add_argument(
        '--test', dest='CR_TEST_TYPE',
        choices=cr.Target.TEST_TYPES,
        default=None,
        help="""
          Sets the test type to use,
          defaults to choosing based on the target.
          Set to 'no' to force it to not be a test.
          """
    )
    cls.AddSelectorArg(command, parser)

  @cr.Plugin.activemethod
  def Kill(self, targets, arguments):
    """Stops all running processes that match a target."""
    raise NotImplementedError('Must be overridden.')

  @cr.Plugin.activemethod
  def Run(self, target, arguments):
    """Run a new copy of a runnable target."""
    raise NotImplementedError('Must be overridden.')

  @cr.Plugin.activemethod
  def Test(self, target, arguments):
    """Run a test target."""
    raise NotImplementedError('Must be overridden.')

  @cr.Plugin.activemethod
  def Invoke(self, targets, arguments):
    """Invoke a target.

    This dispatches to either Test or Run depending on the target type.
    """
    for target in targets:
      if target.is_test:
        self.Test(target, arguments)
      else:
        self.Run(target, arguments)

  @cr.Plugin.activemethod
  def Restart(self, targets, arguments):
    """Force a target to restart if it is already running.

    Default implementation is to do a Kill Invoke sequence.
    Do not call the base version if you implement a more efficient one.
    """
    # Kill with no extra arguments; 'arguments' only apply to the relaunch.
    self.Kill(targets, [])
    self.Invoke(targets, arguments)
class SkipRunner(Runner):
  """A Runner the user chooses to bypass the run step of a command."""

  @property
  def priority(self):
    # Rank just below every other runner so this one is only used when
    # explicitly selected (or nothing else is available).
    return super(SkipRunner, self).priority - 1

  def Kill(self, targets, arguments):
    # Deliberate no-op: skipping the run step means there is nothing to kill.
    pass

  def Run(self, target, arguments):
    # Deliberate no-op.
    pass

  def Test(self, target, arguments):
    # Deliberate no-op.
    pass
| {
"content_hash": "e706340dc60880846b72cdfa666c8f16",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 71,
"avg_line_length": 26.53012048192771,
"alnum_prop": 0.662125340599455,
"repo_name": "nwjs/chromium.src",
"id": "923d9ac0276a29c0d7ef7d36f96cbea3fc87b9e0",
"size": "2343",
"binary": false,
"copies": "7",
"ref": "refs/heads/nw70",
"path": "tools/cr/cr/actions/runner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import posixpath
from urllib.parse import urlsplit
from urllib.parse import urlunsplit
def urljoin(base, *args):
    """Join an arbitrary number of path segments onto a base URL.

    The base URL's scheme, netloc, query and fragment are preserved; the
    extra segments are appended to its path with POSIX join semantics.

    Args:
        base: the base URL, e.g. "http://host/api".
        *args: path segments; each is converted with str() before joining.

    Returns:
        The combined URL as a string.
    """
    scheme, netloc, path, query, fragment = urlsplit(base)
    # An empty path (e.g. "http://host") must become "/" so the joined
    # result is rooted at the host.
    path = path or "/"
    path = posixpath.join(path, *(str(segment) for segment in args))
    return urlunsplit([scheme, netloc, path, query, fragment])
def transform_url_parameters(params):
    """Transform a python dictionary to aiohttp valid url parameters.

    support for:
        key=["a", "b"] -> ?key=a&key=b

    Args:
        params: a dict mapping keys to scalar values or list/tuple of
            values, or an already-transformed list of (key, value) pairs.

    Returns:
        A list of (key, value) tuples; multi-valued keys are expanded into
        one pair per value. A list input is returned unchanged.
    """
    if isinstance(params, list):
        # Already a list of pairs -- nothing to do.
        return params
    pairs = []
    for key, value in params.items():
        if isinstance(value, (list, tuple)):
            # Expand multi-valued parameters into repeated keys.
            pairs.extend((key, item) for item in value)
        else:
            pairs.append((key, value))
    return pairs
| {
"content_hash": "c5e74c51b5a874055ae02e5a22b83361",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 28.516129032258064,
"alnum_prop": 0.6244343891402715,
"repo_name": "chassing/aionap",
"id": "216600457e2699ba1ed6217b1a9be736137f5ea4",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aionap/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "31372"
}
],
"symlink_target": ""
} |
"""
MoinMoin - migration from base rev 1050500
Nothing to do, we just return the new data dir revision.
@copyright: 2006 by Thomas Waldmann
@license: GNU GPL, see COPYING for details.
"""
def execute(script, data_dir, rev):
    """No migration work needed for this step; just bump the revision.

    Returns the new data dir revision number.
    """
    return 1050600
| {
"content_hash": "c8c4d49420473496a9bdf342d6a9966a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 60,
"avg_line_length": 21.916666666666668,
"alnum_prop": 0.6844106463878327,
"repo_name": "RealTimeWeb/wikisite",
"id": "847d1da260591cad489b2afdf0cc4d7683b5d7ac",
"size": "292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/script/migration/1050500.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
import os
import fixtures
import testtools
from diskimage_builder import element_dependencies
data_dir = os.path.abspath(
os.path.join(os.path.dirname(__file__), 'test-elements'))
def _populate_element(element_dir, element_name, element_deps=[]):
element_home = os.path.join(element_dir, element_name)
os.mkdir(element_home)
deps_path = os.path.join(element_home, 'element-deps')
with open(deps_path, 'w') as deps_file:
deps_file.write("\n".join(element_deps))
class TestElementDeps(testtools.TestCase):
    """Exercises element_dependencies.expand_dependencies on a fake tree."""

    def setUp(self):
        super(TestElementDeps, self).setUp()
        # Lay out a small dependency graph inside a temporary directory.
        self.element_dir = self.useFixture(fixtures.TempDir()).path
        _populate_element(self.element_dir, 'foo')
        _populate_element(self.element_dir, 'requires-foo', ['foo'])
        _populate_element(self.element_dir,
                          'requires-requires-foo',
                          ['requires-foo'])
        _populate_element(self.element_dir, 'self', ['self'])
        _populate_element(self.element_dir, 'circular1', ['circular2'])
        _populate_element(self.element_dir, 'circular2', ['circular1'])

    def test_non_transitive_deps(self):
        expanded = element_dependencies.expand_dependencies(
            ['requires-foo'], elements_dir=self.element_dir)
        self.assertEqual(set(['requires-foo', 'foo']), expanded)

    def test_missing_deps(self):
        # Unknown element names make expand_dependencies bail out.
        self.assertRaises(SystemExit,
                          element_dependencies.expand_dependencies,
                          ['fake'],
                          self.element_dir)

    def test_transitive_deps(self):
        expanded = element_dependencies.expand_dependencies(
            ['requires-requires-foo'], elements_dir=self.element_dir)
        self.assertEqual(set(['requires-requires-foo',
                              'requires-foo',
                              'foo']), expanded)

    def test_no_deps(self):
        expanded = element_dependencies.expand_dependencies(
            ['foo'], elements_dir=self.element_dir)
        self.assertEqual(set(['foo']), expanded)

    def test_self(self):
        # An element depending on itself resolves to just itself.
        expanded = element_dependencies.expand_dependencies(
            ['self'], elements_dir=self.element_dir)
        self.assertEqual(set(['self']), expanded)

    def test_circular(self):
        # Mutually dependent elements both end up in the result.
        expanded = element_dependencies.expand_dependencies(
            ['circular1'], elements_dir=self.element_dir)
        self.assertEqual(set(['circular1', 'circular2']), expanded)
class TestElements(testtools.TestCase):

    def test_depends_on_env(self):
        # The ELEMENTS_PATH environment variable selects the elements dir.
        self.useFixture(
            fixtures.EnvironmentVariable('ELEMENTS_PATH', '/foo/bar'))
        self.assertEqual('/foo/bar', element_dependencies.get_elements_dir())

    def test_env_not_set(self):
        # An empty ELEMENTS_PATH counts as unset and must raise.
        self.useFixture(fixtures.EnvironmentVariable('ELEMENTS_PATH', ''))
        self.assertRaises(Exception, element_dependencies.get_elements_dir, ())
| {
"content_hash": "d569a862b8ad52207df4abc90f504a70",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 79,
"avg_line_length": 38.526315789473685,
"alnum_prop": 0.6226092896174863,
"repo_name": "citrix-openstack-build/diskimage-builder",
"id": "fc99f0ba00f406541381564231578413e8ae6cf8",
"size": "3557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diskimage_builder/tests/test_elementdeps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26300"
},
{
"name": "Shell",
"bytes": "88527"
}
],
"symlink_target": ""
} |
import xarray as xr
import numpy as np
# This import is only for
from .vegetation import EVI as _EVI_orig, EVI2 as _EVI2_orig, NDVI as _NDVI_orig
def EVI(*args, **kwargs):
    """Deprecated alias for the EVI() function in vegetation.py.

    Instead of this function, please use the EVI() function in vegetation.py.
    """
    import warnings
    # Emit a runtime warning so callers actually notice the deprecation,
    # which was previously stated only in the docstring.
    warnings.warn(
        "dc_ndvi_anomaly.EVI is deprecated; use vegetation.EVI instead.",
        DeprecationWarning, stacklevel=2)
    return _EVI_orig(*args, **kwargs)
def EVI2(*args, **kwargs):
    """Deprecated alias for the EVI2() function in vegetation.py.

    Instead of this function, please use the EVI2() function in vegetation.py.
    """
    import warnings
    # Emit a runtime warning so callers actually notice the deprecation,
    # which was previously stated only in the docstring.
    warnings.warn(
        "dc_ndvi_anomaly.EVI2 is deprecated; use vegetation.EVI2 instead.",
        DeprecationWarning, stacklevel=2)
    return _EVI2_orig(*args, **kwargs)
def NDVI(*args, **kwargs):
    """Deprecated alias for the NDVI() function in vegetation.py.

    Instead of this function, please use the NDVI() function in vegetation.py.
    """
    import warnings
    # Emit a runtime warning so callers actually notice the deprecation,
    # which was previously stated only in the docstring.
    warnings.warn(
        "dc_ndvi_anomaly.NDVI is deprecated; use vegetation.NDVI instead.",
        DeprecationWarning, stacklevel=2)
    return _NDVI_orig(*args, **kwargs)
def compute_ndvi_anomaly(baseline_data,
                         scene_data,
                         baseline_clear_mask=None,
                         selected_scene_clear_mask=None,
                         no_data=-9999):
    """Compute the scene+baseline median ndvi values and the difference

    Args:
        baseline_data: xarray dataset with dims lat, lon, t
        scene_data: xarray dataset with dims lat, lon - should be mosaicked already.
        baseline_clear_mask: boolean mask signifying clear pixels for the baseline data
        selected_scene_clear_mask: boolean mask signifying clear pixels for the scene data
        no_data: nodata value for the datasets
    Returns:
        xarray dataset with scene_ndvi, baseline_ndvi(median), ndvi_difference, and ndvi_percentage_change.
    """
    # Imported lazily -- presumably to avoid a circular import with
    # dc_water_classifier at module load time; confirm before hoisting.
    from .dc_water_classifier import wofs_classify
    assert selected_scene_clear_mask is not None and baseline_clear_mask is not None, "Both the selected scene and baseline data must have associated clear mask data."
    #cloud filter + nan out all nodata.
    baseline_data = baseline_data.where((baseline_data != no_data) & baseline_clear_mask)
    # NDVI = (nir - red) / (nir + red); assumes both datasets carry 'nir'
    # and 'red' data variables -- TODO confirm against the ingestion schema.
    baseline_ndvi = (baseline_data.nir - baseline_data.red) / (baseline_data.nir + baseline_data.red)
    # Per-pixel median over time forms the baseline NDVI.
    median_ndvi = baseline_ndvi.median('time')
    #scene should already be mosaicked.
    # Water pixels (wofs == 1) are masked out of the scene before NDVI.
    water_class = wofs_classify(scene_data, clean_mask=selected_scene_clear_mask, mosaic=True).wofs
    scene_cleaned = scene_data.copy(deep=True).where((scene_data != no_data) & (water_class == 0))
    scene_ndvi = (scene_cleaned.nir - scene_cleaned.red) / (scene_cleaned.nir + scene_cleaned.red)
    ndvi_difference = scene_ndvi - median_ndvi
    ndvi_percentage_change = (scene_ndvi - median_ndvi) / median_ndvi
    #convert to conventional nodata vals.
    # NaN/inf values (masked pixels, division by zero) become no_data.
    scene_ndvi.values[~np.isfinite(scene_ndvi.values)] = no_data
    ndvi_difference.values[~np.isfinite(ndvi_difference.values)] = no_data
    ndvi_percentage_change.values[~np.isfinite(ndvi_percentage_change.values)] = no_data
    scene_ndvi_dataset = xr.Dataset(
        {
            'scene_ndvi': scene_ndvi,
            'baseline_ndvi': median_ndvi,
            'ndvi_difference': ndvi_difference,
            'ndvi_percentage_change': ndvi_percentage_change
        },
        coords={'latitude': scene_data.latitude,
                'longitude': scene_data.longitude})
    return scene_ndvi_dataset
| {
"content_hash": "46327bb088c17bb0c0e016093de0d5f8",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 167,
"avg_line_length": 39.19230769230769,
"alnum_prop": 0.661432777232581,
"repo_name": "ceos-seo/data_cube_utilities",
"id": "f8b23c3b3a3635153279df3b7fbc72193a0854dd",
"size": "3057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_cube_utilities/dc_ndvi_anomaly.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "5234"
},
{
"name": "Python",
"bytes": "416675"
}
],
"symlink_target": ""
} |
"""Configuration and hyperparameter sweeps."""
from lra_benchmarks.listops.configs import base_listops_config
def get_config():
  """Get the default hyperparameter configuration."""
  # Start from the shared ListOps baseline and select the Synthesizer model.
  config = base_listops_config.get_config()
  config.model_type = "synthesizer"
  return config
def get_hyper(hyper):
  # Empty product: no sweep, a single trial with the defaults above.
  return hyper.product([])
| {
"content_hash": "b4471d32c5e420ee00a94add4702753e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 62,
"avg_line_length": 23.714285714285715,
"alnum_prop": 0.7409638554216867,
"repo_name": "google-research/long-range-arena",
"id": "c1bd080113cda97bf7315d62e871fe53ff1cfe5d",
"size": "905",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "lra_benchmarks/listops/configs/synthesizer_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "507289"
}
],
"symlink_target": ""
} |
import csv
import codecs
import cStringIO
class Writer(object):
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    Python 2 only (uses cStringIO): rows pass through an in-memory UTF-8
    queue and are then re-encoded into the target encoding.
    """
    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # Incremental encoder keeps state across writes for multi-byte encodings.
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, row):
        # Numbers are stringified; every other value is assumed to be
        # unicode text (it is encoded to UTF-8 below) -- TODO confirm.
        def convstr(val):
            if isinstance(val, (int, float)):
                return str(val)
            else:
                return val
        row = [convstr(e) for e in row]
        self.writer.writerow([s.encode("utf-8") for s in row])
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
    def writerows(self, rows):
        for row in rows:
            self.writerow(row)
    def close(self):
        """ Not really needed; only present to make the interface similar to the
        xls.py module (xls.writer objects need to be explicitly closed)
        """
        pass
| {
"content_hash": "ee949c83bd6f77ae967bc3af679bd487",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 80,
"avg_line_length": 29.93617021276596,
"alnum_prop": 0.5813788201847904,
"repo_name": "maxdl/PointDensitySyn.py",
"id": "a7e80b923b439f501629f3db8d3b65a9b036280f",
"size": "1591",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pointdensitysyn/unicode_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "189528"
}
],
"symlink_target": ""
} |
"""
Extract the files from phonondb2017
http://phonondb.mtl.kyoto-u.ac.jp/ph20170621/index.html
and calculate the path using seekpath
"""
import os
import multiprocessing
from phononweb.phonopyphonon import PhonopyPhonon
def calculate_bs(material_id):
    """Compute the band-structure for one extracted material folder.

    Reads phonon.yaml and FORCE_SETS (plus BORN when present) from the
    folder named after the material and writes <material_id>.yaml there.
    """
    phonon_yaml = os.path.join(material_id, 'phonon.yaml')
    force_sets = os.path.join(material_id, 'FORCE_SETS')
    # Non-analytical-correction data is optional; only use BORN if it exists.
    born = os.path.join(material_id, 'BORN')
    if not os.path.isfile(born):
        born = None
    phonon = PhonopyPhonon.from_files(phonon_yaml, force_sets, nac_filename=born)
    phonon.set_bandstructure_seekpath()
    phonon.get_bandstructure()
    out_yaml = os.path.join(material_id, '%s.yaml' % material_id)
    phonon.write_band_yaml(filename=out_yaml)
def run_job(filename):
    """Extract one phonondb archive and compute its band-structure.

    Args:
        filename: archive name of the form '<material_id>.tar.lzma',
            extracted into the current working directory.
    """
    import subprocess
    print(filename)
    # Extract without a shell so unusual filenames cannot be interpreted
    # as shell syntax (previously os.system('tar fx %s' % filename)).
    # Like os.system, a non-zero exit status is not raised as an error.
    subprocess.call(['tar', 'xf', filename])
    # The material id is the archive name up to the first dot.
    material_id = filename.split('.')[0]
    calculate_bs(material_id)
if __name__ == "__main__":
    nthreads = 2
    # List all the downloaded .lzma archives in the current directory.
    jobs = [ filename for filename in os.listdir('.') if 'lzma' in filename ]
    # Parallel variant kept for reference (uses nthreads workers):
    #p = multiprocessing.Pool(nthreads)
    #p.map(run_job, jobs)
    # Serial processing of every archive.
    list(map(run_job,jobs))
| {
"content_hash": "093b8c33133b857bda40d2daf5315603",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 102,
"avg_line_length": 26.79245283018868,
"alnum_prop": 0.6845070422535211,
"repo_name": "henriquemiranda/phononwebsite",
"id": "c82993ad2475f3a24c9287b16b7c4efa68bdd89f",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "scripts/phonondb/phonondb2017.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3766"
},
{
"name": "Gnuplot",
"bytes": "3774"
},
{
"name": "HTML",
"bytes": "29321"
},
{
"name": "JavaScript",
"bytes": "355258"
},
{
"name": "Python",
"bytes": "80267"
}
],
"symlink_target": ""
} |
"""
.. autoclass:: revscoring.scorer_models.rf.RF
:members:
:member-order:
"""
import logging
from sklearn.ensemble import RandomForestClassifier
from .scorer_model import ScikitLearnClassifier
logger = logging.getLogger("revscoring.scorers.rf")
class RF(ScikitLearnClassifier):
    """
    Implements a Random Forest model.

    :Params:
        features : `collection` of :class:`~revscoring.features.feature.Feature`
            The features that the model will be trained on
        language : :class:`~revscoring.languages.language.Language`
            The language context applied when extracting features.
        version : str
            A version string representing the version of the model
        `**kwargs`
            Passed to :class:`sklearn.ensemble.RandomForestClassifier`
    """

    def __init__(self, features, *, language=None, version=None, rf=None,
                 **kwargs):
        # Build a fresh sklearn classifier unless a pre-configured one is given.
        classifier = rf if rf is not None else RandomForestClassifier(**kwargs)
        super().__init__(features, classifier_model=classifier,
                         language=language, version=version)
# Older code imported the model under this name; keep it working.
RFModel = RF
"Alias for backwards compatibility"
| {
"content_hash": "72a0724288e2a6e2331cea07942f415b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 84,
"avg_line_length": 32.2,
"alnum_prop": 0.6716947648624667,
"repo_name": "eranroz/revscoring",
"id": "39cb0d9d40d96691b4d8f294e615987991901c9a",
"size": "1127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "revscoring/scorer_models/rf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "249956"
}
],
"symlink_target": ""
} |
import logging
from django.core import urlresolvers
from django.template import defaultfilters as filters
from django.utils import http
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class UploadFile(tables.LinkAction):
    # Table action: opens a modal dialog for uploading a template file.
    name = 'upload_file'
    verbose_name = _("Upload Template")
    url = 'horizon:project:data_processing.cluster_templates:upload_file'
    classes = ("btn-launch", "ajax-modal")
    icon = "upload"
class CreateCluster(tables.LinkAction):
    """Row action that launches a cluster from the selected template."""
    name = "create cluster"
    verbose_name = _("Launch Cluster")
    url = "horizon:project:data_processing.clusters:configure-cluster"
    classes = ("ajax-modal",)
    icon = "plus"

    def get_link_url(self, datum):
        # Pre-select plugin, version and template in the cluster creation
        # form by passing them along as query-string parameters.
        query = http.urlencode({"hadoop_version": datum.hadoop_version,
                                "plugin_name": datum.plugin_name,
                                "cluster_template_id": datum.id})
        return "?".join([urlresolvers.reverse(self.url), query])
class CopyTemplate(tables.LinkAction):
    # Row action: opens a modal pre-filled from the selected template.
    name = "copy"
    verbose_name = _("Copy Template")
    url = "horizon:project:data_processing.cluster_templates:copy"
    classes = ("ajax-modal", )
class DeleteTemplate(tables.BatchAction):
    # Pluralised, lazily-translated button labels required by BatchAction.
    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete Template",
            u"Delete Templates",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted Template",
            u"Deleted Templates",
            count
        )

    name = "delete_cluster_template"
    verbose_name = _("Delete Template")
    classes = ("btn-terminate", "btn-danger")

    def action(self, request, template_id):
        # Called once per selected row by the BatchAction machinery.
        saharaclient.cluster_template_delete(request, template_id)
class CreateClusterTemplate(tables.LinkAction):
    # Table action: opens the template creation wizard in a modal.
    name = "create"
    verbose_name = _("Create Template")
    url = ("horizon:project:data_processing.cluster_templates:"
           "create-cluster-template")
    classes = ("ajax-modal", "create-clustertemplate-btn")
    icon = "plus"
class ConfigureClusterTemplate(tables.LinkAction):
    name = "configure"
    verbose_name = _("Configure Cluster Template")
    url = ("horizon:project:data_processing.cluster_templates:"
           "configure-cluster-template")
    classes = ("ajax-modal", "configure-clustertemplate-btn")
    icon = "plus"
    # Hidden button -- presumably triggered programmatically from the
    # template-creation flow rather than clicked directly; confirm.
    attrs = {"style": "display: none"}
def render_node_groups(cluster_template):
    """Render a template's node groups as "<name>: <count>" strings."""
    return ['%s: %s' % (group['name'], group['count'])
            for group in cluster_template.node_groups]
class ClusterTemplatesTable(tables.DataTable):
    """Table of Sahara cluster templates with create/copy/delete actions."""
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link=("horizon:project:data_processing."
                               "cluster_templates:details"))
    plugin_name = tables.Column("plugin_name",
                                verbose_name=_("Plugin"))
    hadoop_version = tables.Column("hadoop_version",
                                   verbose_name=_("Hadoop Version"))
    # Rendered through render_node_groups and displayed as an unordered list.
    node_groups = tables.Column(render_node_groups,
                                verbose_name=_("Node Groups"),
                                wrap_list=True,
                                filters=(filters.unordered_list,))
    description = tables.Column("description",
                                verbose_name=_("Description"))

    class Meta:
        name = "cluster_templates"
        verbose_name = _("Cluster Templates")
        table_actions = (UploadFile,
                         CreateClusterTemplate,
                         ConfigureClusterTemplate,
                         DeleteTemplate,)
        row_actions = (CreateCluster,
                       CopyTemplate,
                       DeleteTemplate,)
| {
"content_hash": "c7320cf103995abde93e36cd9382284a",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 73,
"avg_line_length": 33,
"alnum_prop": 0.600640551860064,
"repo_name": "froyobin/horizon",
"id": "0ef599d6778a8d6ec9f5e7cc7d974ac76e70d44e",
"size": "4605",
"binary": false,
"copies": "1",
"ref": "refs/heads/our_branch",
"path": "openstack_dashboard/dashboards/project/data_processing/cluster_templates/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64112"
},
{
"name": "JavaScript",
"bytes": "238175"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "4004600"
},
{
"name": "Shell",
"bytes": "17902"
}
],
"symlink_target": ""
} |
import sys
import datetime
from twisted.python import log
from twisted.internet import reactor
from twisted.internet.defer import Deferred, \
inlineCallbacks, \
returnValue
from twisted.internet.endpoints import serverFromString
from autobahn.twisted.util import sleep
from autobahn.wamp.router import RouterFactory
from autobahn.wamp.serializer import MsgPackSerializer
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import RouterSessionFactory
from autobahn.twisted.websocket import WampWebSocketServerFactory
from autobahn.twisted.rawsocket import WampRawSocketServerFactory
class MyBackendComponent(ApplicationSession):
   """
   Application code goes here. This is an example component that provides
   a simple procedure which can be called remotely from any WAMP peer.
   It also publishes an event every second to some topic.
   """

   def onConnect(self):
      # Join the realm as soon as the transport is established.
      self.join("realm1")

   @inlineCallbacks
   def onJoin(self, details):
      ## subscribe to a topic
      ##
      def onevent(*args, **kwargs):
         print("Got event: {} {}".format(args, kwargs))
      yield self.subscribe(onevent, 'com.myapp.topic2')

      ## register a procedure for remote calling
      ##
      def utcnow():
         print("Someone is calling me;)")
         now = datetime.datetime.utcnow()
         return now.strftime("%Y-%m-%dT%H:%M:%SZ")
      yield self.register(utcnow, 'com.timeservice.now')

      def square(x):
         return x * x
      yield self.register(square, 'com.math.square')

      @inlineCallbacks
      def slowsquare(x, delay = 1):
         # Simulates a long-running procedure by sleeping before returning.
         print("slowsquare with delay = {}".format(delay))
         yield sleep(delay)
         returnValue(x * x)
      yield self.register(slowsquare, 'com.math.slowsquare')

      def add2(a, b):
         return a + b
      yield self.register(add2, 'com.mathservice.add2')

      def ping():
         return
      # NOTE(review): this second add2 shadows the one registered above
      # under 'com.mathservice.add2'; both registrations keep working,
      # but the duplicate definition looks like copy-paste.
      def add2(a, b):
         return a + b
      def stars(nick = "somebody", stars = 0):
         return "{} starred {}x".format(nick, stars)
      def orders(product, limit = 5):
         return ["Product {}".format(i) for i in range(50)][:limit]
      def arglen(*args, **kwargs):
         return [len(args), len(kwargs)]
      yield self.register(ping, 'com.arguments.ping')
      yield self.register(add2, 'com.arguments.add2')
      yield self.register(stars, 'com.arguments.stars')
      yield self.register(orders, 'com.arguments.orders')
      yield self.register(arglen, 'com.arguments.arglen')

      # NOTE(review): CallResult is never imported in this file (it lives
      # in autobahn.wamp.types), so calling the next two procedures would
      # raise NameError -- confirm and add the import if they should work.
      def add_complex(a, ai, b, bi):
         return CallResult(c = a + b, ci = ai + bi)
      yield self.register(add_complex, 'com.myapp.add_complex')

      def split_name(fullname):
         forename, surname = fullname.split()
         return CallResult(forename, surname)
      yield self.register(split_name, 'com.myapp.split_name')

      def numbers(start, end, prefix = "Number: "):
         res = []
         for i in range(start, end):
            res.append(prefix + str(i))
         return res
      yield self.register(numbers, 'com.arguments.numbers')

      ## publish events to a topic
      ##
      counter = 0
      while True:
         self.publish('com.myapp.topic1', counter)
         print("Published event: {}".format(counter))
         counter += 1
         yield sleep(1)
if __name__ == '__main__':
   ## 0) start logging to console
   log.startLogging(sys.stdout)
   ## 1) create a WAMP router factory
   router_factory = RouterFactory()
   ## 2) create a WAMP router session factory
   session_factory = RouterSessionFactory(router_factory)
   ## 3) Optionally, add embedded WAMP application sessions to the router
   session_factory.add(MyBackendComponent())
   ## 4) create a WAMP-over-WebSocket transport server factory
   transport_factory1 = WampWebSocketServerFactory(session_factory, debug = False)
   ## 5) start the server from a Twisted endpoint
   server1 = serverFromString(reactor, "tcp:8080")
   server1.listen(transport_factory1)
   ## 6) create a WAMP-over-RawSocket-MsgPack transport server factory
   serializer = MsgPackSerializer()
   # Disable msgpack format v5 -- presumably for wire compatibility with
   # older peers in this example; confirm before changing.
   serializer._serializer.ENABLE_V5 = False
   transport_factory2 = WampRawSocketServerFactory(session_factory, serializer, debug = True)
   ## 7) start the server from a Twisted endpoint
   server2 = serverFromString(reactor, "tcp:8090")
   server2.listen(transport_factory2)
   ## 8) now enter the Twisted reactor loop
   reactor.run()
| {
"content_hash": "1a339ab8475de5980301ff559e984600",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 93,
"avg_line_length": 29.82236842105263,
"alnum_prop": 0.6558570483123759,
"repo_name": "rafzi/WAMP_POCO",
"id": "2a8a1ed76017c158028d1afb3564e0c1c0b6b7cf",
"size": "5303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "65517"
},
{
"name": "CMake",
"bytes": "610"
},
{
"name": "Python",
"bytes": "5861"
}
],
"symlink_target": ""
} |
from CIM14.CPSM.Equipment.Core.IdentifiedObject import IdentifiedObject
class OperationalLimitType(IdentifiedObject):
    """A type of limit. The meaning of a specific limit is described in this class.
    """

    def __init__(self, direction="high", acceptableDuration=0.0, *args, **kw_args):
        """Initialises a new 'OperationalLimitType' instance.

        @param direction: The direction of the limit. Values are: "high", "absoluteValue", "low"
        @param acceptableDuration: The nominal acceptable duration of the limit. Limits are commonly expressed in terms of the a time limit for which the limit is normally acceptable. The actual acceptable duration of a specific limit may depend on other local factors such as temperature or wind speed.
        """
        #: The direction of the limit. Values are: "high", "absoluteValue", "low"
        self.direction = direction
        #: The nominal acceptable duration of the limit. Limits are commonly expressed in terms of the a time limit for which the limit is normally acceptable. The actual acceptable duration of a specific limit may depend on other local factors such as temperature or wind speed.
        self.acceptableDuration = acceptableDuration
        super(OperationalLimitType, self).__init__(*args, **kw_args)

    # Introspection metadata used by the CIM (de)serialisation machinery.
    _attrs = ["direction", "acceptableDuration"]
    _attr_types = {"direction": str, "acceptableDuration": float}
    _defaults = {"direction": "high", "acceptableDuration": 0.0}
    _enums = {"direction": "OperationalLimitDirectionKind"}
    _refs = []
    _many_refs = []
| {
"content_hash": "a92b11bc3f74c9dec10eeaeb2dce5673",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 307,
"avg_line_length": 58.44444444444444,
"alnum_prop": 0.7097591888466414,
"repo_name": "rwl/PyCIM",
"id": "7640f65def08e116f2c056ad44b03dcdba56a757",
"size": "2678",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/CPSM/Equipment/OperationalLimits/OperationalLimitType.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
	# Template factory for the shared Tatooine "dim_u medium" POI building.
	result = Building()
	result.template = "object/building/poi/shared_tatooine_dim_u_medium.iff"
	result.attribute_template_id = -1
	result.stfName("poi_n","base_poi_building")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
"content_hash": "46d650dd3a7f9282fbfa7f09b5de5ca9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 73,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6905537459283387,
"repo_name": "obi-two/Rebelion",
"id": "18272390ddcb2db4a95479223b54980d2fd1fc7f",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/building/poi/shared_tatooine_dim_u_medium.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
"""Exceptions and return codes used internally.
External libraries should not used exceptions defined here.
"""
# Process exit (return) codes from Robot and Rebot.
# RC below 250 is the number of failed critical tests and exactly 250
# means that number or more such failures.
INFO_PRINTED = 251    # --help or --version
DATA_ERROR = 252      # Invalid data or cli args
STOPPED_BY_USER = 253 # KeyboardInterrupt or SystemExit
FRAMEWORK_ERROR = 255 # Unexpected error
class RobotError(Exception):
    """Base class for Robot Framework errors.

    Do not raise this exception directly but use more specific errors instead.
    """

    def __init__(self, message='', details=''):
        Exception.__init__(self, message)
        # Extra information (e.g. a traceback) accompanying the message.
        self.details = details

    @property
    def message(self):
        # NOTE(review): __unicode__ exists on Python 2 exception classes;
        # on Python 3 this would raise AttributeError unless a subclass
        # defines __unicode__ -- confirm the targeted Python version.
        return self.__unicode__()
class FrameworkError(RobotError):
    """Can be used when the core framework goes to an unexpected state.

    It is good to explicitly raise a FrameworkError if some framework
    component is used incorrectly. This is pretty much the same as an
    'Internal Error' and should of course never happen.
    """
class DataError(RobotError):
    """Used when the provided test data is invalid.

    DataErrors are not caught by keywords that run other keywords
    (e.g. `Run Keyword And Expect Error`).
    """
class VariableError(DataError):
    """Used when a variable does not exist.

    Unlike other DataErrors, VariableErrors are caught by keywords that
    run other keywords (e.g. `Run Keyword And Expect Error`).
    """
class TimeoutError(RobotError):
    """Used when a test or keyword timeout occurs.
    This exception is handled specially so that execution of the
    current test is always stopped immediately and it is not caught by
    keywords executing other keywords (e.g. `Run Keyword And Expect
    Error`).
    """
class Information(RobotError):
    """Used by argument parser with --help or --version."""
class ExecutionFailed(RobotError):
    """Communicates a failure that occurred during test execution."""

    def __init__(self, message, timeout=False, syntax=False, exit=False,
                 continue_on_failure=False, return_value=None):
        from robotide.lib.robot.utils import cut_long_message
        # Normalize Windows line endings before truncating the message.
        normalized = message.replace('\r\n', '\n') if '\r\n' in message else message
        RobotError.__init__(self, cut_long_message(normalized))
        self.timeout = timeout
        self.syntax = syntax
        self.exit = exit
        self._continue_on_failure = continue_on_failure
        self.return_value = return_value

    @property
    def dont_continue(self):
        # Timeouts, syntax errors and explicit exits always stop execution.
        return self.timeout or self.syntax or self.exit

    @property
    def continue_on_failure(self):
        return self._continue_on_failure

    @continue_on_failure.setter
    def continue_on_failure(self, value):
        self._continue_on_failure = value
        # Propagate the flag to any wrapped child errors.
        for child in getattr(self, '_errors', []):
            child.continue_on_failure = value

    def can_continue(self, teardown=False, templated=False, dry_run=False):
        """Tell whether execution may proceed after this failure."""
        if dry_run:
            return True
        fatal = self.dont_continue and not (teardown and self.syntax)
        if fatal:
            return False
        if teardown or templated:
            return True
        return self.continue_on_failure

    def get_errors(self):
        return [self]

    @property
    def status(self):
        return 'FAIL'
class HandlerExecutionFailed(ExecutionFailed):
    """Wraps an error-details object produced by a failed keyword handler."""

    def __init__(self, details):
        error = details.error
        timeout = isinstance(error, TimeoutError)
        syntax = (isinstance(error, DataError)
                  and not isinstance(error, VariableError))
        ExecutionFailed.__init__(self, details.message, timeout, syntax,
                                 self._get(error, 'EXIT_ON_FAILURE'),
                                 self._get(error, 'CONTINUE_ON_FAILURE'))
        self.full_message = details.message
        self.traceback = details.traceback

    def _get(self, error, attr):
        # Libraries may flag their exceptions with ROBOT_* attributes to
        # influence how the framework reacts to them.
        return bool(getattr(error, 'ROBOT_' + attr, False))
class ExecutionFailures(ExecutionFailed):
    """Aggregates several ExecutionFailed errors into a single failure."""

    def __init__(self, errors, message=None):
        text = message or self._format_message([unicode(e) for e in errors])
        ExecutionFailed.__init__(self, text, **self._get_attrs(errors))
        self._errors = errors

    def _format_message(self, messages):
        if len(messages) == 1:
            return messages[0]
        numbered = ['%d) %s' % (index + 1, msg)
                    for index, msg in enumerate(messages)]
        return '\n\n'.join(['Several failures occurred:'] + numbered)

    def _get_attrs(self, errors):
        # The aggregate is as severe as its worst member; continuing is
        # allowed only if every member allows it.
        return {
            'timeout': any(e.timeout for e in errors),
            'syntax': any(e.syntax for e in errors),
            'exit': any(e.exit for e in errors),
            'continue_on_failure': all(e.continue_on_failure for e in errors),
        }

    def get_errors(self):
        return self._errors
class UserKeywordExecutionFailed(ExecutionFailures):
    """Combines failures from a user keyword body and its teardown."""

    def __init__(self, run_errors=None, teardown_errors=None):
        active = self._get_active_errors(run_errors, teardown_errors)
        ExecutionFailures.__init__(
            self, active, self._get_message(run_errors, teardown_errors))
        if run_errors and not teardown_errors:
            self._errors = run_errors.get_errors()
        else:
            self._errors = [self]

    def _get_active_errors(self, *errors):
        return [error for error in errors if error]

    def _get_message(self, run_errors, teardown_errors):
        run_msg = unicode(run_errors or '')
        td_msg = unicode(teardown_errors or '')
        if run_msg and td_msg:
            return '%s\n\nAlso keyword teardown failed:\n%s' % (run_msg, td_msg)
        if td_msg:
            return 'Keyword teardown failed:\n%s' % td_msg
        return run_msg
class ExecutionPassed(ExecutionFailed):
    """Base class for exceptions communicating that execution passed.

    Should not be raised directly; use the more detailed subclasses.
    """

    def __init__(self, message=None, **kwargs):
        ExecutionFailed.__init__(self, message or self._get_message(), **kwargs)
        self._earlier_failures = []

    def _get_message(self):
        from robotide.lib.robot.utils import printable_name
        name = printable_name(type(self).__name__, code_style=True)
        return "Invalid '%s' usage." % name

    def set_earlier_failures(self, failures):
        if failures:
            self._earlier_failures.extend(failures)

    @property
    def earlier_failures(self):
        if self._earlier_failures:
            return ExecutionFailures(self._earlier_failures)
        return None

    @property
    def status(self):
        # Any recorded earlier failure turns the overall result into a FAIL.
        return 'FAIL' if self._earlier_failures else 'PASS'
class PassExecution(ExecutionPassed):
    """Used by 'Pass Execution' keyword."""
    def __init__(self, message):
        # Unlike the base class, a message is mandatory here.
        ExecutionPassed.__init__(self, message)
class ContinueForLoop(ExecutionPassed):
    """Used by 'Continue For Loop' keyword."""
class ExitForLoop(ExecutionPassed):
    """Used by 'Exit For Loop' keyword."""
class ReturnFromKeyword(ExecutionPassed):
    """Used by 'Return From Keyword' keyword."""
    def __init__(self, return_value):
        # The returned value travels with the exception back to the caller.
        ExecutionPassed.__init__(self, return_value=return_value)
class RemoteError(RobotError):
    """Used by Remote library to report remote errors."""
    def __init__(self, message='', details='', fatal=False, continuable=False):
        RobotError.__init__(self, message, details)
        # ROBOT_* attributes are inspected via getattr by the framework's
        # error handling (see HandlerExecutionFailed._get in this module).
        self.ROBOT_EXIT_ON_FAILURE = fatal
        self.ROBOT_CONTINUE_ON_FAILURE = continuable
| {
"content_hash": "710a337718f444b9f27d88d7b0391ed1",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 85,
"avg_line_length": 32.3625,
"alnum_prop": 0.6438779451525686,
"repo_name": "fingeronthebutton/RIDE",
"id": "efad55f8b8eca166a750e1c9fe5fb91dafe18837",
"size": "8375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21370"
},
{
"name": "HTML",
"bytes": "110675"
},
{
"name": "JavaScript",
"bytes": "41401"
},
{
"name": "Python",
"bytes": "2902622"
}
],
"symlink_target": ""
} |
def words_to_count(words):
    """Return a space-separated string of the lengths of the words in *words*.

    >>> words_to_count("I am a goofy goober!")
    '1 2 1 5 7'

    An empty or all-whitespace input yields an empty string.
    """
    # str.split() with no argument already ignores leading/trailing
    # whitespace and collapses runs of whitespace, so no strip() is needed.
    # join() over a generator replaces the original index loop with
    # quadratic string concatenation.
    return " ".join(str(len(word)) for word in words.split())
# py.test exercise_11_4_16.py --cov=exercise_11_4_16.py --cov-report=html
def test_words_to_count():
    """Sanity checks: a normal sentence and the empty-string edge case."""
    assert words_to_count("I am a goofy goober!") == "1 2 1 5 7"
    assert words_to_count("") == ""
if __name__ == "__main__":
with open("bacon.txt", 'r') as bacon:
bacon_words = bacon.read()
counted_words = words_to_count(bacon_words)
with open("bacon_count.txt", 'w') as bacon_count:
bacon_count.write(counted_words)
| {
"content_hash": "87ea3ba28e790fb0bfec870b2f7b5e47",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.5881458966565349,
"repo_name": "JSBCCA/pythoncode",
"id": "c3e10ced20e65b10cd9e4f60c3843fb237f7e454",
"size": "708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/exercise_11_4_16.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13548"
},
{
"name": "Python",
"bytes": "154427"
}
],
"symlink_target": ""
} |
from collections import deque
from django.db import transaction
import logging
from satori.ars import perf
from satori.core.models import *
from satori.events import Event, Client2
from dispatchers import dispatchers
from aggregators import aggregators
serial = 1  # Monotonically increasing sequence number stamped on dequeue replies.
class CheckingMaster(Client2):
    """Event-driven coordinator of the checking pipeline.

    Listens on a single event queue and maintains in-memory work sets of
    test results, test suite results and rankings.  Events only record
    work to do; ``do_work`` then drains the work sets to a fixed point,
    dispatching to per-suite dispatcher objects and per-ranking
    aggregator objects (see ``dispatchers`` and ``aggregators`` maps).
    """
    queue = 'checking_master_queue'
    def __init__(self):
        """Set up the (initially empty) queues, sets and maps of pending work."""
        super(CheckingMaster, self).__init__()
        # FIFO of TemporarySubmit objects awaiting a tester.
        self.temporary_submit_queue = deque()
        # FIFO of TestResult objects awaiting a tester; test_result_set
        # mirrors it for O(1) membership tests.
        self.test_result_queue = deque()
        self.test_result_set = set()
        # Test results currently handed out to a tester.
        self.test_result_judged_set = set()
        # Running dispatcher object per TestSuiteResult.
        self.test_suite_result_map = dict()
        # TestResult -> set of TestSuiteResults waiting on it.
        self.scheduled_test_results_map = dict()
        # Running aggregator object per Ranking.
        self.ranking_map = dict()
        # TestSuiteResult -> set of Rankings waiting on it.
        self.scheduled_test_suite_results_map = dict()
        # Work sets drained by do_work().
        self.test_results_to_rejudge = set()
        self.test_suite_result_checked_test_results = dict()
        self.test_suite_results_to_start = set()
        self.test_suite_results_to_rejudge = set()
        self.ranking_changed_contestants = set()
        self.ranking_created_submits = dict()
        self.ranking_checked_test_suite_results = dict()
        self.rankings_to_rejudge = set()
        self.rankings_to_stop = set()
        # Short-lived (test_suite_id, submit_id) -> TestSuiteResult cache
        # used only while starting a ranking, to avoid per-submit queries.
        self.test_suite_result_cache = {}
    def init(self):
        """Subscribe to all checking events and restore pending state from the DB."""
        self.attach(self.queue)
        self.map({'type': 'checking_rejudge_test'}, self.queue)
        self.map({'type': 'checking_rejudge_test_suite'}, self.queue)
        self.map({'type': 'checking_rejudge_submit_test_results'}, self.queue)
        self.map({'type': 'checking_rejudge_submit_test_suite_results'}, self.queue)
        self.map({'type': 'checking_checked_test_result'}, self.queue)
        self.map({'type': 'checking_rejudge_test_result'}, self.queue)
        self.map({'type': 'checking_rejudge_test_suite_result'}, self.queue)
        self.map({'type': 'checking_rejudge_ranking'}, self.queue)
        self.map({'type': 'checking_stop_ranking'}, self.queue)
        self.map({'type': 'checking_default_test_suite_changed'}, self.queue)
        self.map({'type': 'checking_changed_contest'}, self.queue)
        self.map({'type': 'checking_changed_contestants'}, self.queue)
        self.map({'type': 'checking_new_submit'}, self.queue)
        self.map({'type': 'checking_new_temporary_submit'}, self.queue)
        self.map({'type': 'checking_test_result_dequeue'}, self.queue)
        # Re-enqueue test results that were pending (possibly mid-judging)
        # when the previous master instance stopped.
        for test_result in TestResult.objects.filter(pending=True, submit__problem__contest__archived=False):
            if test_result.tester:
                test_result.tester = None
                test_result.save(force_update=True)
            self.test_result_queue.append(test_result)
            self.test_result_set.add(test_result)
        for test_suite_result in TestSuiteResult.objects.filter(pending=True, submit__problem__contest__archived=False):
            self.start_test_suite_result(test_suite_result)
        for ranking in Ranking.objects.filter(contest__archived=False):
            self.start_ranking(ranking)
        for temporary_submit in TemporarySubmit.objects.filter(pending=True):
            self.temporary_submit_queue.append(temporary_submit)
        self.do_work()
    def do_work(self):
        """Drain every pending work set until a full pass makes no progress."""
        flag = True
        while flag:
            flag = False
            while self.rankings_to_stop:
                flag = True
                ranking = self.rankings_to_stop.pop()
                self.do_stop_ranking(ranking)
            while self.test_suite_results_to_start:
                flag = True
                test_suite_result = self.test_suite_results_to_start.pop()
                self.start_test_suite_result(test_suite_result)
            while self.test_results_to_rejudge:
                flag = True
                test_result = self.test_results_to_rejudge.pop()
                self.do_rejudge_test_result(test_result)
            while self.test_suite_results_to_rejudge:
                flag = True
                test_suite_result = self.test_suite_results_to_rejudge.pop()
                self.do_rejudge_test_suite_result(test_suite_result)
            while self.rankings_to_rejudge:
                flag = True
                ranking = self.rankings_to_rejudge.pop()
                self.do_rejudge_ranking(ranking)
            while self.test_suite_result_checked_test_results:
                flag = True
                (test_suite_result, test_results) = self.test_suite_result_checked_test_results.popitem()
                self.do_notify_test_suite_result_checked_test_results(test_suite_result, test_results)
            while self.ranking_changed_contestants:
                flag = True
                ranking = self.ranking_changed_contestants.pop()
                self.do_notify_ranking_changed_contestants(ranking)
            while self.ranking_created_submits:
                flag = True
                (ranking, submits) = self.ranking_created_submits.popitem()
                self.do_notify_ranking_created_submits(ranking, submits)
            while self.ranking_checked_test_suite_results:
                flag = True
                (ranking, test_suite_results) = self.ranking_checked_test_suite_results.popitem()
                self.do_notify_ranking_checked_test_suite_results(ranking, test_suite_results)
    def do_rejudge_test_result(self, test_result):
        """Reset a test result and put it back on the tester queue as needed."""
        if test_result in self.test_result_set:
            # Already queued; nothing to do.
            logging.debug('checking master: rejudge test result %s: in queue', test_result.id)
        elif test_result in self.test_result_judged_set:
            # Being judged right now: reset it and move it back to the queue.
            logging.debug('checking master: rejudge test result %s: in judge', test_result.id)
            test_result.pending = True
            test_result.tester = None
            test_result.save(force_update=True)
            test_result.oa_set_map({})
            self.test_result_judged_set.remove(test_result)
            self.test_result_queue.append(test_result)
            self.test_result_set.add(test_result)
        else:
            # Fully checked earlier: reset it and also schedule the suite
            # results that depend on it for rejudging.
            logging.debug('checking master: rejudge test result %s: rejudge', test_result.id)
            test_result.pending = True
            test_result.tester = None
            test_result.save(force_update=True)
            test_result.oa_set_map({})
            self.test_result_queue.append(test_result)
            self.test_result_set.add(test_result)
            for test_suite_result in self.scheduled_test_results_map.get(test_result, []):
                self.test_suite_results_to_rejudge.add(test_suite_result)
            for test_suite_result in TestSuiteResult.objects.filter(submit=test_result.submit, test_suite__tests=test_result.test):
                if not test_suite_result in self.test_suite_result_map:
                    self.test_suite_results_to_rejudge.add(test_suite_result)
    def do_notify_test_suite_result_checked_test_results(self, test_suite_result, checked_test_results):
        """Forward freshly checked test results to the suite's dispatcher."""
        logging.debug('checking master: notify test suite result %s: checked test results %s', test_suite_result.id, ','.join(str(x.id) for x in checked_test_results))
        self.call_test_suite_result(test_suite_result, 'checked_test_results', [checked_test_results])
    def do_rejudge_test_suite_result(self, test_suite_result):
        """Reset a test suite result and restart its dispatcher."""
        if test_suite_result in self.test_suite_result_map:
            logging.debug('checking master: rejudge test suite result %s: running', test_suite_result.id)
            self.stop_test_suite_result(test_suite_result)
            test_suite_result.oa_set_map({})
            self.start_test_suite_result(test_suite_result)
        else:
            logging.debug('checking master: rejudge test suite result %s: not running', test_suite_result.id)
            # Re-fetch to get a fresh DB state before mutating it.
            test_suite_result = TestSuiteResult.objects.get(id=test_suite_result.id)
            for ranking in self.scheduled_test_suite_results_map.get(test_suite_result, []):
                self.rankings_to_rejudge.add(ranking)
            test_suite_result.pending = True
            test_suite_result.save(force_update=True)
            test_suite_result.oa_set_map({})
            self.start_test_suite_result(test_suite_result)
    def do_notify_ranking_created_submits(self, ranking, created_submits):
        """Forward newly created submits to the ranking's aggregator."""
        logging.debug('checking master: notify ranking %s: created submits %s', ranking.id, ','.join(str(x.id) for x in created_submits))
        self.call_ranking(ranking, 'created_submits', [created_submits])
    def do_notify_ranking_changed_contestants(self, ranking):
        """Tell the ranking's aggregator that the contestant set changed."""
        logging.debug('checking master: notify ranking %s: changed contestants', ranking.id)
        self.call_ranking(ranking, 'changed_contestants', [])
    def do_notify_ranking_checked_test_suite_results(self, ranking, checked_test_suite_results):
        """Forward finished test suite results to the ranking's aggregator."""
        logging.debug('checking master: notify ranking %s: checked test suite results %s', ranking.id, ','.join(str(x.id) for x in checked_test_suite_results))
        self.call_ranking(ranking, 'checked_test_suite_results', [checked_test_suite_results])
    def do_rejudge_ranking(self, ranking):
        """(Re)start the ranking's aggregator, stopping it first if running."""
        if ranking in self.ranking_map:
            logging.debug('checking master: rejudge ranking %s: running', ranking.id)
            self.stop_ranking(ranking)
            self.start_ranking(ranking)
        else:
            logging.debug('checking master: rejudge ranking %s: not running', ranking.id)
            self.start_ranking(ranking)
    def do_stop_ranking(self, ranking):
        """Stop the ranking's aggregator if it is currently running."""
        logging.debug('checking master: running rankings: ' + ' '.join([str(x.id) for x in self.ranking_map]))
        if ranking in self.ranking_map:
            logging.debug('checking master: stop ranking %s: running', ranking.id)
            self.stop_ranking(ranking)
        else:
            logging.debug('checking master: stop ranking %s: not running', ranking.id)
    def handle_event(self, queue, event):
        """Translate an incoming event into pending work, then drain the work."""
        logging.debug('checking master: event %s', event.type)
        if event.type == 'checking_checked_test_result':
            test_result = TestResult.objects.get(id=event.id)
            if test_result in self.test_result_judged_set:
                logging.debug('checking master: checked test result %s', test_result.id)
                self.test_result_judged_set.remove(test_result)
                for test_suite_result in self.scheduled_test_results_map.get(test_result, []):
                    self.test_suite_result_checked_test_results.setdefault(test_suite_result, set()).add(test_result)
            elif test_result in self.test_result_set:
                logging.error('checking master: checked test in queue')
            else:
                logging.error('checking master: checked test not in queue')
        elif event.type == 'checking_rejudge_test':
            test = Test.objects.get(id=event.id)
            logging.debug('checking master: rejudge test %s', test.id)
            self.test_results_to_rejudge.update(test.test_results.all())
        elif event.type == 'checking_rejudge_test_suite':
            test_suite = TestSuite.objects.get(id=event.id)
            logging.debug('checking master: rejudge test suite %s', test_suite.id)
            self.test_suite_results_to_rejudge.update(test_suite.test_suite_results.all())
        elif event.type == 'checking_rejudge_submit_test_results':
            submit = Submit.objects.get(id=event.id)
            logging.debug('checking master: rejudge submit test results %s', submit.id)
            self.test_results_to_rejudge.update(submit.test_results.all())
        elif event.type == 'checking_rejudge_submit_test_suite_results':
            submit = Submit.objects.get(id=event.id)
            logging.debug('checking master: rejudge submit test suite results %s', submit.id)
            self.test_suite_results_to_rejudge.update(submit.test_suite_results.all())
        elif event.type == 'checking_rejudge_test_result':
            test_result = TestResult.objects.get(id=event.id)
            logging.debug('checking master: rejudge test result %s', test_result.id)
            self.test_results_to_rejudge.add(test_result)
        elif event.type == 'checking_rejudge_test_suite_result':
            test_suite_result = TestSuiteResult.objects.get(id=event.id)
            logging.debug('checking master: rejudge test suite result %s', test_suite_result.id)
            self.test_suite_results_to_rejudge.add(test_suite_result)
        elif event.type == 'checking_rejudge_ranking':
            ranking = Ranking.objects.get(id=event.id)
            logging.debug('checking master: rejudge ranking %s', ranking.id)
            self.rankings_to_rejudge.add(ranking)
        elif event.type == 'checking_stop_ranking':
            # The Ranking row may already be deleted, so build a stub object
            # carrying only the id instead of fetching it from the DB.
            ranking = Ranking()
            ranking.id = event.id
            ranking.parent_entity_id = event.id
            logging.debug('checking master: stop ranking %s', ranking.id)
            self.rankings_to_stop.add(ranking)
        elif event.type == 'checking_default_test_suite_changed':
            problem_mapping = ProblemMapping.objects.get(id=event.id)
            logging.debug('checking master: changed default test suite for problem mapping %s', problem_mapping.id)
            for submit in Submit.objects.filter(problem=problem_mapping):
                self.schedule_test_suite_result(None, submit, problem_mapping.default_test_suite)
        elif event.type == 'checking_new_submit':
            submit = Submit.objects.get(id=event.id)
            logging.debug('checking master: new submit %s', submit.id)
            self.schedule_test_suite_result(None, submit, submit.problem.default_test_suite)
            for ranking in Ranking.objects.filter(contest=submit.problem.contest):
                self.ranking_created_submits.setdefault(ranking, set()).add(submit)
        elif event.type == 'checking_new_temporary_submit':
            temporary_submit = TemporarySubmit.objects.get(id=event.id)
            logging.debug('checking master: new temporary submit %s', temporary_submit.id)
            self.temporary_submit_queue.append(temporary_submit)
        elif event.type == 'checking_changed_contestants':
            contest = Contest.objects.get(id=event.id)
            logging.debug('checking master: changed contestants of %s', contest.id)
            for ranking in Ranking.objects.filter(contest=contest):
                self.ranking_changed_contestants.add(ranking)
        elif event.type == 'checking_changed_contest':
            contest = Contest.objects.get(id=event.id)
            logging.debug('checking master: changed contest %s', contest.id)
            if contest.archived:
                for ranking in Ranking.objects.filter(contest=contest):
                    self.rankings_to_stop.add(ranking)
            else:
                for ranking in Ranking.objects.filter(contest=contest):
                    self.rankings_to_rejudge.add(ranking)
        elif event.type == 'checking_test_result_dequeue':
            # A tester asks for work: temporary submits take priority and are
            # signalled with a negated id in the reply event.
            e = Event(type='checking_test_result_dequeue_result')
            e.tag = event.tag
            if self.temporary_submit_queue:
                temporary_submit = self.temporary_submit_queue.popleft()
                temporary_submit.tester = Role.objects.get(id=event.tester_id)
                temporary_submit.save(force_update=True)
                e.test_result_id = -temporary_submit.id
            elif self.test_result_queue:
                test_result = self.test_result_queue.popleft()
                self.test_result_set.remove(test_result)
                self.test_result_judged_set.add(test_result)
                test_result.tester = Role.objects.get(id=event.tester_id)
                test_result.save(force_update=True)
                e.test_result_id = test_result.id
            else:
                e.test_result_id = None
            global serial
            # NOTE(review): the attribute name 'Aserial' looks like a typo for
            # 'serial' -- confirm against the consumer of this event.
            e.Aserial = serial
            logging.debug('Check queue: dequeue by %s: %s (%s)', event.tester_id, e, serial)
            serial = serial + 1
            self.send(e)
        self.do_work()
    def start_test_suite_result(self, test_suite_result):
        """Create and initialise the dispatcher for a test suite result."""
        if test_suite_result in self.test_suite_result_map:
            logging.warning('Attempted to start test suite result, but already running: %s', test_suite_result.id)
            return
        logging.debug('Starting test suite result: %s', test_suite_result.id)
        dispatcher = dispatchers[test_suite_result.test_suite.dispatcher]
        self.test_suite_result_map[test_suite_result] = dispatcher(self, test_suite_result)
        self.call_test_suite_result(test_suite_result, 'init', [])
    def call_test_suite_result(self, test_suite_result, name, args):
        """Invoke a dispatcher method inside a DB transaction.

        On any exception the transaction is rolled back, the dispatcher is
        torn down and the result is marked as an internal error.
        """
        if test_suite_result not in self.test_suite_result_map:
            logging.warning('Attempted to call test suite result, but not running: %s.%s', test_suite_result.id, name)
            return
        logging.debug('Calling test suite result: %s.%s', test_suite_result.id, name)
        transaction.enter_transaction_management(True)
        transaction.managed(True)
        try:
            getattr(self.test_suite_result_map[test_suite_result], name)(*args)
        except:
            transaction.rollback()
            logging.exception('Test suite result failed: %s', test_suite_result.id)
            self.finished_test_suite_result(test_suite_result)
            try:
                test_suite_result.status = 'INT'
                test_suite_result.report = 'Internal error'
                test_suite_result.save(force_update=True)
            except:
                transaction.rollback()
                logging.exception('Test suite result cleanup failed: %s', test_suite_result.id)
        transaction.commit()
        transaction.managed(False)
        transaction.leave_transaction_management()
    # callback
    def finished_test_suite_result(self, test_suite_result):
        """Dispatcher callback: mark the result done and notify rankings."""
        # get a fresh version from db
        test_suite_result = TestSuiteResult.objects.get(id=test_suite_result.id)
        test_suite_result.pending = False
        test_suite_result.save(force_update=True)
        for ranking in self.scheduled_test_suite_results_map.get(test_suite_result, []):
            self.ranking_checked_test_suite_results.setdefault(ranking, set()).add(test_suite_result)
        self.stop_test_suite_result(test_suite_result)
    def stop_test_suite_result(self, test_suite_result):
        """Tear down a dispatcher and drop its test-result subscriptions."""
        if test_suite_result not in self.test_suite_result_map:
            logging.warning('Attempted to stop test suite result, but not running: %s', test_suite_result.id)
            return
        logging.debug('Stopping test suite result: %s', test_suite_result.id)
        for (test_result, test_suite_results) in self.scheduled_test_results_map.items():
            if test_suite_result in test_suite_results:
                test_suite_results.remove(test_suite_result)
                if not test_suite_results:
                    del self.scheduled_test_results_map[test_result]
        del self.test_suite_result_map[test_suite_result]
    def start_ranking(self, ranking):
        """Create the ranking's aggregator and replay current contest state."""
        if ranking in self.ranking_map:
            logging.warning('Attempted to start ranking, but already running: %s', ranking.id)
            return
        logging.debug('Starting ranking: %s', ranking.id)
        aggregator = aggregators[ranking.aggregator]
        self.ranking_map[ranking] = aggregator(self, ranking)
        self.call_ranking(ranking, 'init', [])
        self.call_ranking(ranking, 'changed_contestants', [])
        # Pre-load suite results for the contest so schedule_test_suite_result
        # does not hit the DB once per submit during the replay below.
        for tsr in TestSuiteResult.objects.filter(submit__problem__contest=ranking.contest):
            self.test_suite_result_cache[(tsr.test_suite_id, tsr.submit_id)] = tsr
        self.call_ranking(ranking, 'created_submits', [Submit.objects.filter(problem__contest=ranking.contest)])
        self.test_suite_result_cache = {}
    def call_ranking(self, ranking, name, args):
        """Invoke an aggregator method inside a DB transaction.

        On any exception the transaction is rolled back, the aggregator is
        stopped and the ranking is reset to an internal-error state.
        """
        if ranking not in self.ranking_map:
            logging.warning('Attempted to call ranking, but not running: %s.%s', ranking.id, name)
            return
        logging.debug('Calling ranking: %s.%s', ranking.id, name)
        perf.begin('ranking')
        transaction.enter_transaction_management(True)
        transaction.managed(True)
        try:
            getattr(self.ranking_map[ranking], name)(*args)
        except:
            transaction.rollback()
            logging.exception('Ranking failed: %s', ranking.id)
            self.stop_ranking(ranking)
            try:
                ranking.header = 'Internal error'
                ranking.footer = ''
                ranking.save(force_update=True)
                RankingEntry.objects.filter(ranking=ranking).delete()
            except:
                transaction.rollback()
                logging.exception('Ranking cleanup failed: %s', ranking.id)
        transaction.commit()
        transaction.managed(False)
        transaction.leave_transaction_management()
        perf.end('ranking')
    def stop_ranking(self, ranking):
        """Tear down an aggregator and drop its suite-result subscriptions."""
        if ranking not in self.ranking_map:
            logging.warning('Attempted to stop ranking, but not running: %s', ranking.id)
            return
        logging.debug('Stopping ranking: %s', ranking.id)
        # NOTE(review): this iterates scheduled_test_results_map but removes
        # rankings from its value sets; scheduled_test_suite_results_map
        # appears to be the intended map -- confirm before relying on it.
        for (test_suite_result, rankings) in self.scheduled_test_results_map.items():
            if ranking in rankings:
                rankings.remove(ranking)
                if not rankings:
                    del self.scheduled_test_results_map[test_suite_result]
        del self.ranking_map[ranking]
    # callback
    def schedule_test_result(self, test_suite_result, submit, test):
        """Dispatcher callback: ensure a TestResult exists and track interest.

        If the result is already checked, the requesting suite result is
        notified immediately via the pending-notification map.
        """
        (test_result, created) = TestResult.objects.get_or_create(submit=submit, test=test)
        if (test_result in self.test_result_set) or (test_result in self.test_result_judged_set):
            logging.debug('Scheduling test result: %s - already in queue', test_result.id)
        elif test_result.pending:
            logging.debug('Scheduling test result: %s - adding to queue', test_result.id)
            self.test_result_queue.append(test_result)
            self.test_result_set.add(test_result)
        else:
            logging.debug('Scheduling test result: %s - already checked', test_result.id)
            if test_suite_result is not None:
                self.test_suite_result_checked_test_results.setdefault(test_suite_result, set()).add(test_result)
        if test_suite_result is not None:
            self.scheduled_test_results_map.setdefault(test_result, set()).add(test_suite_result)
    # callback
    def schedule_test_suite_result(self, ranking, submit, test_suite):
        """Aggregator callback: ensure a TestSuiteResult exists and track interest.

        Uses the short-lived start_ranking cache when populated; already
        checked results are reported back to the ranking immediately.
        """
        if (test_suite.id, submit.id) in self.test_suite_result_cache:
            test_suite_result = self.test_suite_result_cache[(test_suite.id, submit.id)]
            created = False
        else:
            (test_suite_result, created) = TestSuiteResult.objects.get_or_create(submit=submit, test_suite=test_suite)
        if test_suite_result in self.test_suite_result_map:
            logging.debug('Scheduling test suite result: %s - already running', test_suite_result.id)
        elif test_suite_result.pending:
            logging.debug('Scheduling test suite result: %s - starting', test_suite_result.id)
            self.test_suite_results_to_start.add(test_suite_result)
        else:
            logging.debug('Scheduling test suite result: %s - already checked', test_suite_result.id)
            if ranking is not None:
                self.ranking_checked_test_suite_results.setdefault(ranking, set()).add(test_suite_result)
        if ranking is not None:
            self.scheduled_test_suite_results_map.setdefault(test_suite_result, set()).add(ranking)
| {
"content_hash": "88b27c16a97f9c1129439d8eab460ebb",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 167,
"avg_line_length": 52.06828193832599,
"alnum_prop": 0.6326832776344177,
"repo_name": "zielmicha/satori",
"id": "8fbc129859a5d426c70fedf9d184cb358c027599",
"size": "23672",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "satori.core/satori/core/checking/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "165337"
},
{
"name": "CSS",
"bytes": "72202"
},
{
"name": "HTML",
"bytes": "56647"
},
{
"name": "Java",
"bytes": "270392"
},
{
"name": "JavaScript",
"bytes": "300430"
},
{
"name": "Makefile",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1572"
},
{
"name": "Python",
"bytes": "1011796"
},
{
"name": "Shell",
"bytes": "231478"
},
{
"name": "TeX",
"bytes": "17071"
}
],
"symlink_target": ""
} |
"""
Enable Gtk3 to be used interactively by IPython.
Authors: Thomi Richards
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from gi.repository import Gtk, GLib # @UnresolvedImport
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def _main_quit(*args, **kwargs):
    """Quit the Gtk main loop.

    Returning False tells GLib not to keep this io-watch callback
    installed, so it fires at most once per registration.
    """
    Gtk.main_quit()
    return False
def create_inputhook_gtk3(stdin_file):
    """Return an IPython input hook bound to *stdin_file*.

    The hook runs the Gtk3 main loop until data arrives on *stdin_file*,
    at which point _main_quit stops the loop and control returns to the
    interactive prompt.
    """
    def inputhook_gtk3():
        GLib.io_add_watch(stdin_file, GLib.IO_IN, _main_quit)
        Gtk.main()
        return 0
    return inputhook_gtk3
| {
"content_hash": "e70244249e3d4f8613f68a504390b3ab",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 32.94117647058823,
"alnum_prop": 0.3776785714285714,
"repo_name": "SlicerRt/SlicerDebuggingTools",
"id": "7c19235a39fc1c635eb697b814d0808b0c6845f8",
"size": "1139",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PyDevRemoteDebug/ptvsd-4.1.3/ptvsd/_vendored/pydevd/pydev_ipython/inputhookgtk3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "406"
},
{
"name": "C",
"bytes": "13361"
},
{
"name": "C++",
"bytes": "105521"
},
{
"name": "CMake",
"bytes": "21408"
},
{
"name": "Cython",
"bytes": "69580"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Python",
"bytes": "3900091"
},
{
"name": "Shell",
"bytes": "737"
}
],
"symlink_target": ""
} |
from django_markdown.admin import MarkdownModelAdmin
from django.contrib import admin
from . import models
from django_markdown.widgets import AdminMarkdownWidget
from django.db.models import TextField
class EntryAdmin(MarkdownModelAdmin):
    """Admin configuration for blog entries with a Markdown editor."""
    list_display = ("title", "created")
    # Auto-fill the slug from the title in the admin form.
    prepopulated_fields = {"slug": ("title",)}
    # Next line is a workaround for Python 2.x
    formfield_overrides = {TextField: {'widget': AdminMarkdownWidget}}
admin.site.register(models.Entry, EntryAdmin) | {
"content_hash": "427044f09b44363d8df6ddd2f7381d28",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 76,
"avg_line_length": 32.9375,
"alnum_prop": 0.7191650853889943,
"repo_name": "moertoe1/moertoe123.github.io",
"id": "775ab70de28e76e6fb9cf5b05bb44aaf71a3953b",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5027"
},
{
"name": "HTML",
"bytes": "36345"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "40442"
}
],
"symlink_target": ""
} |
import pytest
import demistomock as demisto
from ServiceDeskPlus import Client, create_request_command, update_request_command, list_requests_command, \
linked_request_command, get_resolutions_list_command, delete_request_command, assign_request_command, \
pickup_request_command, modify_linked_request_command, add_resolution_command, generate_refresh_token, \
create_output, args_to_query, create_modify_linked_input_data, create_human_readable, resolution_human_readable, \
create_requests_list_info, create_fetch_list_info, fetch_incidents, close_request_command, create_udf_field
from test_data.response_constants import RESPONSE_CREATE_REQUEST, RESPONSE_UPDATE_REQUEST, \
RESPONSE_LIST_SINGLE_REQUEST, RESPONSE_LIST_MULTIPLE_REQUESTS, RESPONSE_LINKED_REQUEST_LIST, \
RESPONSE_RESOLUTION_LIST, RESPONSE_NO_RESOLUTION_LIST, RESPONSE_LINK_REQUEST, RESPONSE_UNLINK_REQUEST, \
RESPONSE_GENERATE_REFRESH_TOKEN, RESPONSE_FETCH_INCIDENTS
from test_data.result_constants import EXPECTED_CREATE_REQUEST, EXPECTED_UPDATE_REQUEST, EXPECTED_LIST_SINGLE_REQUEST, \
EXPECTED_LIST_MULTIPLE_REQUESTS, EXPECTED_LINKED_REQUEST_LIST, EXPECTED_RESOLUTION_LIST, EXPECTED_NO_RESOLUTION_LIST
# Parametrize table: (command function, command args, mocked raw response,
# expected entry-context value). Consumed by the "commands with context" tests.
COMMANDS_LIST_WITH_CONTEXT = [
    # Given the create command, different fields that should be used to create the request, the response of the command
    # and the expected result, validate that the output of the command and the expected result are identical
    (create_request_command, {'subject': 'Create request test', 'mode': 'E-Mail', 'requester': 'First Last',
                              'level': 'Tier 1', 'impact': 'Affects Group', 'priority': 'High', 'status': 'On Hold',
                              'request_type': 'Incident', 'description': 'The description of the request',
                              'urgency': 'Normal', 'group': 'Network'}, RESPONSE_CREATE_REQUEST,
     EXPECTED_CREATE_REQUEST),
    # Given the update command, different fields that should be used to create the request, the response of the command
    # and the expected result, validate that the output of the command and the expected result are identical
    (update_request_command, {'request_id': '123640000000240013', 'description': 'Update the description',
                              'impact': 'Affects Business'}, RESPONSE_UPDATE_REQUEST, EXPECTED_UPDATE_REQUEST),
    # Given list requests command, the id of the single request that should be returned, validate that the output
    # context of the command is identical to the expected output
    (list_requests_command, {'request_id': '123640000000240013'}, RESPONSE_LIST_SINGLE_REQUEST,
     EXPECTED_LIST_SINGLE_REQUEST),
    # Given list requests command, page size equal 3 and the response for 3 requests, validate that the output
    # context of the command is identical to the expected output
    (list_requests_command, {'page_size': '3'}, RESPONSE_LIST_MULTIPLE_REQUESTS, EXPECTED_LIST_MULTIPLE_REQUESTS),
    # Given the linked request command, the id of the request that it's links should be checked and the response for the
    # command, validate that the context output of the command is identical to the expected output.
    (linked_request_command, {'request_id': '123640000000246001'}, RESPONSE_LINKED_REQUEST_LIST,
     EXPECTED_LINKED_REQUEST_LIST),
    # Given the get resolutions list command, the id of the request for which the resolution should be returned and the
    # response in case there IS a resolution for the request, validate the context output of the command
    (get_resolutions_list_command, {'request_id': '123640000000241001'}, RESPONSE_RESOLUTION_LIST,
     EXPECTED_RESOLUTION_LIST),
    # Given the get resolutions list command, the id of the request for which the resolution should be returned and the
    # response in case there is NO a resolution for the request, validate the context output of the command
    (get_resolutions_list_command, {'request_id': '123640000000241001'}, RESPONSE_NO_RESOLUTION_LIST,
     EXPECTED_NO_RESOLUTION_LIST)
]
# Parametrize table: (command function, command args, mocked raw response,
# expected human-readable string). Consumed by the human-readable-only tests.
COMMANDS_LIST_WITHOUT_CONTEXT = [
    # Given the delete command and the id of the request that should be deleted, validate the human readable output
    (delete_request_command, {'request_id': '1234'}, {}, "### Successfully deleted request(s) ['1234']"),
    # Given the delete command and multiple ids of requests that should be deleted, validate the human readable output
    (delete_request_command, {'request_id': '1234,5678'}, {}, "### Successfully deleted request(s) ['1234', '5678']"),
    # Given the close command and the id of the request that should be closed, validate the human readable output
    (close_request_command, {'request_id': '1234'}, {}, '### Successfully closed request 1234'),
    # Given the assign command and the id of the request that should be assigned, validate the human readable output
    (assign_request_command, {'request_id': '1234'}, {}, '### Service Desk Plus request 1234 was successfully assigned'),
    # Given the pickup command and the id of the request that should be picked up, validate the human readable output
    (pickup_request_command, {'request_id': '1234'}, {},
     '### Service Desk Plus request 1234 was successfully picked up'),
    # Given the modify linked command with the 'Link' action and the ids of the requests that should be linked, verify
    # that the human readable indicates that the requests were successfully linked
    (modify_linked_request_command, {'action': 'Link', 'request_id': '1234', 'linked_requests_id': '5678'},
     RESPONSE_LINK_REQUEST, '## Request successfully linked'),
    # Given the modify linked command with the 'Unlink' action and the ids of the requests that should be linked, verify
    # that the human readable indicates that the requests were successfully unlinked
    (modify_linked_request_command, {'action': 'Unlink', 'request_id': '1234', 'linked_requests_id': '5678'},
     RESPONSE_UNLINK_REQUEST, '## The request[s] link are removed successfully.'),
    # Given the add resolution command, the id of the request the resolution should be added to, the resolution content
    # and the add_to_linked_requests flag set to true, validate the human readable output
    (add_resolution_command, {'request_id': '1234', 'resolution_content': 'resolution message',
                              'add_to_linked_requests': 'true'}, RESPONSE_UNLINK_REQUEST,
     '### Resolution was successfully added to 1234 and the linked requests'),
    # Given the add resolution command, the id of the request the resolution should be added to, the resolution content
    # and the add_to_linked_requests flag set to false, validate the human readable output
    (add_resolution_command, {'request_id': '1234', 'resolution_content': 'resolution message',
                              'add_to_linked_requests': 'false'}, RESPONSE_UNLINK_REQUEST,
     '### Resolution was successfully added to 1234'),
]
# Parametrize table for the generate-refresh-token command (cloud mode only):
# (command function, command args, mocked raw response, expected human readable).
REFRESH_TOKEN_COMMAND_CLOUD = [
    # Given the generate refresh token command, a valid code that should be used and the response for this command,
    # validate the human readable output
    (generate_refresh_token, {'code': '147852369'}, RESPONSE_GENERATE_REFRESH_TOKEN, '### Refresh Token: 987654321\n '
                                                                                    'Please paste the Refresh Token in'
                                                                                    ' the instance configuration and '
                                                                                    'save it for future use.'),
    # Given the generate refresh token command, a code and an error message as the response, validate that the human
    # readable is indicating an error.
    (generate_refresh_token, {'code': '147852369'}, {'error': 'invalid_code'}, '### Error: invalid_code')]
# test commands with context:
@pytest.mark.parametrize('command, args, response, expected_result', COMMANDS_LIST_WITH_CONTEXT)
def test_commands_cloud(command, args, response, expected_result, mocker):
    """Cloud mode: each command's entry context (result[1]) matches the table."""
    # Patch token retrieval so no real OAuth round-trip happens on Client().
    mocker.patch('ServiceDeskPlus.Client.get_access_token')
    cloud_client = Client('server_url', 'use_ssl', 'use_proxy', 'client_id', 'client_secret', 'refresh_token')
    mocker.patch.object(cloud_client, 'http_request', return_value=response)
    command_output = command(cloud_client, args)
    assert command_output[1] == expected_result
# test commands with context:
@pytest.mark.parametrize('command, args, response, expected_result', COMMANDS_LIST_WITH_CONTEXT)
def test_commands_on_premise(command, args, response, expected_result, mocker):
    """On-premise mode: each command's entry context (result[1]) matches the table."""
    # On-premise auth uses a technician key, so no token patch is needed.
    premise_client = Client('server_url', 'use_ssl', 'use_proxy', technician_key='technician_key', on_premise=True)
    mocker.patch.object(premise_client, 'http_request', return_value=response)
    command_output = command(premise_client, args)
    assert command_output[1] == expected_result
# test commands without context:
@pytest.mark.parametrize('command, args, response, expected_result', COMMANDS_LIST_WITHOUT_CONTEXT)
def test_command_hr_cloud(command, args, response, expected_result, mocker):
    """Cloud mode: each command's human readable (result[0]) matches the table."""
    mocker.patch('ServiceDeskPlus.Client.get_access_token')
    cloud_client = Client('server_url', 'use_ssl', 'use_proxy', 'client_id', 'client_secret', 'refresh_token')
    mocker.patch.object(cloud_client, 'http_request', return_value=response)
    human_readable = command(cloud_client, args)[0]
    assert human_readable == expected_result
# test commands without context:
@pytest.mark.parametrize('command, args, response, expected_result', COMMANDS_LIST_WITHOUT_CONTEXT)
def test_command_hr_on_premise(command, args, response, expected_result, mocker):
    """On-premise mode: each command's human readable (result[0]) matches the table."""
    premise_client = Client('server_url', 'use_ssl', 'use_proxy', technician_key='technician_key', on_premise=True)
    mocker.patch.object(premise_client, 'http_request', return_value=response)
    human_readable = command(premise_client, args)[0]
    assert human_readable == expected_result
@pytest.mark.parametrize('command, args, response, expected_result', REFRESH_TOKEN_COMMAND_CLOUD)
def test_refresh_token_command_cloud(command, args, response, expected_result, mocker):
    """generate-refresh-token: human readable for both success and error responses."""
    mocker.patch('ServiceDeskPlus.Client.get_access_token')
    cloud_client = Client('server_url', 'use_ssl', 'use_proxy', 'client_id', 'client_secret', 'refresh_token')
    mocker.patch.object(cloud_client, 'http_request', return_value=response)
    human_readable = command(cloud_client, args)[0]
    assert human_readable == expected_result
def test_refresh_token_command_on_premise(mocker):
    """
    Given:
        - on-premise client
    When:
        - run refresh-token command
    Then:
        - Returns an error that this command cannot be executed for on-premise.
    """
    mocker.patch('ServiceDeskPlus.Client.get_access_token')
    premise_client = Client('server_url', 'use_ssl', 'use_proxy', technician_key='technician_key', on_premise=True)
    mocker.patch.object(demisto, 'results')
    # return_error() pushes an error entry via demisto.results, then exits.
    with pytest.raises(SystemExit) as exc_info:
        generate_refresh_token(premise_client, 'args')
    assert exc_info.type == SystemExit
    assert demisto.results.call_count == 1
    # call_args is an (args, kwargs) pair; only the positional args matter here.
    pushed_entries = demisto.results.call_args[0]
    assert len(pushed_entries) == 1
    assert "The command 'service-desk-plus-generate-refresh-token' can not be executed on on-premise."\
        in pushed_entries[0]['Contents']
# test helper functions:
def test_create_output():
    """create_output reshapes a raw request payload into the context format."""
    raw_request = RESPONSE_CREATE_REQUEST.get('request')
    expected_context = EXPECTED_CREATE_REQUEST.get('ServiceDeskPlus(val.ID===obj.ID)').get('Request')
    assert create_output(raw_request) == expected_context
def test_args_to_query():
    """Flat command args become the nested 'request' query structure."""
    # Assignment-style args: every entity value is wrapped in {'name': ...}.
    assign_args = {'group': 'group1', 'technician': 'tech name'}
    assert args_to_query(assign_args) == {
        'request': {'group': {'name': 'group1'}, 'technician': {'name': 'tech name'}}}
    # Creation-style args: plain fields stay flat, entity fields get wrapped.
    create_args = {'subject': 'request subject', 'group': 'group1', 'impact': 'Affects Business', 'requester': 'name'}
    assert args_to_query(create_args) == {
        'request': {'subject': 'request subject', 'group': {'name': 'group1'},
                    'impact': {'name': 'Affects Business'}, 'requester': {'name': 'name'}}}
def test_create_modify_linked_input_data():
    """Each linked request id becomes one 'linked_request' entry; a non-empty
    comment is attached to every entry, an empty comment to none."""
    cases = [
        (['1234'], 'testing one request',
         {'link_requests': [{'linked_request': {'id': '1234'}, 'comments': 'testing one request'}]}),
        (['1234', '5678'], 'testing two request',
         {'link_requests': [{'linked_request': {'id': '1234'}, 'comments': 'testing two request'},
                            {'linked_request': {'id': '5678'}, 'comments': 'testing two request'}]}),
        (['1234', '5678', '0912'], '',
         {'link_requests': [{'linked_request': {'id': '1234'}},
                            {'linked_request': {'id': '5678'}},
                            {'linked_request': {'id': '0912'}}]}),
    ]
    for linked_ids, comment, expected in cases:
        assert create_modify_linked_input_data(linked_ids, comment) == expected
def test_create_human_readable():
    """Nested Requester/Technician objects collapse to their display names."""
    raw_entry = {
        'CreatedTime': 'creation_time',
        'Id': '1234',
        'Requester': {'name': 'First Last', 'mobile': None, 'id': '123640000000244019',
                      'photo_url': 'url', 'is_vip_user': False, 'department': None},
        'Technician': {'email_id': 'i@id', 'cost_per_hour': '0', 'phone': None, 'name': 'tech1'},
        'Status': 'Open',
        'Subject': 'test human readable',
    }
    assert create_human_readable(raw_entry) == {
        'CreatedTime': 'creation_time', 'Id': '1234', 'Requester': 'First Last',
        'Technician': 'tech1', 'Status': 'Open', 'Subject': 'test human readable'}
def test_resolution_human_readable():
    """The nested SubmittedBy object collapses to the submitter's name."""
    raw_resolution = {
        'Content': 'res contents',
        'SubmittedOn': 'submittion_date',
        'SubmittedBy': {'email_id': 'i@id', 'phone': None, 'name': 'submitter'},
    }
    assert resolution_human_readable(raw_resolution) == {
        'Content': 'res contents', 'SubmittedOn': 'submittion_date', 'SubmittedBy': 'submitter'}
def test_create_requests_list_info():
    """Paging/search args are wrapped in a 'list_info' dict with fixed sorting."""
    list_info = create_requests_list_info('0', '15', 'a, b, c', 'filter')
    assert list_info == {
        'list_info': {'start_index': '0', 'row_count': '15', 'search_fields': 'a, b, c',
                      'filter_by': 'filter', 'sort_field': 'created_time', 'sort_order': 'asc'}}
def test_create_fetch_list_info():
    """The fetch list_info always starts with a created_time 'between' criterion;
    statuses and any user-supplied filter strings are appended as AND criteria,
    and fetch_limit becomes row_count."""
    # Check empty status list:
    time_from, time_to, status, fetch_filter, fetch_limit = 'from', 'to', [], '', 10
    expected_output = {'list_info': {'search_criteria': [{'field': 'created_time', 'values': ['from', 'to'],
                                                          'condition': 'between'}],
                                     'sort_field': 'created_time', 'sort_order': 'asc', 'row_count': 10}}
    assert create_fetch_list_info(time_from, time_to, status, fetch_filter, fetch_limit) == expected_output
    # Check one status:
    time_from, time_to, status, fetch_filter, fetch_limit = 'from', 'to', ['status'], '', 10
    expected_output = {'list_info': {'search_criteria': [{'field': 'created_time', 'values': ['from', 'to'],
                                                          'condition': 'between'},
                                                         {'field': 'status.name', 'values': ['status'],
                                                          'condition': 'is', 'logical_operator': 'AND'}],
                                     'sort_field': 'created_time', 'sort_order': 'asc', 'row_count': 10}}
    assert create_fetch_list_info(time_from, time_to, status, fetch_filter, fetch_limit) == expected_output
    # Check multiple status:
    time_from, time_to, status, fetch_filter, fetch_limit = 'from', 'to', ['status1', 'status2'], '', 10
    expected_output = {'list_info': {'search_criteria': [{'field': 'created_time', 'values': ['from', 'to'],
                                                          'condition': 'between'},
                                                         {'field': 'status.name', 'values': ['status1', 'status2'],
                                                          'condition': 'is', 'logical_operator': 'AND'}],
                                     'sort_field': 'created_time', 'sort_order': 'asc', 'row_count': 10}}
    assert create_fetch_list_info(time_from, time_to, status, fetch_filter, fetch_limit) == expected_output
    # A single user-supplied filter string (its comma-separated values are split into a list):
    time_from, time_to, status, fetch_limit = 'from', 'to', ['status'], 15
    fetch_filter = "{'field': 'technician.name', 'values': 'tech1,tech2', 'condition': 'is', 'logical_operator':'AND'}"
    expected_output = {'list_info': {'search_criteria': [{'field': 'created_time', 'values': ['from', 'to'],
                                                          'condition': 'between'},
                                                         {'field': 'technician.name', 'condition': 'is',
                                                          'values': ['tech1', 'tech2'], 'logical_operator': 'AND'}],
                                     'sort_field': 'created_time', 'sort_order': 'asc', 'row_count': 15}}
    assert create_fetch_list_info(time_from, time_to, status, fetch_filter, fetch_limit) == expected_output
    # Multiple user-supplied filters, each appended as its own criterion:
    time_from, time_to, status, fetch_limit = 'from', 'to', ['status'], 20
    fetch_filter = "{'field':'technician.name','values':'tech1,tech2','condition':'is','logical_operator':'AND'}," \
                   "{'field':'group.name','values':'group1','condition':'is','logical_operator':'AND'}"
    expected_output = {'list_info': {'search_criteria': [{'field': 'created_time', 'values': ['from', 'to'],
                                                          'condition': 'between'},
                                                         {'field': 'technician.name', 'condition': 'is',
                                                          'values': ['tech1', 'tech2'], 'logical_operator': 'AND'},
                                                         {'field': 'group.name', 'condition': 'is',
                                                          'values': ['group1'], 'logical_operator': 'AND'}],
                                     'sort_field': 'created_time', 'sort_order': 'asc', 'row_count': 20}}
    assert create_fetch_list_info(time_from, time_to, status, fetch_filter, fetch_limit) == expected_output
def test_create_udf_field():
    """create_udf_field parses user-defined-field arguments into a dict.

    Accepted input formats: comma-separated 'key:val' pairs and a dict-like
    string. Malformed inputs must raise an error whose message contains
    'Illegal udf fields format'.
    """
    assert create_udf_field('key1:val1') == {'key1': 'val1'}
    assert create_udf_field("{'key1':'val1'}") == {'key1': 'val1'}
    assert create_udf_field('key1:val1,key2:val2') == {'key1': 'val1', 'key2': 'val2'}
    # Bug fix: the original try/except silently passed when NO exception was
    # raised; pytest.raises makes a missing error an actual test failure and
    # also checks the error message.
    invalid_udf_inputs = ['key1:val1,key2', 'key1,val1', 'key1', ':val1']
    for udf_input in invalid_udf_inputs:
        with pytest.raises(Exception, match='Illegal udf fields format'):
            create_udf_field(udf_input)
def test_fetch_incidents_cloud(mocker):
    """Fetching with a cloud client honors fetch_limit.

    Given
    - a raw response that always contains three requests
    When
    - the date helpers and the list-info builder are mocked out
    Then
    - fetch_incidents returns exactly fetch_limit incidents, and the first
      incident's name matches the raw data when a single one is fetched.
    """
    mocker.patch('ServiceDeskPlus.Client.get_access_token')
    mocker.patch('ServiceDeskPlus.parse_date_range', return_value=('2020-06-23 04:18:00', 'never mind'))
    mocker.patch('ServiceDeskPlus.date_to_timestamp', return_value='1592918317168')
    mocker.patch('ServiceDeskPlus.create_fetch_list_info', return_value={})
    for limit in (3, 2, 1, 0):
        client = Client('server_url', 'use_ssl', 'use_proxy', 'client_id', 'client_secret', 'refresh_token',
                        fetch_time='1 hour', fetch_limit=limit, fetch_status=['Open'])
        mocker.patch.object(client, 'get_requests', return_value=RESPONSE_FETCH_INCIDENTS)
        incidents = fetch_incidents(client)
        assert len(incidents) == limit
        if limit == 1:
            assert incidents[0].get('name') == 'Test fetch incidents - 1234'
def test_fetch_incidents_on_premise(mocker):
    """Fetching with an on-premise client honors fetch_limit.

    Given
    - a raw response that always contains three requests
    When
    - the date helpers and the list-info builder are mocked out
    Then
    - fetch_incidents returns exactly fetch_limit incidents, and the first
      incident's name matches the raw data when a single one is fetched.
    """
    mocker.patch('ServiceDeskPlus.parse_date_range', return_value=('2020-06-23 04:18:00', 'never mind'))
    mocker.patch('ServiceDeskPlus.date_to_timestamp', return_value='1592918317168')
    mocker.patch('ServiceDeskPlus.create_fetch_list_info', return_value={})
    for limit in (3, 2, 1, 0):
        client = Client('server_url', 'use_ssl', 'use_proxy', technician_key='technician_key', on_premise=True,
                        fetch_time='1 hour', fetch_limit=limit, fetch_status=['Open'])
        mocker.patch.object(client, 'get_requests', return_value=RESPONSE_FETCH_INCIDENTS)
        incidents = fetch_incidents(client)
        assert len(incidents) == limit
        if limit == 1:
            assert incidents[0].get('name') == 'Test fetch incidents - 1234'
def test_test_module_cloud(mocker):
    """test_module returns 'ok' for a healthy cloud client.

    The token retrieval, date parsing and HTTP layer are all mocked so no
    real service is contacted.
    """
    from ServiceDeskPlus import test_module as module
    mocker.patch('ServiceDeskPlus.Client.get_access_token')
    cloud_client = Client('server_url', 'use_ssl', 'use_proxy', 'client_id', 'client_secret',
                          refresh_token='refresh_token')
    mocker.patch('ServiceDeskPlus.parse_date_range', return_value=('2020-06-23 04:18:00', 'never mind'))
    mocker.patch.object(cloud_client, 'http_request', return_value=RESPONSE_FETCH_INCIDENTS)
    assert module(cloud_client) == 'ok'
def test_test_module_on_premise(mocker):
    """test_module returns 'ok' for a healthy on-premise client.

    The date parsing and HTTP layer are mocked so no real service is
    contacted.
    """
    from ServiceDeskPlus import test_module as module
    premise_client = Client('server_url', 'use_ssl', 'use_proxy', technician_key='technician_key', on_premise=True)
    mocker.patch('ServiceDeskPlus.parse_date_range', return_value=('2020-06-23 04:18:00', 'never mind'))
    mocker.patch.object(premise_client, 'http_request', return_value=RESPONSE_FETCH_INCIDENTS)
    assert module(premise_client) == 'ok'
| {
"content_hash": "db2e22304417f2bf09457d95a00ddfc8",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 121,
"avg_line_length": 59.594907407407405,
"alnum_prop": 0.6341425519518353,
"repo_name": "VirusTotal/content",
"id": "6e2978a84ee6a6142d29210b25f130068e9ba997",
"size": "25745",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/ServiceDeskPlus/Integrations/ServiceDeskPlus/ServiceDeskPlus_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
"""Linux specific tests."""
from __future__ import division
import collections
import contextlib
import errno
import glob
import io
import os
import pprint
import re
import shutil
import socket
import struct
import tempfile
import textwrap
import time
import warnings
import psutil
from psutil import LINUX
from psutil._compat import PY3
from psutil._compat import u
from psutil.tests import call_until
from psutil.tests import get_kernel_version
from psutil.tests import importlib
from psutil.tests import MEMORY_TOLERANCE
from psutil.tests import mock
from psutil.tests import PYPY
from psutil.tests import pyrun
from psutil.tests import reap_children
from psutil.tests import retry_before_failing
from psutil.tests import run_test_module_by_name
from psutil.tests import safe_rmpath
from psutil.tests import sh
from psutil.tests import skip_on_not_implemented
from psutil.tests import TESTFN
from psutil.tests import ThreadTask
from psutil.tests import TRAVIS
from psutil.tests import unittest
from psutil.tests import which
HERE = os.path.abspath(os.path.dirname(__file__))  # directory of this test file
# ioctl request codes passed to fcntl.ioctl() by the interface helpers below.
SIOCGIFADDR = 0x8915
SIOCGIFCONF = 0x8912
SIOCGIFHWADDR = 0x8927
if LINUX:
    # NOTE(review): presumably used to convert sector counts from /proc
    # disk statistics — confirm at the use sites further down the file.
    SECTOR_SIZE = 512
# =====================================================================
# --- utils
# =====================================================================
def get_ipv4_address(ifname):
    """Return the IPv4 address assigned to interface *ifname*, obtained
    via the SIOCGIFADDR ioctl on a throwaway UDP socket.
    """
    import fcntl
    # Kernel interface names are at most 15 chars (plus the trailing NUL).
    ifname = ifname[:15]
    if PY3:
        ifname = bytes(ifname, 'ascii')
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with contextlib.closing(sock):
        packed = fcntl.ioctl(sock.fileno(), SIOCGIFADDR, struct.pack('256s', ifname))
        # The address bytes sit at offset 20..24 of the returned ifreq struct.
        return socket.inet_ntoa(packed[20:24])
def get_mac_address(ifname):
    """Return the MAC address of interface *ifname* as 'aa:bb:cc:dd:ee:ff',
    obtained via the SIOCGIFHWADDR ioctl.
    """
    import fcntl
    ifname = ifname[:15]
    if PY3:
        ifname = bytes(ifname, 'ascii')
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    with contextlib.closing(s):
        info = fcntl.ioctl(
            s.fileno(), SIOCGIFHWADDR, struct.pack('256s', ifname))
        if PY3:
            # Iterating bytes on Python 3 already yields ints, so shadow
            # ord() with an identity function to keep one expression below.
            def ord(x):
                return x
        else:
            import __builtin__
            ord = __builtin__.ord
        # Bytes 18-24 of the returned ifreq struct hold the hardware address;
        # the trailing slice drops the final ':' left by the join.
        return ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
def free_swap():
    """Parse ``free -b`` output and return swap figures.

    Returns a namedtuple with fields ``total``, ``used`` and ``free``
    (bytes); raises ValueError when no 'Swap' row is present.
    """
    out = sh('free -b')
    for row in out.split('\n'):
        if row.startswith('Swap'):
            _, total, used, free = row.split()
            swap_nt = collections.namedtuple('free', 'total used free')
            return swap_nt(int(total), int(used), int(free))
    raise ValueError(
        "can't find 'Swap' in 'free' output:\n%s" % out)
def free_physmem():
    """Parse ``free -b`` output and return physical memory figures.

    Returns a namedtuple (total, used, free, shared, output). Only columns
    whose position is stable across 'free' versions are parsed — 'shared'
    and 'cached' moved around between formats, see
    https://github.com/giampaolo/psutil/issues/538#issuecomment-57059946
    """
    out = sh('free -b')
    for row in out.split('\n'):
        if row.startswith('Mem'):
            total, used, free, shared = [int(x) for x in row.split()[1:5]]
            mem_nt = collections.namedtuple(
                'free', 'total used free shared output')
            return mem_nt(total, used, free, shared, out)
    raise ValueError(
        "can't find 'Mem' in 'free' output:\n%s" % out)
def vmstat(stat):
    """Return the integer value reported by ``vmstat -s`` for the first
    line containing *stat*; raise ValueError when no line matches."""
    for row in sh("vmstat -s").split("\n"):
        row = row.strip()
        if stat in row:
            return int(row.split(' ')[0])
    raise ValueError("can't find %r in 'vmstat' output" % stat)
def get_free_version_info():
    """Return the installed 'free' utility version as a tuple of ints."""
    version_str = sh("free -V").strip().split()[-1]
    return tuple(int(part) for part in version_str.split('.'))
# =====================================================================
# --- system virtual memory
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestSystemVirtualMemory(unittest.TestCase):
    """Cross-check psutil.virtual_memory() fields against the 'free' and
    'vmstat' CLI tools, plus mocked /proc/meminfo corner cases."""

    def test_total(self):
        # free_value = free_physmem().total
        # psutil_value = psutil.virtual_memory().total
        # self.assertEqual(free_value, psutil_value)
        vmstat_value = vmstat('total memory') * 1024
        psutil_value = psutil.virtual_memory().total
        self.assertAlmostEqual(vmstat_value, psutil_value)

    # Older versions of procps used slab memory to calculate used memory.
    # This got changed in:
    # https://gitlab.com/procps-ng/procps/commit/
    # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
    @unittest.skipUnless(
        LINUX and get_free_version_info() >= (3, 3, 12), "old free version")
    @retry_before_failing()
    def test_used(self):
        # 'free' and psutil must agree on used memory within tolerance.
        free = free_physmem()
        free_value = free.used
        psutil_value = psutil.virtual_memory().used
        self.assertAlmostEqual(
            free_value, psutil_value, delta=MEMORY_TOLERANCE,
            msg='%s %s \n%s' % (free_value, psutil_value, free.output))

    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    @retry_before_failing()
    def test_free(self):
        # _, _, free_value, _ = free_physmem()
        # psutil_value = psutil.virtual_memory().free
        # self.assertAlmostEqual(
        #     free_value, psutil_value, delta=MEMORY_TOLERANCE)
        vmstat_value = vmstat('free memory') * 1024
        psutil_value = psutil.virtual_memory().free
        self.assertAlmostEqual(
            vmstat_value, psutil_value, delta=MEMORY_TOLERANCE)

    @retry_before_failing()
    def test_buffers(self):
        vmstat_value = vmstat('buffer memory') * 1024
        psutil_value = psutil.virtual_memory().buffers
        self.assertAlmostEqual(
            vmstat_value, psutil_value, delta=MEMORY_TOLERANCE)

    @retry_before_failing()
    def test_active(self):
        vmstat_value = vmstat('active memory') * 1024
        psutil_value = psutil.virtual_memory().active
        self.assertAlmostEqual(
            vmstat_value, psutil_value, delta=MEMORY_TOLERANCE)

    @retry_before_failing()
    def test_inactive(self):
        vmstat_value = vmstat('inactive memory') * 1024
        psutil_value = psutil.virtual_memory().inactive
        self.assertAlmostEqual(
            vmstat_value, psutil_value, delta=MEMORY_TOLERANCE)

    @retry_before_failing()
    def test_shared(self):
        free = free_physmem()
        free_value = free.shared
        # Some 'free' versions report 0 for the shared column; skip then.
        if free_value == 0:
            raise unittest.SkipTest("free does not support 'shared' column")
        psutil_value = psutil.virtual_memory().shared
        self.assertAlmostEqual(
            free_value, psutil_value, delta=MEMORY_TOLERANCE,
            msg='%s %s \n%s' % (free_value, psutil_value, free.output))

    @retry_before_failing()
    def test_available(self):
        # "free" output format has changed at some point:
        # https://github.com/giampaolo/psutil/issues/538#issuecomment-147192098
        out = sh("free -b")
        lines = out.split('\n')
        if 'available' not in lines[0]:
            raise unittest.SkipTest("free does not support 'available' column")
        else:
            free_value = int(lines[1].split()[-1])
            psutil_value = psutil.virtual_memory().available
            self.assertAlmostEqual(
                free_value, psutil_value, delta=MEMORY_TOLERANCE,
                msg='%s %s \n%s' % (free_value, psutil_value, out))

    def test_warnings_mocked(self):
        # A /proc/meminfo lacking the Cached/Shmem/Active/Inactive totals
        # must produce exactly one UserWarning and zeroed fields, not a crash.
        def open_mock(name, *args, **kwargs):
            if name == '/proc/meminfo':
                return io.BytesIO(textwrap.dedent("""\
                    Active(anon):    6145416 kB
                    Active(file):    2950064 kB
                    Buffers:          287952 kB
                    Inactive(anon):   574764 kB
                    Inactive(file):  1567648 kB
                    MemAvailable:    6574984 kB
                    MemFree:         2057400 kB
                    MemTotal:       16325648 kB
                    SReclaimable:     346648 kB
                    """).encode())
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
            with warnings.catch_warnings(record=True) as ws:
                warnings.simplefilter("always")
                ret = psutil.virtual_memory()
                assert m.called
                self.assertEqual(len(ws), 1)
                w = ws[0]
                self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
                self.assertIn(
                    "memory stats couldn't be determined", str(w.message))
                self.assertIn("cached", str(w.message))
                self.assertIn("shared", str(w.message))
                self.assertIn("active", str(w.message))
                self.assertIn("inactive", str(w.message))
                self.assertEqual(ret.cached, 0)
                self.assertEqual(ret.active, 0)
                self.assertEqual(ret.inactive, 0)
                self.assertEqual(ret.shared, 0)

    def test_avail_old_percent(self):
        # Make sure that our calculation of avail mem for old kernels
        # is off by max 10%.
        from psutil._pslinux import calculate_avail_vmem
        from psutil._pslinux import open_binary
        mems = {}
        with open_binary('/proc/meminfo') as f:
            for line in f:
                fields = line.split()
                mems[fields[0]] = int(fields[1]) * 1024

        a = calculate_avail_vmem(mems)
        if b'MemAvailable:' in mems:
            b = mems[b'MemAvailable:']
            diff_percent = abs(a - b) / a * 100
            self.assertLess(diff_percent, 10)

    def test_avail_old_comes_from_kernel(self):
        # Make sure the "MemAvailable:" column is used instead of relying
        # on our internal algorithm to calculate avail mem.
        def open_mock(name, *args, **kwargs):
            if name == "/proc/meminfo":
                return io.BytesIO(textwrap.dedent("""\
                    Active:          9444728 kB
                    Active(anon):    6145416 kB
                    Active(file):    2950064 kB
                    Buffers:          287952 kB
                    Cached:          4818144 kB
                    Inactive(file):  1578132 kB
                    Inactive(anon):   574764 kB
                    Inactive(file):  1567648 kB
                    MemAvailable:    6574984 kB
                    MemFree:         2057400 kB
                    MemTotal:       16325648 kB
                    Shmem:            577588 kB
                    SReclaimable:     346648 kB
                    """).encode())
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
            ret = psutil.virtual_memory()
            assert m.called
            self.assertEqual(ret.available, 6574984 * 1024)

    def test_avail_old_missing_fields(self):
        # Remove Active(file), Inactive(file) and SReclaimable
        # from /proc/meminfo and make sure the fallback is used
        # (free + cached).
        def open_mock(name, *args, **kwargs):
            if name == "/proc/meminfo":
                return io.BytesIO(textwrap.dedent("""\
                    Active:          9444728 kB
                    Active(anon):    6145416 kB
                    Buffers:          287952 kB
                    Cached:          4818144 kB
                    Inactive(file):  1578132 kB
                    Inactive(anon):   574764 kB
                    MemFree:         2057400 kB
                    MemTotal:       16325648 kB
                    Shmem:            577588 kB
                    """).encode())
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
            ret = psutil.virtual_memory()
            assert m.called
            self.assertEqual(ret.available, 2057400 * 1024 + 4818144 * 1024)

    def test_avail_old_missing_zoneinfo(self):
        # Remove /proc/zoneinfo file. Make sure fallback is used
        # (free + cached).
        def open_mock(name, *args, **kwargs):
            if name == "/proc/meminfo":
                return io.BytesIO(textwrap.dedent("""\
                    Active:          9444728 kB
                    Active(anon):    6145416 kB
                    Active(file):    2950064 kB
                    Buffers:          287952 kB
                    Cached:          4818144 kB
                    Inactive(file):  1578132 kB
                    Inactive(anon):   574764 kB
                    Inactive(file):  1567648 kB
                    MemFree:         2057400 kB
                    MemTotal:       16325648 kB
                    Shmem:            577588 kB
                    SReclaimable:     346648 kB
                    """).encode())
            elif name == "/proc/zoneinfo":
                raise IOError(errno.ENOENT, 'no such file or directory')
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, create=True, side_effect=open_mock) as m:
            ret = psutil.virtual_memory()
            assert m.called
            self.assertEqual(ret.available, 2057400 * 1024 + 4818144 * 1024)
# =====================================================================
# --- system swap memory
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestSystemSwapMemory(unittest.TestCase):
    """Compare psutil.swap_memory() against the "free" utility and
    exercise the mocked error paths for the 'sin'/'sout' stats.

    Fix: the three comparison tests used to ``return`` the result of
    ``assertAlmostEqual`` -- test methods must not return a value
    (modern unittest warns about non-None returns); the ``return``
    keywords are dropped.
    """

    def test_total(self):
        """Total swap must match "free" within MEMORY_TOLERANCE."""
        free_value = free_swap().total
        psutil_value = psutil.swap_memory().total
        self.assertAlmostEqual(
            free_value, psutil_value, delta=MEMORY_TOLERANCE)

    @retry_before_failing()
    def test_used(self):
        """Used swap must match "free" within MEMORY_TOLERANCE."""
        free_value = free_swap().used
        psutil_value = psutil.swap_memory().used
        self.assertAlmostEqual(
            free_value, psutil_value, delta=MEMORY_TOLERANCE)

    @retry_before_failing()
    def test_free(self):
        """Free swap must match "free" within MEMORY_TOLERANCE."""
        free_value = free_swap().free
        psutil_value = psutil.swap_memory().free
        self.assertAlmostEqual(
            free_value, psutil_value, delta=MEMORY_TOLERANCE)

    def test_warnings_mocked(self):
        """If /proc/vmstat can't be read, swap_memory() must emit one
        warning and fall back to sin=0 / sout=0."""
        with mock.patch('psutil._pslinux.open', create=True) as m:
            with warnings.catch_warnings(record=True) as ws:
                warnings.simplefilter("always")
                ret = psutil.swap_memory()
                assert m.called
                self.assertEqual(len(ws), 1)
                w = ws[0]
                self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
                self.assertIn(
                    "'sin' and 'sout' swap memory stats couldn't "
                    "be determined", str(w.message))
                self.assertEqual(ret.sin, 0)
                self.assertEqual(ret.sout, 0)

    def test_no_vmstat_mocked(self):
        """Same as above but with open() raising IOError outright."""
        # see https://github.com/giampaolo/psutil/issues/722
        with mock.patch('psutil._pslinux.open', create=True,
                        side_effect=IOError) as m:
            with warnings.catch_warnings(record=True) as ws:
                warnings.simplefilter("always")
                ret = psutil.swap_memory()
                assert m.called
                self.assertEqual(len(ws), 1)
                w = ws[0]
                self.assertTrue(w.filename.endswith('psutil/_pslinux.py'))
                self.assertIn(
                    "'sin' and 'sout' swap memory stats couldn't "
                    "be determined and were set to 0",
                    str(w.message))
                self.assertEqual(ret.sin, 0)
                self.assertEqual(ret.sout, 0)
# =====================================================================
# --- system CPU
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestSystemCPU(unittest.TestCase):
    """Tests for system-wide CPU APIs: cpu_times(), cpu_count(),
    cpu_freq(), validated against sysfs, nproc/lscpu and mocks."""

    @unittest.skipIf(TRAVIS, "unknown failure on travis")
    def test_cpu_times(self):
        # The set of fields returned by cpu_times() is gated on the
        # running kernel version; verify each version threshold.
        fields = psutil.cpu_times()._fields
        # os.uname()[2] is the kernel release, e.g. "4.4.0-105-generic".
        kernel_ver = re.findall('\d+\.\d+\.\d+', os.uname()[2])[0]
        kernel_ver_info = tuple(map(int, kernel_ver.split('.')))
        if kernel_ver_info >= (2, 6, 11):
            self.assertIn('steal', fields)
        else:
            self.assertNotIn('steal', fields)
        if kernel_ver_info >= (2, 6, 24):
            self.assertIn('guest', fields)
        else:
            self.assertNotIn('guest', fields)
        if kernel_ver_info >= (3, 2, 0):
            self.assertIn('guest_nice', fields)
        else:
            self.assertNotIn('guest_nice', fields)

    @unittest.skipUnless(os.path.exists("/sys/devices/system/cpu/online"),
                         "/sys/devices/system/cpu/online does not exist")
    def test_cpu_count_logical_w_sysdev_cpu_online(self):
        # Validate cpu_count() against the "0-N" range published in
        # /sys/devices/system/cpu/online.
        with open("/sys/devices/system/cpu/online") as f:
            value = f.read().strip()
        if "-" in str(value):
            value = int(value.split('-')[1]) + 1
            self.assertEqual(psutil.cpu_count(), value)

    @unittest.skipUnless(os.path.exists("/sys/devices/system/cpu"),
                         "/sys/devices/system/cpu does not exist")
    def test_cpu_count_logical_w_sysdev_cpu_num(self):
        # Count the cpu[0-9]+ entries under /sys/devices/system/cpu.
        ls = os.listdir("/sys/devices/system/cpu")
        count = len([x for x in ls if re.search("cpu\d+$", x) is not None])
        self.assertEqual(psutil.cpu_count(), count)

    @unittest.skipUnless(which("nproc"), "nproc utility not available")
    def test_cpu_count_logical_w_nproc(self):
        # Compare against the "nproc --all" utility.
        num = int(sh("nproc --all"))
        self.assertEqual(psutil.cpu_count(logical=True), num)

    @unittest.skipUnless(which("lscpu"), "lscpu utility not available")
    def test_cpu_count_logical_w_lscpu(self):
        # "lscpu -p" prints one non-comment line per logical CPU.
        out = sh("lscpu -p")
        num = len([x for x in out.split('\n') if not x.startswith('#')])
        self.assertEqual(psutil.cpu_count(logical=True), num)

    def test_cpu_count_logical_mocked(self):
        import psutil._pslinux
        original = psutil._pslinux.cpu_count_logical()
        # Here we want to mock os.sysconf("SC_NPROCESSORS_ONLN") in
        # order to cause the parsing of /proc/cpuinfo and /proc/stat.
        with mock.patch(
                'psutil._pslinux.os.sysconf', side_effect=ValueError) as m:
            self.assertEqual(psutil._pslinux.cpu_count_logical(), original)
            assert m.called

            # Let's have open() return empty data and make sure None is
            # returned ('cause we mimic os.cpu_count()).
            with mock.patch('psutil._pslinux.open', create=True) as m:
                self.assertIsNone(psutil._pslinux.cpu_count_logical())
                # both /proc/cpuinfo and /proc/stat must have been read
                self.assertEqual(m.call_count, 2)
                # /proc/stat should be the last one
                self.assertEqual(m.call_args[0][0], '/proc/stat')

            # Let's push this a bit further and make sure /proc/cpuinfo
            # parsing works as expected.
            with open('/proc/cpuinfo', 'rb') as f:
                cpuinfo_data = f.read()
            fake_file = io.BytesIO(cpuinfo_data)
            with mock.patch('psutil._pslinux.open',
                            return_value=fake_file, create=True) as m:
                self.assertEqual(psutil._pslinux.cpu_count_logical(), original)

        # Finally, let's make /proc/cpuinfo return meaningless data;
        # this way we'll fall back on relying on /proc/stat
        def open_mock(name, *args, **kwargs):
            if name.startswith('/proc/cpuinfo'):
                return io.BytesIO(b"")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock, create=True):
            self.assertEqual(psutil._pslinux.cpu_count_logical(), original)

    def test_cpu_count_physical_mocked(self):
        # Have open() return empty data and make sure None is returned
        # ('cause we want to mimic os.cpu_count())
        with mock.patch('psutil._pslinux.open', create=True) as m:
            self.assertIsNone(psutil._pslinux.cpu_count_physical())
            assert m.called

    def test_cpu_freq_no_result(self):
        # With no cpufreq entries in sysfs, cpu_freq() returns None.
        with mock.patch("psutil._pslinux.glob.glob", return_value=[]):
            self.assertIsNone(psutil.cpu_freq())

    @unittest.skipIf(TRAVIS, "fails on Travis")
    def test_cpu_freq_use_second_file(self):
        # If the ".../cpufreq/policy*" glob finds nothing, a second
        # glob pattern must be tried (hence the 2 recorded calls).
        # https://github.com/giampaolo/psutil/issues/981
        def glob_mock(pattern):
            if pattern.startswith("/sys/devices/system/cpu/cpufreq/policy"):
                flags.append(None)
                return []
            else:
                flags.append(None)
                return orig_glob(pattern)

        flags = []
        orig_glob = glob.glob
        with mock.patch("psutil._pslinux.glob.glob", side_effect=glob_mock,
                        create=True):
            assert psutil.cpu_freq()
            self.assertEqual(len(flags), 2)
# =====================================================================
# --- system CPU stats
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestSystemCPUStats(unittest.TestCase):
    """Compare psutil.cpu_stats() counters against "vmstat"."""

    def _assert_close_to_vmstat(self, vmstat_key, psutil_field):
        # The counters keep increasing while the two tools run, hence
        # the generous tolerance.
        expected = vmstat(vmstat_key)
        actual = getattr(psutil.cpu_stats(), psutil_field)
        self.assertAlmostEqual(expected, actual, delta=500)

    @unittest.skipIf(TRAVIS, "fails on Travis")
    def test_ctx_switches(self):
        self._assert_close_to_vmstat("context switches", "ctx_switches")

    @unittest.skipIf(TRAVIS, "fails on Travis")
    def test_interrupts(self):
        self._assert_close_to_vmstat("interrupts", "interrupts")
# =====================================================================
# --- system network
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestSystemNetwork(unittest.TestCase):
    """System-wide network APIs, validated against ifconfig / "ip addr"
    and via mocked /proc files.

    Fixes: the ``ifconfig(nic)`` helper in test_net_io_counters ignored
    its parameter and read the enclosing loop variable ``name`` instead
    (it only worked by accident); regex patterns are now raw strings
    ('\\d' in a non-raw literal is a deprecated escape).
    """

    def test_net_if_addrs_ips(self):
        """MAC / IPv4 addresses must match what the system reports."""
        for name, addrs in psutil.net_if_addrs().items():
            for addr in addrs:
                if addr.family == psutil.AF_LINK:
                    self.assertEqual(addr.address, get_mac_address(name))
                elif addr.family == socket.AF_INET:
                    self.assertEqual(addr.address, get_ipv4_address(name))
                # TODO: test for AF_INET6 family

    def test_net_if_stats(self):
        """Per-NIC MTU must match ifconfig's."""
        for name, stats in psutil.net_if_stats().items():
            try:
                out = sh("ifconfig %s" % name)
            except RuntimeError:
                # NIC may have disappeared or ifconfig may fail; skip it.
                pass
            else:
                # Not always reliable.
                # self.assertEqual(stats.isup, 'RUNNING' in out, msg=out)
                self.assertEqual(stats.mtu,
                                 int(re.findall(r'MTU:(\d+)', out)[0]))

    @retry_before_failing()
    def test_net_io_counters(self):
        """Compare per-NIC I/O counters against ifconfig's output."""
        def ifconfig(nic):
            # Parse ifconfig's output for *nic* into a plain dict.
            # (Previously read the outer loop variable "name".)
            ret = {}
            out = sh("ifconfig %s" % nic)
            ret['packets_recv'] = int(re.findall(r'RX packets:(\d+)', out)[0])
            ret['packets_sent'] = int(re.findall(r'TX packets:(\d+)', out)[0])
            ret['errin'] = int(re.findall(r'errors:(\d+)', out)[0])
            ret['errout'] = int(re.findall(r'errors:(\d+)', out)[1])
            ret['dropin'] = int(re.findall(r'dropped:(\d+)', out)[0])
            ret['dropout'] = int(re.findall(r'dropped:(\d+)', out)[1])
            ret['bytes_recv'] = int(re.findall(r'RX bytes:(\d+)', out)[0])
            ret['bytes_sent'] = int(re.findall(r'TX bytes:(\d+)', out)[0])
            return ret

        for name, stats in psutil.net_io_counters(pernic=True).items():
            try:
                ifconfig_ret = ifconfig(name)
            except RuntimeError:
                continue
            self.assertAlmostEqual(
                stats.bytes_recv, ifconfig_ret['bytes_recv'], delta=1024 * 5)
            self.assertAlmostEqual(
                stats.bytes_sent, ifconfig_ret['bytes_sent'], delta=1024 * 5)
            self.assertAlmostEqual(
                stats.packets_recv, ifconfig_ret['packets_recv'], delta=1024)
            self.assertAlmostEqual(
                stats.packets_sent, ifconfig_ret['packets_sent'], delta=1024)
            self.assertAlmostEqual(
                stats.errin, ifconfig_ret['errin'], delta=10)
            self.assertAlmostEqual(
                stats.errout, ifconfig_ret['errout'], delta=10)
            self.assertAlmostEqual(
                stats.dropin, ifconfig_ret['dropin'], delta=10)
            self.assertAlmostEqual(
                stats.dropout, ifconfig_ret['dropout'], delta=10)

    @unittest.skipUnless(which('ip'), "'ip' utility not available")
    @unittest.skipIf(TRAVIS, "skipped on Travis")
    def test_net_if_names(self):
        """NIC names from net_if_addrs() must match "ip addr" output."""
        out = sh("ip addr").strip()
        nics = [x for x in psutil.net_if_addrs().keys() if ':' not in x]
        found = 0
        for line in out.split('\n'):
            line = line.strip()
            # interface lines look like "2: eth0: <...>"
            if re.search(r"^\d+:", line):
                found += 1
                name = line.split(':')[1].strip()
                self.assertIn(name, nics)
        self.assertEqual(len(nics), found, msg="%s\n---\n%s" % (
            pprint.pformat(nics), out))

    @mock.patch('psutil._pslinux.socket.inet_ntop', side_effect=ValueError)
    @mock.patch('psutil._pslinux.supports_ipv6', return_value=False)
    def test_net_connections_ipv6_unsupported(self, supports_ipv6, inet_ntop):
        """net_connections(kind='inet6') must not blow up on a system
        where IPv6 is unsupported."""
        # see: https://github.com/giampaolo/psutil/issues/623
        try:
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            self.addCleanup(s.close)
            s.bind(("::1", 0))
        except socket.error:
            pass
        psutil.net_connections(kind='inet6')

    def test_net_connections_mocked(self):
        """Unix-socket parsing must survive a corrupt /proc/net/unix
        line (last fixture line)."""
        def open_mock(name, *args, **kwargs):
            if name == '/proc/net/unix':
                return io.StringIO(textwrap.dedent(u"""\
                    0: 00000003 000 000 0001 03 462170 @/tmp/dbus-Qw2hMPIU3n
                    0: 00000003 000 000 0001 03 35010 @/tmp/dbus-tB2X8h69BQ
                    0: 00000003 000 000 0001 03 34424 @/tmp/dbus-cHy80Y8O
                    000000000000000000000000000000000000000000000000000000
                    """))
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            psutil.net_connections(kind='unix')
            assert m.called
# =====================================================================
# --- system disk
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestSystemDisks(unittest.TestCase):
    """Disk partition/usage/I/O tests; the mocked tests cover the
    different /proc/diskstats formats across kernel versions."""

    @unittest.skipUnless(
        hasattr(os, 'statvfs'), "os.statvfs() function not available")
    @skip_on_not_implemented()
    def test_disk_partitions_and_usage(self):
        # test psutil.disk_usage() and psutil.disk_partitions()
        # against "df -a"
        def df(path):
            # Return (device, total, used, free) for *path* as
            # reported by df; -B 1 makes all sizes plain bytes.
            out = sh('df -P -B 1 "%s"' % path).strip()
            lines = out.split('\n')
            lines.pop(0)
            line = lines.pop(0)
            dev, total, used, free = line.split()[:4]
            if dev == 'none':
                dev = ''
            total, used, free = int(total), int(used), int(free)
            return dev, total, used, free

        for part in psutil.disk_partitions(all=False):
            usage = psutil.disk_usage(part.mountpoint)
            dev, total, used, free = df(part.mountpoint)
            self.assertEqual(usage.total, total)
            # 10 MB tolerance (disk usage may change while testing)
            if abs(usage.free - free) > 10 * 1024 * 1024:
                self.fail("psutil=%s, df=%s" % (usage.free, free))
            if abs(usage.used - used) > 10 * 1024 * 1024:
                self.fail("psutil=%s, df=%s" % (usage.used, used))

    def test_disk_partitions_mocked(self):
        # Test that ZFS partitions are returned.
        with open("/proc/filesystems", "r") as f:
            data = f.read()
        if 'zfs' in data:
            # ZFS support is available; a real ZFS partition must show up.
            for part in psutil.disk_partitions():
                if part.fstype == 'zfs':
                    break
            else:
                self.fail("couldn't find any ZFS partition")
        else:
            # No ZFS partitions on this system. Let's fake one.
            fake_file = io.StringIO(u("nodev\tzfs\n"))
            with mock.patch('psutil._pslinux.open',
                            return_value=fake_file, create=True) as m1:
                with mock.patch(
                        'psutil._pslinux.cext.disk_partitions',
                        return_value=[('/dev/sdb3', '/', 'zfs', 'rw')]) as m2:
                    ret = psutil.disk_partitions()
                    assert m1.called
                    assert m2.called
                    assert ret
                    self.assertEqual(ret[0].fstype, 'zfs')

    def test_disk_io_counters_kernel_2_4_mocked(self):
        # Tests /proc/diskstats parsing format for 2.4 kernels, see:
        # https://github.com/giampaolo/psutil/issues/767
        # 2.4 lines carry an extra third field before the device name.
        def open_mock(name, *args, **kwargs):
            if name == '/proc/partitions':
                return io.StringIO(textwrap.dedent(u"""\
                    major minor  #blocks  name
                       8        0  488386584 hda
                    """))
            elif name == '/proc/diskstats':
                return io.StringIO(
                    u("   3     0   1 hda 2 3 4 5 6 7 8 9 10 11 12"))
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            ret = psutil.disk_io_counters()
            assert m.called
            self.assertEqual(ret.read_count, 1)
            self.assertEqual(ret.read_merged_count, 2)
            self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE)
            self.assertEqual(ret.read_time, 4)
            self.assertEqual(ret.write_count, 5)
            self.assertEqual(ret.write_merged_count, 6)
            self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE)
            self.assertEqual(ret.write_time, 8)
            self.assertEqual(ret.busy_time, 10)

    def test_disk_io_counters_kernel_2_6_full_mocked(self):
        # Tests /proc/diskstats parsing format for 2.6 kernels,
        # lines reporting all metrics:
        # https://github.com/giampaolo/psutil/issues/767
        def open_mock(name, *args, **kwargs):
            if name == '/proc/partitions':
                return io.StringIO(textwrap.dedent(u"""\
                    major minor  #blocks  name
                       8        0  488386584 hda
                    """))
            elif name == '/proc/diskstats':
                return io.StringIO(
                    u("   3    0   hda 1 2 3 4 5 6 7 8 9 10 11"))
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            ret = psutil.disk_io_counters()
            assert m.called
            self.assertEqual(ret.read_count, 1)
            self.assertEqual(ret.read_merged_count, 2)
            self.assertEqual(ret.read_bytes, 3 * SECTOR_SIZE)
            self.assertEqual(ret.read_time, 4)
            self.assertEqual(ret.write_count, 5)
            self.assertEqual(ret.write_merged_count, 6)
            self.assertEqual(ret.write_bytes, 7 * SECTOR_SIZE)
            self.assertEqual(ret.write_time, 8)
            self.assertEqual(ret.busy_time, 10)

    def test_disk_io_counters_kernel_2_6_limited_mocked(self):
        # Tests /proc/diskstats parsing format for 2.6 kernels,
        # where one line of /proc/partitions return a limited
        # amount of metrics when it bumps into a partition
        # (instead of a disk). See:
        # https://github.com/giampaolo/psutil/issues/767
        def open_mock(name, *args, **kwargs):
            if name == '/proc/partitions':
                return io.StringIO(textwrap.dedent(u"""\
                    major minor  #blocks  name
                       8        0  488386584 hda
                    """))
            elif name == '/proc/diskstats':
                # only 4 metrics after the device name
                return io.StringIO(
                    u("   3    1   hda 1 2 3 4"))
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            ret = psutil.disk_io_counters()
            assert m.called
            self.assertEqual(ret.read_count, 1)
            self.assertEqual(ret.read_bytes, 2 * SECTOR_SIZE)
            self.assertEqual(ret.write_count, 3)
            self.assertEqual(ret.write_bytes, 4 * SECTOR_SIZE)
            # the missing metrics default to 0
            self.assertEqual(ret.read_merged_count, 0)
            self.assertEqual(ret.read_time, 0)
            self.assertEqual(ret.write_merged_count, 0)
            self.assertEqual(ret.write_time, 0)
            self.assertEqual(ret.busy_time, 0)
# =====================================================================
# --- misc
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestMisc(unittest.TestCase):
    """Miscellaneous Linux-only tests: boot time, procfs relocation,
    prlimit constants, users(), sector-size fallback, thread IDs."""

    def test_boot_time(self):
        # Boot time should agree with vmstat (truncated to seconds).
        vmstat_value = vmstat('boot time')
        psutil_value = psutil.boot_time()
        self.assertEqual(int(vmstat_value), int(psutil_value))

    @mock.patch('psutil.traceback.print_exc')
    def test_no_procfs_on_import(self, tb):
        # Simulate /proc being unreadable at import time: psutil must
        # still import (printing a traceback), raise IOError for CPU
        # APIs, and start working once PROCFS_PATH points to a valid
        # (fake) procfs directory.
        my_procfs = tempfile.mkdtemp()

        with open(os.path.join(my_procfs, 'stat'), 'w') as f:
            f.write('cpu   0 0 0 0 0 0 0 0 0 0\n')
            f.write('cpu0  0 0 0 0 0 0 0 0 0 0\n')
            f.write('cpu1  0 0 0 0 0 0 0 0 0 0\n')

        try:
            orig_open = open

            def open_mock(name, *args, **kwargs):
                # Reject every /proc access during the reload.
                if name.startswith('/proc'):
                    raise IOError(errno.ENOENT, 'rejecting access for test')
                return orig_open(name, *args, **kwargs)

            patch_point = 'builtins.open' if PY3 else '__builtin__.open'
            with mock.patch(patch_point, side_effect=open_mock):
                importlib.reload(psutil)
            assert tb.called

            self.assertRaises(IOError, psutil.cpu_times)
            self.assertRaises(IOError, psutil.cpu_times, percpu=True)
            self.assertRaises(IOError, psutil.cpu_percent)
            self.assertRaises(IOError, psutil.cpu_percent, percpu=True)
            self.assertRaises(IOError, psutil.cpu_times_percent)
            self.assertRaises(
                IOError, psutil.cpu_times_percent, percpu=True)

            psutil.PROCFS_PATH = my_procfs

            self.assertEqual(psutil.cpu_percent(), 0)
            self.assertEqual(sum(psutil.cpu_times_percent()), 0)

            # since we don't know the number of CPUs at import time,
            # we awkwardly say there are none until the second call
            per_cpu_percent = psutil.cpu_percent(percpu=True)
            self.assertEqual(sum(per_cpu_percent), 0)

            # ditto awkward length
            per_cpu_times_percent = psutil.cpu_times_percent(percpu=True)
            self.assertEqual(sum(map(sum, per_cpu_times_percent)), 0)

            # much user, very busy
            with open(os.path.join(my_procfs, 'stat'), 'w') as f:
                f.write('cpu   1 0 0 0 0 0 0 0 0 0\n')
                f.write('cpu0  1 0 0 0 0 0 0 0 0 0\n')
                f.write('cpu1  1 0 0 0 0 0 0 0 0 0\n')

            self.assertNotEqual(psutil.cpu_percent(), 0)
            self.assertNotEqual(
                sum(psutil.cpu_percent(percpu=True)), 0)
            self.assertNotEqual(sum(psutil.cpu_times_percent()), 0)
            self.assertNotEqual(
                sum(map(sum, psutil.cpu_times_percent(percpu=True))), 0)
        finally:
            shutil.rmtree(my_procfs)
            importlib.reload(psutil)

        self.assertEqual(psutil.PROCFS_PATH, '/proc')

    @unittest.skipUnless(
        get_kernel_version() >= (2, 6, 36),
        "prlimit() not available on this Linux kernel version")
    def test_prlimit_availability(self):
        # prlimit() should be available starting from kernel 2.6.36
        p = psutil.Process(os.getpid())
        p.rlimit(psutil.RLIMIT_NOFILE)
        # if prlimit() is supported *at least* these constants should
        # be available
        self.assertTrue(hasattr(psutil, "RLIM_INFINITY"))
        self.assertTrue(hasattr(psutil, "RLIMIT_AS"))
        self.assertTrue(hasattr(psutil, "RLIMIT_CORE"))
        self.assertTrue(hasattr(psutil, "RLIMIT_CPU"))
        self.assertTrue(hasattr(psutil, "RLIMIT_DATA"))
        self.assertTrue(hasattr(psutil, "RLIMIT_FSIZE"))
        self.assertTrue(hasattr(psutil, "RLIMIT_LOCKS"))
        self.assertTrue(hasattr(psutil, "RLIMIT_MEMLOCK"))
        self.assertTrue(hasattr(psutil, "RLIMIT_NOFILE"))
        self.assertTrue(hasattr(psutil, "RLIMIT_NPROC"))
        self.assertTrue(hasattr(psutil, "RLIMIT_RSS"))
        self.assertTrue(hasattr(psutil, "RLIMIT_STACK"))

    @unittest.skipUnless(
        get_kernel_version() >= (3, 0),
        "prlimit constants not available on this Linux kernel version")
    def test_resource_consts_kernel_v(self):
        # more recent constants
        self.assertTrue(hasattr(psutil, "RLIMIT_MSGQUEUE"))
        self.assertTrue(hasattr(psutil, "RLIMIT_NICE"))
        self.assertTrue(hasattr(psutil, "RLIMIT_RTPRIO"))
        self.assertTrue(hasattr(psutil, "RLIMIT_RTTIME"))
        self.assertTrue(hasattr(psutil, "RLIMIT_SIGPENDING"))

    def test_boot_time_mocked(self):
        # An unreadable /proc/stat must raise RuntimeError, not return
        # a bogus boot time.
        with mock.patch('psutil._pslinux.open', create=True) as m:
            self.assertRaises(
                RuntimeError,
                psutil._pslinux.boot_time)
            assert m.called

    def test_users_mocked(self):
        # Make sure ':0' and ':0.0' (returned by C ext) are converted
        # to 'localhost'.
        with mock.patch('psutil._pslinux.cext.users',
                        return_value=[('giampaolo', 'pts/2', ':0',
                                       1436573184.0, True)]) as m:
            self.assertEqual(psutil.users()[0].host, 'localhost')
            assert m.called
        with mock.patch('psutil._pslinux.cext.users',
                        return_value=[('giampaolo', 'pts/2', ':0.0',
                                       1436573184.0, True)]) as m:
            self.assertEqual(psutil.users()[0].host, 'localhost')
            assert m.called
        # ...otherwise it should be returned as-is
        with mock.patch('psutil._pslinux.cext.users',
                        return_value=[('giampaolo', 'pts/2', 'foo',
                                       1436573184.0, True)]) as m:
            self.assertEqual(psutil.users()[0].host, 'foo')
            assert m.called

    def test_procfs_path(self):
        # Pointing PROCFS_PATH at an empty directory must make every
        # procfs-backed API raise IOError (and Process() raise NSP).
        tdir = tempfile.mkdtemp()
        try:
            psutil.PROCFS_PATH = tdir
            self.assertRaises(IOError, psutil.virtual_memory)
            self.assertRaises(IOError, psutil.cpu_times)
            self.assertRaises(IOError, psutil.cpu_times, percpu=True)
            self.assertRaises(IOError, psutil.boot_time)
            # self.assertRaises(IOError, psutil.pids)
            self.assertRaises(IOError, psutil.net_connections)
            self.assertRaises(IOError, psutil.net_io_counters)
            self.assertRaises(IOError, psutil.net_if_stats)
            self.assertRaises(IOError, psutil.disk_io_counters)
            self.assertRaises(IOError, psutil.disk_partitions)
            self.assertRaises(psutil.NoSuchProcess, psutil.Process)
        finally:
            psutil.PROCFS_PATH = "/proc"
            os.rmdir(tdir)

    def test_sector_size_mock(self):
        # Test SECTOR_SIZE fallback in case 'hw_sector_size' file
        # does not exist.
        def open_mock(name, *args, **kwargs):
            if PY3 and isinstance(name, bytes):
                name = name.decode()
            if "hw_sector_size" in name:
                # record that the fallback path was actually exercised
                flag.append(None)
                raise IOError(errno.ENOENT, '')
            else:
                return orig_open(name, *args, **kwargs)

        flag = []
        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock):
            psutil.disk_io_counters()
            self.assertTrue(flag)

    def test_issue_687(self):
        # In case of thread ID:
        # - pid_exists() is supposed to return False
        # - Process(tid) is supposed to work
        # - pids() should not return the TID
        # See: https://github.com/giampaolo/psutil/issues/687
        t = ThreadTask()
        t.start()
        try:
            p = psutil.Process()
            tid = p.threads()[1].id
            assert not psutil.pid_exists(tid), tid
            pt = psutil.Process(tid)
            pt.as_dict()
            self.assertNotIn(tid, psutil.pids())
        finally:
            t.stop()
# =====================================================================
# --- sensors
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
@unittest.skipUnless(hasattr(psutil, "sensors_battery") and
                     psutil.sensors_battery() is not None,
                     "no battery")
class TestSensorsBattery(unittest.TestCase):
    """psutil.sensors_battery() tests, validated against the "acpi"
    utility and via mocked /sys/class/power_supply files.

    Fix: test_power_plugged used ``return unittest.skip(...)``, which
    merely returns a decorator object and silently marks the test as
    passed; it now calls ``self.skipTest(...)`` so the test is really
    skipped.
    """

    @unittest.skipUnless(which("acpi"), "acpi utility not available")
    def test_percent(self):
        """Battery percentage must match "acpi -b" within 1%."""
        out = sh("acpi -b")
        acpi_value = int(out.split(",")[1].strip().replace('%', ''))
        psutil_value = psutil.sensors_battery().percent
        self.assertAlmostEqual(acpi_value, psutil_value, delta=1)

    @unittest.skipUnless(which("acpi"), "acpi utility not available")
    def test_power_plugged(self):
        """power_plugged must agree with "acpi -b"."""
        out = sh("acpi -b")
        if 'unknown' in out.lower():
            # was: "return unittest.skip(...)" -- a no-op; actually skip
            self.skipTest("acpi output not reliable")
        if 'discharging at zero rate' in out:
            plugged = True
        else:
            plugged = "Charging" in out.split('\n')[0]
        self.assertEqual(psutil.sensors_battery().power_plugged, plugged)

    def test_emulate_power_plugged(self):
        # Pretend the AC power cable is connected.
        def open_mock(name, *args, **kwargs):
            if name.startswith("/sys/class/power_supply/AC0/online"):
                return io.BytesIO(b"1")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            self.assertEqual(psutil.sensors_battery().power_plugged, True)
            # while on AC power, time left is "unlimited"
            self.assertEqual(
                psutil.sensors_battery().secsleft, psutil.POWER_TIME_UNLIMITED)
            assert m.called

    def test_emulate_power_not_plugged(self):
        # Pretend the AC power cable is not connected.
        def open_mock(name, *args, **kwargs):
            if name.startswith("/sys/class/power_supply/AC0/online"):
                return io.BytesIO(b"0")
            elif name.startswith("/sys/class/power_supply/BAT0/status"):
                return io.BytesIO(b"discharging")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            self.assertEqual(psutil.sensors_battery().power_plugged, False)
            assert m.called

    def test_emulate_power_undetermined(self):
        # Pretend we can't know whether the AC power cable not
        # connected (assert fallback to None).
        def open_mock(name, *args, **kwargs):
            if name.startswith("/sys/class/power_supply/AC0/online") or \
                    name.startswith("/sys/class/power_supply/AC/online"):
                raise IOError(errno.ENOENT, "")
            elif name.startswith("/sys/class/power_supply/BAT0/status"):
                return io.BytesIO(b"???")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            self.assertIsNone(psutil.sensors_battery().power_plugged)
            assert m.called

    def test_emulate_no_base_files(self):
        # Emulate a case where base metrics files are not present,
        # in which case we're supposed to get None.
        def open_mock(name, *args, **kwargs):
            if name.startswith("/sys/class/power_supply/BAT0/energy_now") or \
                    name.startswith("/sys/class/power_supply/BAT0/charge_now"):
                raise IOError(errno.ENOENT, "")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            self.assertIsNone(psutil.sensors_battery())
            assert m.called

    def test_emulate_energy_full_0(self):
        # Emulate a case where energy_full files returns 0.
        def open_mock(name, *args, **kwargs):
            if name.startswith("/sys/class/power_supply/BAT0/energy_full"):
                return io.BytesIO(b"0")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            # with a zero capacity the percentage must be 0, not a
            # ZeroDivisionError
            self.assertEqual(psutil.sensors_battery().percent, 0)
            assert m.called

    def test_emulate_energy_full_not_avail(self):
        # Emulate a case where energy_full file does not exist.
        # Expected fallback on /capacity.
        def open_mock(name, *args, **kwargs):
            if name.startswith("/sys/class/power_supply/BAT0/energy_full"):
                raise IOError(errno.ENOENT, "")
            elif name.startswith("/sys/class/power_supply/BAT0/capacity"):
                return io.BytesIO(b"88")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            self.assertEqual(psutil.sensors_battery().percent, 88)
            assert m.called

    def test_emulate_no_ac0_online(self):
        # Emulate a case where /AC0/online file does not exist.
        def path_exists_mock(name):
            if name.startswith("/sys/class/power_supply/AC0/online"):
                return False
            else:
                return orig_path_exists(name)

        orig_path_exists = os.path.exists
        with mock.patch("psutil._pslinux.os.path.exists",
                        side_effect=path_exists_mock) as m:
            # must not raise
            psutil.sensors_battery()
            assert m.called

    def test_emulate_no_power(self):
        # Emulate a case where /AC0/online file nor /BAT0/status exist.
        def open_mock(name, *args, **kwargs):
            if name.startswith("/sys/class/power_supply/AC/online") or \
                    name.startswith("/sys/class/power_supply/AC0/online") or \
                    name.startswith("/sys/class/power_supply/BAT0/status"):
                raise IOError(errno.ENOENT, "")
            else:
                return orig_open(name, *args, **kwargs)

        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            self.assertIsNone(psutil.sensors_battery().power_plugged)
            assert m.called
@unittest.skipUnless(LINUX, "LINUX only")
class TestSensorsTemperatures(unittest.TestCase):
    """Tests for psutil.sensors_temperatures()."""

    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_emulate_eio_error(self):
        # An EIO while reading a "*_input" temperature file must be
        # swallowed: a warning is emitted and the sensor is skipped
        # rather than the whole call blowing up.
        real_open = open

        def fake_open(name, *args, **kwargs):
            # Only the actual readings ("*_input" files) fail; every
            # other file is opened for real.
            if name.endswith("_input"):
                raise OSError(errno.EIO, "")
            return real_open(name, *args, **kwargs)

        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=fake_open) as m:
            with warnings.catch_warnings(record=True) as ws:
                self.assertEqual(psutil.sensors_temperatures(), {})
                assert m.called
                self.assertIn("ignoring", str(ws[0].message))
# =====================================================================
# --- test process
# =====================================================================
@unittest.skipUnless(LINUX, "LINUX only")
class TestProcess(unittest.TestCase):
def setUp(self):
safe_rmpath(TESTFN)
tearDown = setUp
def test_memory_full_info(self):
src = textwrap.dedent("""
import time
with open("%s", "w") as f:
time.sleep(10)
""" % TESTFN)
sproc = pyrun(src)
self.addCleanup(reap_children)
call_until(lambda: os.listdir('.'), "'%s' not in ret" % TESTFN)
p = psutil.Process(sproc.pid)
time.sleep(.1)
mem = p.memory_full_info()
maps = p.memory_maps(grouped=False)
self.assertAlmostEqual(
mem.uss, sum([x.private_dirty + x.private_clean for x in maps]),
delta=4096)
self.assertAlmostEqual(
mem.pss, sum([x.pss for x in maps]), delta=4096)
self.assertAlmostEqual(
mem.swap, sum([x.swap for x in maps]), delta=4096)
# On PYPY file descriptors are not closed fast enough.
@unittest.skipIf(PYPY, "unreliable on PYPY")
def test_open_files_mode(self):
def get_test_file():
p = psutil.Process()
giveup_at = time.time() + 2
while True:
for file in p.open_files():
if file.path == os.path.abspath(TESTFN):
return file
elif time.time() > giveup_at:
break
raise RuntimeError("timeout looking for test file")
#
with open(TESTFN, "w"):
self.assertEqual(get_test_file().mode, "w")
with open(TESTFN, "r"):
self.assertEqual(get_test_file().mode, "r")
with open(TESTFN, "a"):
self.assertEqual(get_test_file().mode, "a")
#
with open(TESTFN, "r+"):
self.assertEqual(get_test_file().mode, "r+")
with open(TESTFN, "w+"):
self.assertEqual(get_test_file().mode, "r+")
with open(TESTFN, "a+"):
self.assertEqual(get_test_file().mode, "a+")
# note: "x" bit is not supported
if PY3:
safe_rmpath(TESTFN)
with open(TESTFN, "x"):
self.assertEqual(get_test_file().mode, "w")
safe_rmpath(TESTFN)
with open(TESTFN, "x+"):
self.assertEqual(get_test_file().mode, "r+")
def test_open_files_file_gone(self):
# simulates a file which gets deleted during open_files()
# execution
p = psutil.Process()
files = p.open_files()
with tempfile.NamedTemporaryFile():
# give the kernel some time to see the new file
call_until(p.open_files, "len(ret) != %i" % len(files))
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.ENOENT, "")) as m:
files = p.open_files()
assert not files
assert m.called
# also simulate the case where os.readlink() returns EINVAL
# in which case psutil is supposed to 'continue'
with mock.patch('psutil._pslinux.os.readlink',
side_effect=OSError(errno.EINVAL, "")) as m:
self.assertEqual(p.open_files(), [])
assert m.called
# --- mocked tests
def test_terminal_mocked(self):
with mock.patch('psutil._pslinux._psposix.get_terminal_map',
return_value={}) as m:
self.assertIsNone(psutil._pslinux.Process(os.getpid()).terminal())
assert m.called
# TODO: re-enable this test.
# def test_num_ctx_switches_mocked(self):
# with mock.patch('psutil._pslinux.open', create=True) as m:
# self.assertRaises(
# NotImplementedError,
# psutil._pslinux.Process(os.getpid()).num_ctx_switches)
# assert m.called
def test_cmdline_mocked(self):
# see: https://github.com/giampaolo/psutil/issues/639
p = psutil.Process()
fake_file = io.StringIO(u('foo\x00bar\x00'))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
p.cmdline() == ['foo', 'bar']
assert m.called
fake_file = io.StringIO(u('foo\x00bar\x00\x00'))
with mock.patch('psutil._pslinux.open',
return_value=fake_file, create=True) as m:
p.cmdline() == ['foo', 'bar', '']
assert m.called
def test_readlink_path_deleted_mocked(self):
with mock.patch('psutil._pslinux.os.readlink',
return_value='/home/foo (deleted)'):
self.assertEqual(psutil.Process().exe(), "/home/foo")
self.assertEqual(psutil.Process().cwd(), "/home/foo")
    def test_threads_mocked(self):
        # Test the case where os.listdir() returns a file (thread)
        # which no longer exists by the time we open() it (race
        # condition). threads() is supposed to ignore that instead
        # of raising NSP.
        def open_mock(name, *args, **kwargs):
            # fail with ENOENT only for this process' /proc task entries;
            # everything else goes through the real open()
            if name.startswith('/proc/%s/task' % os.getpid()):
                raise IOError(errno.ENOENT, "")
            else:
                return orig_open(name, *args, **kwargs)
        orig_open = open
        patch_point = 'builtins.open' if PY3 else '__builtin__.open'
        with mock.patch(patch_point, side_effect=open_mock) as m:
            ret = psutil.Process().threads()
            assert m.called
            self.assertEqual(ret, [])
        # ...but if it bumps into something != ENOENT we want an
        # exception.
        def open_mock(name, *args, **kwargs):
            if name.startswith('/proc/%s/task' % os.getpid()):
                raise IOError(errno.EPERM, "")
            else:
                return orig_open(name, *args, **kwargs)
        with mock.patch(patch_point, side_effect=open_mock):
            self.assertRaises(psutil.AccessDenied, psutil.Process().threads)
    # not sure why (doesn't fail locally)
    # https://travis-ci.org/giampaolo/psutil/jobs/108629915
    @unittest.skipIf(TRAVIS, "unreliable on TRAVIS")
    def test_exe_mocked(self):
        with mock.patch('psutil._pslinux.os.readlink',
                        side_effect=OSError(errno.ENOENT, "")) as m:
            # No such file error; might be raised also if /proc/pid/exe
            # path actually exists for system processes with low pids
            # (about 0-20). In this case psutil is supposed to return
            # an empty string.
            ret = psutil.Process().exe()
            assert m.called
            self.assertEqual(ret, "")
            # ...but if /proc/pid no longer exist we're supposed to treat
            # it as an alias for zombie process
            with mock.patch('psutil._pslinux.os.path.lexists',
                            return_value=False):
                self.assertRaises(psutil.ZombieProcess, psutil.Process().exe)
@unittest.skipUnless(LINUX, "LINUX only")
class TestProcessAgainstStatus(unittest.TestCase):
    """/proc/pid/stat and /proc/pid/status have many values in common.
    Whenever possible, psutil uses /proc/pid/stat (it's faster).
    For all those cases we check that the value found in
    /proc/pid/stat (by psutil) matches the one found in
    /proc/pid/status.
    """
    @classmethod
    def setUpClass(cls):
        # one Process handle for the test runner itself, shared by all tests
        cls.proc = psutil.Process()
    def read_status_file(self, linestart):
        # Return the value of the first /proc/<pid>/status line starting
        # with *linestart*: the text after the first tab, converted to
        # int when possible.  Raises ValueError if no line matches.
        with psutil._psplatform.open_text(
                '/proc/%s/status' % self.proc.pid) as f:
            for line in f:
                line = line.strip()
                if line.startswith(linestart):
                    value = line.partition('\t')[2]
                    try:
                        return int(value)
                    except ValueError:
                        return value
            else:
                raise ValueError("can't find %r" % linestart)
    def test_name(self):
        value = self.read_status_file("Name:")
        self.assertEqual(self.proc.name(), value)
    def test_status(self):
        # "State:" looks like "R (running)"; extract the long form and
        # normalize spaces to dashes to match psutil's status strings
        value = self.read_status_file("State:")
        value = value[value.find('(') + 1:value.rfind(')')]
        value = value.replace(' ', '-')
        self.assertEqual(self.proc.status(), value)
    def test_ppid(self):
        value = self.read_status_file("PPid:")
        self.assertEqual(self.proc.ppid(), value)
    def test_num_threads(self):
        value = self.read_status_file("Threads:")
        self.assertEqual(self.proc.num_threads(), value)
    def test_uids(self):
        # "Uid:" holds 4 ids (real, effective, saved, fs); psutil
        # reports the first three
        value = self.read_status_file("Uid:")
        value = tuple(map(int, value.split()[1:4]))
        self.assertEqual(self.proc.uids(), value)
    def test_gids(self):
        value = self.read_status_file("Gid:")
        value = tuple(map(int, value.split()[1:4]))
        self.assertEqual(self.proc.gids(), value)
    @retry_before_failing()
    def test_num_ctx_switches(self):
        value = self.read_status_file("voluntary_ctxt_switches:")
        self.assertEqual(self.proc.num_ctx_switches().voluntary, value)
        value = self.read_status_file("nonvoluntary_ctxt_switches:")
        self.assertEqual(self.proc.num_ctx_switches().involuntary, value)
    def test_cpu_affinity(self):
        # only the "low-high" range form is checked here; the
        # comma-separated list form is not exercised
        value = self.read_status_file("Cpus_allowed_list:")
        if '-' in str(value):
            min_, max_ = map(int, value.split('-'))
            self.assertEqual(
                self.proc.cpu_affinity(), list(range(min_, max_ + 1)))
    def test_cpu_affinity_eligible_cpus(self):
        value = self.read_status_file("Cpus_allowed_list:")
        with mock.patch("psutil._pslinux.per_cpu_times") as m:
            self.proc._proc._get_eligible_cpus()
            # a range ("0-7") is parsed directly; otherwise psutil falls
            # back to counting CPUs via per_cpu_times()
            if '-' in str(value):
                assert not m.called
            else:
                assert m.called
if __name__ == '__main__':
    # run this module through psutil's own test runner
    run_test_module_by_name(__file__)
| {
"content_hash": "9f804ed6b51d1c43e6aff292e746b4f6",
"timestamp": "",
"source": "github",
"line_count": 1475,
"max_line_length": 79,
"avg_line_length": 40.774915254237285,
"alnum_prop": 0.5489749430523918,
"repo_name": "jules185/IoT_Hackathon",
"id": "2d37b46467ec4084462d337ee6a713d7a9dc8b14",
"size": "60331",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".homeassistant/deps/psutil/tests/test_linux.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12396"
},
{
"name": "HTML",
"bytes": "1557"
},
{
"name": "JavaScript",
"bytes": "2843"
},
{
"name": "Python",
"bytes": "8347316"
}
],
"symlink_target": ""
} |
from setuptools import find_packages, setup


def _read_long_description():
    """Return the contents of README.md for use as the PyPI long description.

    Uses a context manager so the file handle is closed promptly; the
    original ``open('README.md').read()`` leaked the handle.
    """
    with open('README.md') as readme:
        return readme.read()


setup(
    name="acos-client",
    version="1.4.6",
    packages=find_packages(),
    author="A10 Networks",
    author_email="mdurrant@a10networks.com",
    description="A10 Networks ACOS API Client",
    license="Apache",
    keywords="a10 axapi acos adc slb load balancer",
    url="https://github.com/a10networks/acos-client",
    long_description=_read_long_description(),
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        'Topic :: Internet',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    install_requires=['requests>=2.3.0', 'six', 'uhashring'],
    test_suite="acos_client.tests.test_suite",
)
| {
"content_hash": "c1e2d6ff1c0f79474bb0b6898768d66b",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 70,
"avg_line_length": 35.578947368421055,
"alnum_prop": 0.6057692307692307,
"repo_name": "mdurrant-b3/acos-client",
"id": "2a15633653eb852e805e53e8854e808af61b1e89",
"size": "1390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "477240"
}
],
"symlink_target": ""
} |
"""Python client for todo list organization using todo.txt format.
This is currently pre-alpha code but the parser in the `todotxt` module is
already able to parse `todo.txt`_ files.
.. _todo.txt: https://github.com/ginatrapani/todo.txt-cli/wiki/The-Todo.txt-Format
"""
name = 'PyDoDo'
version = '0.1a1'
description = __doc__.splitlines()
keywords = 'todo,gtd'
author = 'Christopher Arndt'
author_email = 'chris@chrisarndt.de'
url = 'http://chrisarndt.de/projects/%s/' % name
repository = 'https://github.com/SpotlightKid/%s.git' % name.lower()
download_url = url + 'download/'
license = 'MIT License'
platforms = 'POSIX, Windows, MacOS X'
long_description = "\n".join(description[2:]) % locals()
description = description[0]
classifiers = """\
Development Status :: 2 - Pre-alpha
Environment :: MacOS X
Environment :: Win32 (MS Windows)
Intended Audience :: Developers
Intended Audience :: End users
License :: OSI Approved :: MIT License
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: MacOS :: MacOS X
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Topic :: Multimedia :: Sound/Audio :: MIDI
Topic :: Software Development :: Libraries :: Python Modules
"""
classifiers = [c.strip() for c in classifiers.splitlines()
if c.strip() and not c.startswith('#')]
try: # Python 2.x
del c
except: pass
| {
"content_hash": "a9a0a6a31e239327c48c46f1f786f2bb",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 82,
"avg_line_length": 32.674418604651166,
"alnum_prop": 0.7138790035587189,
"repo_name": "SpotlightKid/pydodo",
"id": "75e9c45cdc18d50e21d5fd52ace38e563caab44a",
"size": "1495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/release.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18231"
}
],
"symlink_target": ""
} |
from wsgiref.simple_server import make_server
import sys
import json
import traceback
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
DEPLOY = 'KB_DEPLOYMENT_CONFIG'  # env var: path to the deployment config file
SERVICE = 'KB_SERVICE_NAME'  # env var: name of this service / config section
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Return the deployment config file path from the environment.

    Reads the environment variable named by ``DEPLOY``; returns None
    when it is not set.
    """
    return environ.get(DEPLOY)
def get_service_name():
    """Return this service's name from the environment (``SERVICE`` var), or None."""
    return environ.get(SERVICE)
def get_config():
    """Load this service's section of the deployment config.

    Returns a dict mapping option names to values for the section named
    by get_service_name() in the file named by get_config_file(), or
    None when either environment variable is unset.
    """
    if not (get_config_file() and get_service_name()):
        return None
    parser = ConfigParser()
    parser.read(get_config_file())
    return dict(parser.items(get_service_name()))
config = get_config()  # deployment config dict, or None when not deployed
from LoaderImpl import Loader
impl_Loader = Loader(config)  # single shared implementation instance
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that additionally handles sets, frozensets, and any
    object exposing a ``toJSONable()`` method.
    """
    def default(self, obj):
        # sets and frozensets are not native JSON types; emit them as lists
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # objects may opt in to serialization via a toJSONable() hook
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        # defer to the base class, which raises TypeError
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSONRPCService subclass that serializes results with
    JSONObjectEncoder and wraps method exceptions in ServerError
    (with the traceback attached for process_error reporting).
    """
    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.
        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None
    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # (the "-1" accounts for ctx, which is injected by us)
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            newerr = ServerError()
            newerr.trace = traceback.format_exc()
            # NOTE(review): e.message is Python 2 only; a Python 3 port
            # would need str(e) here.
            newerr.data = e.message
            raise newerr
        return result
    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.
        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError
    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        # BUG FIX: dict.has_key() is deprecated (and gone in Python 3);
        # use the ``in`` operator instead.
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Dict-based per-call context (client ip, auth info, method identity)
    that also proxies log calls to the service logger.
    """
    def __init__(self, logger):
        # every context field starts out unknown
        for field in ('client_ip', 'user_id', 'authenticated', 'token',
                      'module', 'method', 'call_id'):
            self[field] = None
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger
    def log_err(self, message):
        self._log(log.ERR, message)
    def log_info(self, message):
        self._log(log.INFO, message)
    def log_debug(self, message, level=1):
        # recognized symbolic / numeric debug levels pass through as-is;
        # anything else must be an int in 1..3 and is mapped to 7..9
        if level not in self._debug_levels:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)
    def set_log_level(self, level):
        self._logger.set_log_level(level)
    def get_log_level(self):
        return self._logger.get_log_level()
    def clear_log_level(self):
        self._logger.clear_user_log_level()
    def _log(self, level, message):
        # stamp every record with the request identity held in this context
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])
def getIPAddress(environ):
    """Return the client IP for a WSGI request.

    Honors X-Forwarded-For (first hop) and X-Real-IP headers unless the
    deployment config sets ``dont_trust_x_ip_headers`` to 'true';
    falls back to REMOTE_ADDR.
    """
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        forwarded = environ.get('HTTP_X_FORWARDED_FOR')
        if forwarded:
            # first entry in the chain is the originating client
            return forwarded.split(',')[0].strip()
        real_ip = environ.get('HTTP_X_REAL_IP')
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over
    def logcallback(self):
        # keep the server log writing to the same file as the user log
        self.serverlog.set_log_file(self.userlog.get_log_file())
    def log(self, level, context, message):
        # write a server-log record stamped with the request context
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])
    def __init__(self):
        # Set up user/server loggers, register the Loader RPC methods and
        # their auth requirements, and create the Globus auth client.
        submod = get_service_name() or 'Loader'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # method name -> 'required' | 'optional' | 'none' (default 'none')
        self.method_authentication = dict()
        self.rpc_service.add(impl_Loader.import_data,
                             name='Loader.import_data',
                             types=[dict])
        self.method_authentication['Loader.import_data'] = 'required'
        self.rpc_service.add(impl_Loader.validate,
                             name='Loader.validate',
                             types=[dict])
        self.method_authentication['Loader.validate'] = 'required'
        self.rpc_service.add(impl_Loader.uploader,
                             name='Loader.uploader',
                             types=[dict])
        self.method_authentication['Loader.uploader'] = 'required'
        self.rpc_service.add(impl_Loader.download,
                             name='Loader.download',
                             types=[dict])
        self.method_authentication['Loader.download'] = 'required'
        self.auth_client = biokbase.nexus.Client(
            config={'server': 'nexus.api.globusonline.org',
                    'verify_ssl': True,
                    'client': None,
                    'client_secret': None})
    def __call__(self, environ, start_response):
        # WSGI entry point: parse the JSON-RPC body, enforce per-method
        # authentication, dispatch, and serialize result or error.
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            # (CORS preflight)
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    auth_req = self.method_authentication.get(req['method'],
                                                              "none")
                    if auth_req != "none":
                        if token is None and auth_req == 'required':
                            err = ServerError()
                            err.data = "Authentication required for " + \
                                "Loader but no authentication header was passed"
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user, _, _ = \
                                    self.auth_client.validate_token(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception, e:
                                # optional-auth methods proceed
                                # unauthenticated on a bad token
                                if auth_req == 'required':
                                    err = ServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception, e:
                    # any non-JSONRPC failure becomes a generic code-0 error
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
                else:
                    status = '200 OK'
        # print 'The request method was %s\n' % environ['REQUEST_METHOD']
        # print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
        # print 'The request body was: %s' % request_body
        # print 'The result from the method call is:\n%s\n' % \
        #     pprint.pformat(rpc_result)
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body]
    def process_error(self, error, context, request, trace=None):
        # Log the traceback (if any), attach request id/version info to
        # the error structure, and return it serialized as JSON.  The
        # trace field name depends on the protocol version in use.
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)
application = Application()  # the WSGI callable served by uwsgi / wsgiref
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
    # Before we do anything with the application, see if the
    # configs specify patching all std routines to be asynch
    # *ONLY* use this if you are going to wrap the service in
    # a wsgi container that has enabled gevent, such as
    # uwsgi with the --gevent option
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print "Monkeypatching std libraries for async"
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {
        '': application
    }
except ImportError:
    # Not available outside of wsgi, ignore
    pass
_proc = None  # handle of the forked server process (see start_server)
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # port 0 lets the OS pick a free port; read back the real one
    port = httpd.server_address[1]
    print "Listening on port %s" % port
    if newprocess:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    else:
        httpd.serve_forever()
    return port
def stop_server():
    # Terminate the child server started with start_server(newprocess=True).
    # NOTE(review): raises AttributeError if no server process is running.
    global _proc
    _proc.terminate()
    _proc = None
if __name__ == "__main__":
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
| {
"content_hash": "a31c5f096b06bd5d2ee10ac43dff6384",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 92,
"avg_line_length": 36.61303462321792,
"alnum_prop": 0.537853924459031,
"repo_name": "sjyoo/loader",
"id": "a226258adf5e7432cdd696e779b9343e5e297c47",
"size": "17999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/LoaderServer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4719"
},
{
"name": "Perl",
"bytes": "56404"
},
{
"name": "Python",
"bytes": "42052"
},
{
"name": "R",
"bytes": "1884"
}
],
"symlink_target": ""
} |
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mpp.models import SQLTestCase
class PersistentTableFreeTIDTestCase(SQLTestCase):
    """
    @product_version gpdb: [4.3.9.0-]
    """
    # Directories (relative to this module) holding the SQL scripts,
    # the expected answers, and the actual run output for the
    # persistent-table corruption scenario.
    sql_dir = 'sqls/pt_corruption'
    ans_dir = 'ans/pt_corruption'
    out_dir = 'output/pt_corruption'
| {
"content_hash": "2d97fc501ffea333ef24480fb29f3632",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 72,
"avg_line_length": 34.18518518518518,
"alnum_prop": 0.7562296858071506,
"repo_name": "CraigHarris/gpdb",
"id": "fa6c276ce279222664effffc021024ed64b48411",
"size": "923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/test/tinc/tincrepo/mpp/gpdb/tests/storage/persistent_tables/test_PT_freeTID.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5196"
},
{
"name": "Batchfile",
"bytes": "11028"
},
{
"name": "C",
"bytes": "35172475"
},
{
"name": "C++",
"bytes": "8253554"
},
{
"name": "CMake",
"bytes": "47394"
},
{
"name": "CSS",
"bytes": "7068"
},
{
"name": "Csound Score",
"bytes": "179"
},
{
"name": "DTrace",
"bytes": "1160"
},
{
"name": "Fortran",
"bytes": "14777"
},
{
"name": "GDB",
"bytes": "576"
},
{
"name": "Gherkin",
"bytes": "928387"
},
{
"name": "HTML",
"bytes": "218703"
},
{
"name": "Java",
"bytes": "1011277"
},
{
"name": "Lex",
"bytes": "210708"
},
{
"name": "M4",
"bytes": "106028"
},
{
"name": "Makefile",
"bytes": "497812"
},
{
"name": "Objective-C",
"bytes": "7799"
},
{
"name": "PLSQL",
"bytes": "236252"
},
{
"name": "PLpgSQL",
"bytes": "53471803"
},
{
"name": "Perl",
"bytes": "4082990"
},
{
"name": "Perl6",
"bytes": "14219"
},
{
"name": "Python",
"bytes": "9788722"
},
{
"name": "Roff",
"bytes": "703079"
},
{
"name": "Ruby",
"bytes": "4910"
},
{
"name": "SQLPL",
"bytes": "3870842"
},
{
"name": "Shell",
"bytes": "504133"
},
{
"name": "XS",
"bytes": "8309"
},
{
"name": "XSLT",
"bytes": "5779"
},
{
"name": "Yacc",
"bytes": "485235"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# Package metadata.
__author__ = 'Filipe Ximenes'
__email__ = 'filipeximenes@gmail.com'
__version__ = '1.0'
# Re-export the public adapter API at package level.
from .adapters import (
    generate_wrapper_from_adapter,
    TapiocaAdapter,
    FormAdapterMixin, JSONAdapterMixin)
| {
"content_hash": "3106093016d65cd9c95efe8e3ab89262",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 39,
"avg_line_length": 22.727272727272727,
"alnum_prop": 0.7,
"repo_name": "era/tapioca-wrapper",
"id": "010531914802add6bc3e2bbca9f1164e4bb6a349",
"size": "267",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tapioca/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1499"
},
{
"name": "Python",
"bytes": "44045"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.