text stringlengths 4 1.02M | meta dict |
|---|---|
"""Create the asset."""
import argparse
import os
import subprocess
NODE_URL = "https://nodejs.org/dist/v12.16.3/node-v12.16.3-linux-x64.tar.xz"
NODE_EXTRACT_NAME = "node-v12.16.3-linux-x64"
def create_asset(target_dir):
    """Download the pinned Node.js build and extract it as <target_dir>/node.

    Streams the tarball from NODE_URL through curl straight into tar (no
    temp file), then renames the versioned extraction directory
    (NODE_EXTRACT_NAME) to the stable name "node".
    """
    p1 = subprocess.Popen(["curl", NODE_URL], stdout=subprocess.PIPE)
    # BUG FIX: the original passed "-xJf" "-" as adjacent string literals,
    # which Python concatenates into the single argument "-xJf-". That only
    # extracted at all because getopt bundling lets "-" ride along as the
    # -f argument; pass the two arguments explicitly.
    p2 = subprocess.Popen(["tar", "-C", target_dir, "-xJf", "-"],
                          stdin=p1.stdout)
    p1.stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
    p2.communicate()
    os.rename(
        os.path.join(target_dir, NODE_EXTRACT_NAME),
        os.path.join(target_dir, "node")
    )
def main():
    """Parse the command line and build the asset in the requested directory."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--target_dir', '-t', required=True)
    parsed = arg_parser.parse_args()
    create_asset(parsed.target_dir)
if __name__ == '__main__':
    main()
| {
"content_hash": "fd900dd3336fcf15cc1d0685c3c8cb1f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 24.363636363636363,
"alnum_prop": 0.6529850746268657,
"repo_name": "aosp-mirror/platform_external_skia",
"id": "33351c8e7cc890fefa3fe1ab7b749838e7ec0b5c",
"size": "963",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "infra/bots/assets/node/create.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "12716940"
},
{
"name": "Batchfile",
"bytes": "904"
},
{
"name": "C",
"bytes": "620774"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "27394853"
},
{
"name": "GLSL",
"bytes": "67013"
},
{
"name": "Go",
"bytes": "80137"
},
{
"name": "HTML",
"bytes": "1002516"
},
{
"name": "Java",
"bytes": "32794"
},
{
"name": "JavaScript",
"bytes": "51666"
},
{
"name": "Lex",
"bytes": "4372"
},
{
"name": "Lua",
"bytes": "70974"
},
{
"name": "Makefile",
"bytes": "2295"
},
{
"name": "Objective-C",
"bytes": "35223"
},
{
"name": "Objective-C++",
"bytes": "34410"
},
{
"name": "PHP",
"bytes": "120845"
},
{
"name": "Python",
"bytes": "1002226"
},
{
"name": "Shell",
"bytes": "49974"
}
],
"symlink_target": ""
} |
from abc import abstractmethod
from .base_cvxproblem import Relevance_CVXProblem
class LUPI_Relevance_CVXProblem(Relevance_CVXProblem):
    """Relevance-bound problem for LUPI (learning using privileged information).

    The combined sample matrix carries the privileged features in its last
    columns.  Preprocessing splits them off, and the objective builders
    dispatch to LUPI-specific implementations whenever the current feature
    index falls into the privileged range.
    """

    def __init__(
        self,
        current_feature: int,
        data: tuple,
        hyperparameters,
        best_model_constraints,
        preset_model=None,
        best_model_state=None,
        probeID=-1,
    ) -> None:
        # Pure pass-through; all setup happens in the base class.
        super().__init__(
            current_feature,
            data,
            hyperparameters,
            best_model_constraints,
            preset_model,
            best_model_state,
            probeID,
        )

    def preprocessing_data(self, data, best_model_state):
        """Split combined data into normal/privileged parts, then run base setup."""
        lupi_features = best_model_state["lupi_features"]
        X_combined, y = data
        X, X_priv = split_dataset(X_combined, lupi_features)
        self.X_priv = X_priv
        super().preprocessing_data((X, y), best_model_state)
        assert lupi_features == X_priv.shape[1]
        self.d_priv = lupi_features
        # The privileged block starts right after the d normal features, so
        # offset the running feature index into the privileged coordinate.
        self.lupi_index = self.current_feature - self.d
        self.isPriv = self.lupi_index >= 0

    def init_objective_UB(self, **kwargs):
        # Two sub-models share one index space: non-privileged features use
        # the plain objective from the parent class.
        if not self.isPriv:
            super().init_objective_UB(**kwargs)
        else:
            self._init_objective_UB_LUPI(**kwargs)

    def init_objective_LB(self, **kwargs):
        # Same dispatch as init_objective_UB, for the lower bound.
        if not self.isPriv:
            super().init_objective_LB(**kwargs)
        else:
            self._init_objective_LB_LUPI(**kwargs)

    @abstractmethod
    def _init_objective_LB_LUPI(self, **kwargs):
        pass

    @abstractmethod
    def _init_objective_UB_LUPI(self, **kwargs):
        pass
def split_dataset(X_combined, lupi_features):
    """Split a combined sample matrix into normal and privileged blocks.

    Parameters
    ----------
    X_combined : array of shape (n_samples, d + lupi_features)
        Matrix whose trailing ``lupi_features`` columns are privileged.
    lupi_features : int
        Number of trailing privileged columns; must satisfy
        ``0 < lupi_features < X_combined.shape[1]``.

    Returns
    -------
    (X, X_priv) : the leading normal block and the trailing privileged block.
    """
    # BUG FIX: also reject lupi_features == 0 — with a zero count the slices
    # below silently return an empty X and hand *all* columns to X_priv.
    assert 0 < lupi_features < X_combined.shape[1]
    X = X_combined[:, :-lupi_features]
    X_priv = X_combined[:, -lupi_features:]
    return X, X_priv
def is_lupi_feature(di, data, best_model_state):
    """Return True when feature index ``di`` addresses a privileged feature."""
    X_combined, _ = data
    priv_count = best_model_state["lupi_features"]
    # Normal features occupy the first (total - priv_count) indices.
    normal_count = X_combined.shape[1] - priv_count
    return di - normal_count >= 0
| {
"content_hash": "7cea08912adfb22f519399e8c9b633f8",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 82,
"avg_line_length": 30.4625,
"alnum_prop": 0.5937628231432088,
"repo_name": "lpfann/fri",
"id": "f24f084baed52d7949a45e78596b50691f3ddf6e",
"size": "2437",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "fri/model/base_lupi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120434"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, url
from django.conf import settings
# Legacy string-view URLconf (`patterns()` was removed in Django 1.10);
# view names resolve against the 'modelmanager.views' module prefix.
urlpatterns = patterns('modelmanager.views',
    # Get all the ContentModels as JSON, HTML, XML or Drupal output
    url(r'^contentmodels\.(?P<extension>json|html|xml|drupal)$', 'get_all_models'),
    # Get a single ContentModel as JSON, HTML, XML or Drupal output
    url(r'^contentmodel/(?P<content_model>.*)\.(?P<extension>json|html|xml|drupal)$', 'get_model'),
    # Get a FeatureCatalogue for a particular ModelVersion
    url(r'^featurecatalog/(?P<content_model>.*)/(?P<model_version>.*)\.xml', 'get_feature_catalog'),
    # Homepage
    url(r'^home/$', 'homepage'),
    # Model viewing page
    url(r'^models/$', 'models'),
    # Tools page
    url(r'^tools/$', 'tools'),
    # Swagger documentation
    url(r'^docs/', 'swaggerui'),
    url(r'^swagger/(?P<path>.*)', 'swagger')
)
# Serve static files
# NOTE: only wired up under DEBUG; in production the front-end web server
# is expected to serve MEDIA_ROOT instead of Django's static view.
if settings.DEBUG:
    urlpatterns += patterns('',
        url(r'^files/(?P<path>.*)$', 'django.views.static.serve',
            {'document_root': settings.MEDIA_ROOT}),
    )
| {
"content_hash": "3405c1b0300445097439db9376fa944d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 119,
"avg_line_length": 39.8235294117647,
"alnum_prop": 0.4807976366322009,
"repo_name": "usgin/modelmanager",
"id": "beee6f9ed0751fa9d18c28592fae78b8059fbd5a",
"size": "1354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "46332"
},
{
"name": "JavaScript",
"bytes": "144509"
},
{
"name": "Python",
"bytes": "93087"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from .views import retrieve_token,like,unlike,like_count,fill_modal,user_like
# URLconf for the likes app (legacy `patterns()` syntax).  The class-based
# views (like, fill_modal, user_like) are mounted with .as_view(); the
# others (retrieve_token, unlike, like_count) are plain view functions.
urlpatterns = patterns('',
    # CSRF/session token retrieval for AJAX clients
    url(r'^getCookie/$',retrieve_token, name = 'token'),
    url(r'^like/$',like.as_view(),name='like'),
    url(r'^unlike/$',unlike,name='unlike'),
    url(r'^like_count/$',like_count,name='like_count'),
    url(r'^fill/$',fill_modal.as_view(),name='fill'),
    url(r'^label/$',user_like.as_view(),name='label'),
    # (?P<id>\d+)/
)
| {
"content_hash": "228214e6d97f55063526e86747cb0ff7",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 77,
"avg_line_length": 25.05,
"alnum_prop": 0.626746506986028,
"repo_name": "amartinez1/confessions",
"id": "92dd42807a9d0ae240f773161c254370c4f96f9d",
"size": "501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "likes/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "56859"
},
{
"name": "JavaScript",
"bytes": "91746"
},
{
"name": "Python",
"bytes": "16206"
}
],
"symlink_target": ""
} |
import json
from django import forms
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from curling.lib import HttpClientError, HttpServerError
from mock import Mock, patch
from nose.tools import eq_, ok_
from rest_framework.request import Request
from test_utils import RequestFactory
import amo
from amo.tests import app_factory, TestCase
from mkt.api.tests.test_oauth import RestOAuth
from mkt.developers.api_payments import (AddonPaymentAccountSerializer,
PaymentAppViewSet)
from mkt.developers.models import (AddonPaymentAccount, PaymentAccount,
SolitudeSeller)
from mkt.developers.tests.test_providers import Patcher
from mkt.prices.models import AddonPremium, Price
from mkt.site.fixtures import fixture
from mkt.webapps.models import AddonUpsell, AddonUser, Webapp
# Fixture payload mirroring the package-registration fields the Bango
# payment provider expects (see the `data` assertions in the tests below).
package_data = {
    'companyName': 'company',
    'vendorName': 'vendor',
    'financeEmailAddress': 'a@a.com',
    'adminEmailAddress': 'a@a.com',
    'supportEmailAddress': 'a@a.com',
    'address1': 'address 1',
    'addressCity': 'city',
    'addressState': 'state',
    'addressZipCode': 'zip',
    'addressPhone': '123',
    'countryIso': 'BRA',
    'currencyIso': 'EUR',
    'account_name': 'new',
    'provider': 'bango',
}
# Bank/payout details required when registering an account.
bank_data = {
    'bankAccountPayeeName': 'name',
    'bankAccountNumber': '123',
    'bankAccountCode': '123',
    'bankName': 'asd',
    'bankAddress1': 'address 2',
    'bankAddressZipCode': '123',
    'bankAddressIso': 'BRA',
}
# Full account-creation payload: package details plus bank details.
payment_data = package_data.copy()
payment_data.update(bank_data)
class UpsellCase(TestCase):
    """Shared fixtures and helpers for tests of app upsell relationships."""
    def url(self, app):
        """Return the API detail URL for `app`."""
        return reverse('app-detail', kwargs={'pk': app.pk})
    def setUp(self):
        # One free app from the fixtures, one freshly created premium app.
        self.free = Webapp.objects.get(pk=337141)
        self.free_url = self.url(self.free)
        self.premium = app_factory(premium_type=amo.ADDON_PREMIUM)
        self.premium_url = self.url(self.premium)
        self.upsell_list = reverse('app-upsell-list')
    def create_upsell(self):
        """Create a free->premium upsell and remember its detail URL."""
        self.upsell = AddonUpsell.objects.create(free=self.free,
                                                 premium=self.premium)
        self.upsell_url = reverse('app-upsell-detail',
                                  kwargs={'pk': self.upsell.pk})
    def create_allowed(self):
        """Make the authenticated profile an author of both apps."""
        AddonUser.objects.create(addon=self.free, user=self.profile)
        AddonUser.objects.create(addon=self.premium, user=self.profile)
class TestUpsell(RestOAuth, UpsellCase):
    """API tests for creating, updating and deleting upsell relationships."""
    fixtures = fixture('webapp_337141', 'user_2519')
    def setUp(self):
        super(TestUpsell, self).setUp()
        # UpsellCase is not in the super() chain set up by RestOAuth,
        # so its fixture setup is invoked explicitly.
        UpsellCase.setUp(self)
    def test_create(self):
        eq_(self.client.post(self.upsell_list, data={}).status_code, 400)
    def test_missing(self):
        res = self.client.post(self.upsell_list,
                               data=json.dumps({'free': self.free_url}))
        eq_(res.status_code, 400)
        eq_(res.json['premium'], [u'This field is required.'])
    def test_not_allowed(self):
        # Caller must be an author of both apps to link them.
        res = self.client.post(self.upsell_list, data=json.dumps(
            {'free': self.free_url, 'premium': self.premium_url}))
        eq_(res.status_code, 403)
    def test_allowed(self):
        self.create_allowed()
        res = self.client.post(self.upsell_list, data=json.dumps(
            {'free': self.free_url, 'premium': self.premium_url}))
        eq_(res.status_code, 201)
    def test_delete_not_allowed(self):
        self.create_upsell()
        eq_(self.client.delete(self.upsell_url).status_code, 403)
    def test_delete_allowed(self):
        self.create_upsell()
        self.create_allowed()
        eq_(self.client.delete(self.upsell_url).status_code, 204)
    def test_wrong_way_around(self):
        # A premium app cannot be the free side of an upsell.
        res = self.client.post(self.upsell_list, data=json.dumps(
            {'free': self.premium_url, 'premium': self.free_url}))
        eq_(res.status_code, 400)
    def test_patch_new_not_allowed(self):
        # Trying to patch to a new object you do not have access to.
        self.create_upsell()
        self.create_allowed()
        another = app_factory(premium_type=amo.ADDON_PREMIUM)
        res = self.client.patch(self.upsell_url, data=json.dumps(
            {'free': self.free_url, 'premium': self.url(another)}))
        eq_(res.status_code, 403)
    def test_patch_old_not_allowed(self):
        # Trying to patch an old object you do not have access to.
        self.create_upsell()
        AddonUser.objects.create(addon=self.free, user=self.profile)
        # We did not give you access to patch away from self.premium.
        another = app_factory(premium_type=amo.ADDON_PREMIUM)
        AddonUser.objects.create(addon=another, user=self.profile)
        res = self.client.patch(self.upsell_url, data=json.dumps(
            {'free': self.free_url, 'premium': self.url(another)}))
        eq_(res.status_code, 403)
    def test_patch(self):
        self.create_upsell()
        self.create_allowed()
        another = app_factory(premium_type=amo.ADDON_PREMIUM)
        AddonUser.objects.create(addon=another, user=self.profile)
        res = self.client.patch(self.upsell_url, data=json.dumps(
            {'free': self.free_url, 'premium': self.url(another)}))
        eq_(res.status_code, 200)
class AccountCase(Patcher, TestCase):
    """Shared fixtures for payment-account tests: a seller, a payment
    account and the relevant API URLs, with the Solitude client patched
    out by Patcher."""
    def setUp(self):
        self.app = Webapp.objects.get(pk=337141)
        self.app.update(premium_type=amo.ADDON_PREMIUM)
        self.seller = SolitudeSeller.objects.create(user_id=2519)
        self.account = PaymentAccount.objects.create(user_id=2519,
            solitude_seller=self.seller, account_id=123, name='mine')
        self.app_payment_list = reverse('app-payment-account-list')
        self.payment_list = reverse('payment-account-list')
        self.payment_url = reverse('payment-account-detail',
                                   kwargs={'pk': self.account.pk})
        super(AccountCase, self).setUp()
        # Canned Solitude responses used by the provider code under test.
        self.patched_client.api.generic.product.get_object.return_value = {
            'resource_uri': 'foo'}
        self.patched_client.api.bango.product.get_object.return_value = {
            'resource_uri': 'foo', 'bango_id': 'bar'}
    def create(self):
        """Link the payment account to the app and remember the detail URL."""
        self.payment = AddonPaymentAccount.objects.create(addon=self.app,
            payment_account=self.account)
        self.app_payment_detail = reverse('app-payment-account-detail',
                                          kwargs={'pk': self.payment.pk})
    def create_price(self):
        """Give the app a premium price tier."""
        price = Price.objects.create(price='1')
        AddonPremium.objects.create(addon=self.app, price=price)
    def create_user(self):
        """Make the authenticated profile an author of the app."""
        AddonUser.objects.create(addon=self.app, user=self.profile)
    def other(self, shared=False):
        """Create a second user's account; return POST data referencing it."""
        self.seller2 = SolitudeSeller.objects.create(user_id=31337, uuid='foo')
        self.other_account = PaymentAccount.objects.create(user_id=31337,
            solitude_seller=self.seller2, account_id=123,
            seller_uri='seller_uri', uri='uri', shared=shared, name='other')
        self.other_url = reverse('payment-account-detail',
                                 kwargs={'pk': self.other_account.pk})
        return self.data(overrides={'payment_account': self.other_url})
    def data(self, overrides=None):
        """Base POST payload for linking an app to a payment account."""
        res = {
            'addon': self.app.get_api_url(pk=True),
            'payment_account': self.payment_url,
            'provider': 'bango',
        }
        if overrides:
            res.update(overrides)
        return res
class TestSerializer(AccountCase):
    """Smoke tests for AddonPaymentAccountSerializer."""
    fixtures = fixture('webapp_337141', 'user_999', 'user_2519')
    def test_serialize(self):
        # Just a smoke test that we can serialize this correctly.
        self.create()
        request = Request(RequestFactory().get('/'))
        res = AddonPaymentAccountSerializer(self.payment,
                                            context={'request': request}).data
        eq_(res['url'], self.app_payment_detail)
    def test_free(self):
        # A free app must not validate against a payment account.
        self.create()
        self.app.update(premium_type=amo.ADDON_FREE)
        res = AddonPaymentAccountSerializer(self.payment)
        ok_(not res.is_valid())
# Pin the provider configuration so the tests exercise the Bango paths only.
@override_settings(DEFAULT_PAYMENT_PROVIDER='bango',
                   PAYMENT_PROVIDERS=['bango'])
class TestPaymentAccount(AccountCase, RestOAuth):
    """CRUD tests for the payment-account API endpoints."""
    fixtures = fixture('webapp_337141', 'user_999', 'user_2519')
    def test_anonymous(self):
        # Anonymous users may neither view nor list payment accounts.
        r = self.anon.get(self.payment_url)
        eq_(r.status_code, 403)
        r = self.anon.get(self.payment_list)
        eq_(r.status_code, 403)
    def test_get_payments_account_list(self):
        # Another user's account must not leak into the listing.
        self.other()
        res = self.client.get(self.payment_list)
        data = json.loads(res.content)
        eq_(data['meta']['total_count'], 1)
        eq_(data['objects'][0]['account_name'], 'mine')
        eq_(data['objects'][0]['resource_uri'], self.payment_url)
    def test_get_payments_account(self):
        res = self.client.get(self.payment_url)
        eq_(res.status_code, 200, res.content)
        data = json.loads(res.content)
        eq_(data['account_name'], 'mine')
        eq_(data['resource_uri'], self.payment_url)
    def test_get_other_payments_account(self):
        # Other users' accounts 404 rather than 403: no existence leak.
        self.other()
        res = self.client.get(self.other_url)
        eq_(res.status_code, 404, res.content)
    def test_create(self):
        res = self.client.post(self.payment_list,
                               data=json.dumps(payment_data))
        data = json.loads(res.content)
        eq_(data['account_name'], 'new')
        new_account = PaymentAccount.objects.get(name='new')
        ok_(new_account.pk != self.account.pk)
        eq_(new_account.user, self.user)
        data = self.bango_patcher.package.post.call_args[1]['data']
        expected = package_data.copy()
        # account_name/provider are API-level fields, not forwarded to Bango.
        expected.pop('account_name')
        expected.pop('provider')
        for key in expected.keys():
            eq_(package_data[key], data[key])
    def test_update_payments_account(self):
        res = self.client.put(self.payment_url,
                              data=json.dumps(payment_data))
        eq_(res.status_code, 204, res.content)
        self.account.reload()
        eq_(self.account.name, 'new')
        data = self.bango_patcher.api.by_url().patch.call_args[1]['data']
        expected = package_data.copy()
        expected.pop('account_name')
        expected.pop('provider')
        for key in expected.keys():
            eq_(package_data[key], data[key])
    def test_update_other_payments_account(self):
        self.other()
        res = self.client.put(self.other_url,
                              data=json.dumps(payment_data))
        eq_(res.status_code, 404, res.content)
        self.other_account.reload()
        eq_(self.other_account.name, 'other')  # not "new".
    def test_delete_payments_account(self):
        # Deletion marks the account inactive rather than removing the row.
        self.create_user()
        self.create()
        eq_(self.account.inactive, False)
        res = self.client.delete(self.payment_url)
        eq_(res.status_code, 204, res.content)
        self.account.reload()
        eq_(self.account.inactive, True)
    def test_delete_shared(self):
        # Shared accounts cannot be deleted: conflict.
        self.create_user()
        self.create()
        self.account.update(shared=True)
        eq_(self.account.inactive, False)
        res = self.client.delete(self.payment_url)
        eq_(res.status_code, 409)
    def test_delete_others_payments_account(self):
        self.create_user()
        self.create()
        self.other()
        eq_(self.other_account.inactive, False)
        res = self.client.delete(self.other_url)
        eq_(res.status_code, 404, res.content)
        self.other_account.reload()
        eq_(self.other_account.inactive, False)
    def test_create_fail(self):
        # Client errors from Solitude bubble up with their payload intact.
        err = {'broken': True}
        self.bango_patcher.package.post.side_effect = HttpClientError(
            content=err)
        res = self.client.post(self.payment_list,
                               data=json.dumps(payment_data))
        eq_(res.status_code, 500)
        eq_(json.loads(res.content), err)
    def test_create_fail2(self):
        self.bango_patcher.package.post.side_effect = HttpServerError()
        res = self.client.post(self.payment_list,
                               data=json.dumps(payment_data))
        eq_(res.status_code, 500)
# Pin the provider configuration so the tests exercise the Bango paths only.
@override_settings(DEFAULT_PAYMENT_PROVIDER='bango',
                   PAYMENT_PROVIDERS=['bango'])
class TestAddonPaymentAccount(AccountCase, RestOAuth):
    """Tests for linking payment accounts to apps via the API."""
    fixtures = fixture('webapp_337141', 'user_999', 'user_2519')
    def test_empty(self):
        eq_(self.client.post(self.app_payment_list, data={}).status_code, 400)
    def test_not_allowed(self):
        # Caller is not an author of the app.
        res = self.client.post(self.app_payment_list,
                               data=json.dumps(self.data()))
        eq_(res.status_code, 403)
    def test_allowed(self):
        self.bango_patcher.product.get_object_or_404.return_value = {
            'resource_uri': '/f/b'}
        self.create_price()
        self.create_user()
        res = self.client.post(self.app_payment_list,
                               data=json.dumps(self.data()))
        eq_(res.status_code, 201, res.content)
        account = AddonPaymentAccount.objects.get()
        eq_(account.payment_account, self.account)
    def test_cant_change_addon(self):
        app = app_factory(premium_type=amo.ADDON_PREMIUM)
        AddonUser.objects.create(addon=app, user=self.profile)
        self.create()
        self.create_price()
        self.create_user()
        data = self.data({'payment_account': self.payment_url,
                          'addon': app.get_api_url(pk=True)})
        res = self.client.patch(self.app_payment_detail, data=json.dumps(data))
        # Ideally we should make this a 400.
        eq_(res.status_code, 403, res.content)
    def test_cant_use_someone_elses(self):
        data = self.other(shared=False)
        self.create_price()
        self.create_user()
        res = self.client.post(self.app_payment_list, data=json.dumps(data))
        eq_(res.status_code, 403, res.content)
    def test_can_shared(self):
        # Shared accounts may be used by any app author.
        self.bango_patcher.product.get_object_or_404.return_value = {
            'resource_uri': '/f/b'}
        data = self.other(shared=True)
        self.create_price()
        self.create_user()
        res = self.client.post(self.app_payment_list, data=json.dumps(data))
        eq_(res.status_code, 201, res.content)
class TestPaymentStatus(AccountCase, RestOAuth):
    """Tests for the endpoint reporting an app's payment status."""
    fixtures = fixture('webapp_337141', 'user_999', 'user_2519')
    def setUp(self):
        super(TestPaymentStatus, self).setUp()
        self.create()
        self.payment.account_uri = '/bango/package/1/'
        self.payment.save()
        self.list_url = reverse('app-payments-status-list',
                                kwargs={'pk': 337141})
    def test_no_auth(self):
        eq_(self.anon.post(self.list_url, data={}).status_code, 403)
    def test_not_owner(self):
        eq_(self.client.post(self.list_url, data={}).status_code, 403)
    def test_no_account(self):
        self.payment.delete()
        eq_(self.client.post(self.list_url, data={}).status_code, 400)
    @patch('mkt.developers.api_payments.get_client')
    def test_owner(self, get_client):
        client = Mock()
        client.api.bango.status.post.return_value = {'status': 1}
        get_client.return_value = client
        AddonUser.objects.create(addon_id=337141, user_id=self.user.pk)
        res = self.client.post(self.list_url, data={})
        # The numeric status 1 from the backend is rendered as 'passed'.
        eq_(res.json['bango']['status'], 'passed')
        eq_(res.status_code, 200)
class TestPaymentDebug(AccountCase, RestOAuth):
    """Tests for the payment debug endpoint (Transaction:Debug permission)."""
    fixtures = fixture('webapp_337141', 'user_999', 'user_2519')
    def setUp(self):
        super(TestPaymentDebug, self).setUp()
        self.create()
        self.payment.account_uri = '/bango/package/1/'
        self.payment.save()
        self.list_url = reverse('app-payments-debug-list',
                                kwargs={'pk': 337141})
    def test_no_auth(self):
        eq_(self.anon.get(self.list_url).status_code, 403)
    def test_no_perms(self):
        eq_(self.client.get(self.list_url).status_code, 403)
    @patch('mkt.developers.api_payments.get_client')
    def test_good(self, get_client):
        client = Mock()
        client.api.bango.debug.get.return_value = {'bango':
                                                   {'environment': 'dev'}}
        get_client.return_value = client
        self.app.update(premium_type=amo.ADDON_FREE_INAPP)
        self.grant_permission(self.profile, 'Transaction:Debug')
        res = self.client.get(self.list_url)
        eq_(res.status_code, 200)
        eq_(res.json['bango']['environment'], 'dev')
class Form(forms.Form):
    """Minimal form accepting only the pk 'valid'; used by
    TestPaymentAppViewSet to drive PaymentAppViewSet's app lookup."""
    app = forms.ChoiceField(choices=(('valid', 'valid'),))
class TestPaymentAppViewSet(TestCase):
    """Unit tests for PaymentAppViewSet's form-driven app lookup."""
    def setUp(self):
        self.request = RequestFactory().get('/')
        self.viewset = PaymentAppViewSet()
        self.viewset.action_map = {}
        self.viewset.form = Form
    def test_ok(self):
        self.viewset.initialize_request(self.request, pk='valid')
        ok_(self.viewset.app)
    def test_not_ok(self):
        # An invalid pk leaves .app set to None instead of raising.
        self.viewset.initialize_request(self.request, pk='invalid')
        eq_(self.viewset.app, None)
| {
"content_hash": "cab5d2635c6cf9efd0b8460860aa04dd",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 79,
"avg_line_length": 37.091684434968016,
"alnum_prop": 0.6102552310876064,
"repo_name": "ngokevin/zamboni",
"id": "2201ba1d7cf7ec4ecad2868c4cd9c46b9af6c102",
"size": "17396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/developers/tests/test_api_payments.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356777"
},
{
"name": "JavaScript",
"bytes": "536388"
},
{
"name": "Python",
"bytes": "3883015"
},
{
"name": "Shell",
"bytes": "13597"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
    """Drop the chapters, participants and pos fields from the Group model
    (follow-up to 0003_auto_20190906_2147)."""
    dependencies = [
        ('bhs', '0003_auto_20190906_2147'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='group',
            name='chapters',
        ),
        migrations.RemoveField(
            model_name='group',
            name='participants',
        ),
        migrations.RemoveField(
            model_name='group',
            name='pos',
        ),
    ]
| {
"content_hash": "390c493f064c2abaa0e6d484eae77d55",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 43,
"avg_line_length": 21,
"alnum_prop": 0.5093167701863354,
"repo_name": "dbinetti/barberscore-django",
"id": "9769041b8622db9295dc711086c167a61c5c1f46",
"size": "532",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "project/apps/bhs/migrations/0004_auto_20190909_0637.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "20279"
},
{
"name": "Python",
"bytes": "348418"
}
],
"symlink_target": ""
} |
import frida
import sys
from pwn import log
import json
import os
MODULES = []
FUNCTIONS = []
SCRIPT = """
Interceptor.attach(ptr("{addr}"), {{
onEnter: function(args) {{
send({format});
}}
}});
"""
PATH = ""
def on_message(message, data):
    """Frida message callback: append each reported call to <PATH>/<name>.dat.

    Expects `message['payload']` to be a JSON document with at least a
    "name" key.  Python 2 only: the payload is run through 'string-escape'
    before parsing so raw bytes survive json.loads.
    """
    if message['type'] == 'send':
        info = json.loads(str(message['payload']).encode('string-escape'),
                          strict=False)
        # One log file per hooked function, appended across calls.
        filename = PATH + info["name"] + ".dat"
        with open(filename, "a+") as f:
            json.dump(info, f)
            f.write("\n")
        log.info("stored call to " + info["name"])
    else:
        # Errors and other message types are only reported, not stored.
        log.warning("Could not parse: " + str(message))
def genscript(info, funct):
    """Build the Frida Interceptor script for one hooked function.

    Parameters:
        info: dict loaded from functions/<name>.json; its "parameters" list
            holds dicts with "name", "type" (string/num/addr) and "monitor".
        funct: frida export descriptor providing .name and .absolute_address.

    Returns the SCRIPT template instantiated with the function's address and
    a JS expression that serializes the monitored arguments to JSON.
    """
    fstring = '\'{'
    fstring += '"name": "{}", '.format(funct.name)
    fstring += '"parameters": ['
    # Use enumerate instead of the original list.index(p): index() was an
    # O(n) scan per parameter and returned the wrong slot for duplicates.
    for idx, p in enumerate(info["parameters"]):
        if p["monitor"]:
            fstring += '{'
            fstring += '"name": "{}", '.format(p["name"])
            fstring += '"content": "\' + '
            if(p["type"] == "string"):
                # Hex-encode the C string so arbitrary bytes survive JSON.
                fstring += '"\\' + '\\x" + '
                fstring += 'Memory.readCString('
                fstring += 'args[{}]'.format(idx)
                fstring += ').split("").map(function(a){return '
                fstring += 'a.charCodeAt(0).toString(16)}).join("\\'
                fstring += '\\x")'
            elif(p["type"] == "num"):
                fstring += 'args[{}]'.format(idx)
                fstring += '.toInt32()'
            elif(p["type"] == "addr"):
                fstring += 'args[{}]'.format(idx)
            else:
                # BUG FIX: `"..." + p` raised TypeError (str + dict);
                # stringify the parameter description explicitly.
                log.warn("UNKNOWN TYPE IN: " + str(p))
            fstring += ' + \'"}, '
    if fstring[-2:] == ', ':  # remove trailing ', '
        fstring = fstring[:-2]
    fstring += ']'
    fstring += '}\''
    d = {
        'addr': funct.absolute_address,
        'format': fstring
    }
    return SCRIPT.format(**d)
def main(target):
    """Attach to `target` (a process name or PID) over frida-USB, hook the
    functions described in functions/*.json inside the modules listed in
    config/modules.json, and log every call until stdin closes.
    """
    global PATH
    log.info("Going to analyze {}".format(target))
    try:
        session = frida.get_usb_device().attach(target)
    except frida.ServerNotRunningError:
        # pwnlib's log.error raises itself; fall back to a plain exit.
        try:
            log.error("Please start frida server first")
        except:
            sys.exit(-1)
    except frida.TimedOutError:
        try:
            log.error("Frida timeout...")
        except:
            sys.exit(-1)
    # NOTE(review): this assignment shadows the module-level MODULES with a
    # local of the same name; the global list itself stays empty.
    with open("config/modules.json") as j:
        MODULES = json.load(j)
    log.info("Will look at: {}".format(', '.join(MODULES)))
    # Results land in results/<target>/run_<n>/ where n auto-increments.
    PATH = "results/" + sys.argv[1] + "/"
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    runnr = len([x for x in os.listdir(PATH) if os.path.isdir(PATH + x)])
    PATH += "run_"
    PATH += str(runnr)
    PATH += "/"
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    # Keep only the modules we were told to inspect.
    modules = session.enumerate_modules()
    tmp = []
    for M in MODULES:
        tmp.append(modules[[x.name for x in modules].index(M)])
    modules = tmp
    functions = []
    for x in modules:
        functions += x.enumerate_exports()
    log.info("Found {} functions".format(len(functions)))
    # Which functions do I need to look at?
    for filename in os.listdir("functions/"):
        with open("functions/" + filename) as j:
            FUNCTIONS.append(json.load(j))
    lookup = [x["name"] for x in FUNCTIONS]
    log.info("Will look for: {}".format(', '.join(lookup)))
    for f in lookup:
        try:
            result = functions[[x.name for x in functions].index(f)]
        except ValueError:
            # BUG FIX: the original message lacked the space before
            # "not found" (it printed e.g. "Function strcpynot found").
            log.warn("Function " + f + " not found")
            continue
        log.info("Found {} in {} @ {}".format(result.name,
                                              result.module.name,
                                              hex(result.absolute_address)
                                              ))
        script = session.create_script(genscript(FUNCTIONS[lookup.index(f)],
                                                 result))
        script.on('message', on_message)
        script.load()
    log.info("Injected all needed scripts, now listening")
    sys.stdin.read()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        # pwnlib's log.error raises; convert that into a clean exit.
        try:
            log.error("Usage: %s <process name or PID>" % __file__)
        except:
            sys.exit(-1)
    # Accept either a PID (int) or a process name (str) on the command line.
    try:
        target_process = int(sys.argv[1])
    except ValueError:
        target_process = sys.argv[1]
    main(target_process)
| {
"content_hash": "ed7356063460825f56b97aa0f0e30365",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 76,
"avg_line_length": 29.894736842105264,
"alnum_prop": 0.4940580985915493,
"repo_name": "sigttou/analyzecrypt.py",
"id": "6ef6448137e8d63b3c26f2da89a28609a4c8e446",
"size": "4567",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyze.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "335"
},
{
"name": "C",
"bytes": "14242"
},
{
"name": "C++",
"bytes": "4895"
},
{
"name": "Python",
"bytes": "23199"
}
],
"symlink_target": ""
} |
from tweepy.error import TweepError
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href, \
parse_search_datetime, unescape_html
class ResultSet(list):
    """A list subclass holding parsed models from a Twitter API query.

    Endpoint-specific pagination metadata (max_id, next_page, ...) may be
    attached as plain attributes by the parse_list classmethods below.
    """
class Model(object):
    """Common base for all API models.

    Remembers the API handle that produced the instance and defines the
    parse/parse_list contract subclasses implement.
    """

    def __init__(self, api=None):
        self._api = api

    def __getstate__(self):
        # Pickling support: the API handle carries live auth/connection
        # state and must not be serialized along with the model.
        state = dict(self.__dict__)
        state.pop('_api', None)
        return state

    @classmethod
    def parse(cls, api, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse a list of JSON objects into a ResultSet of instances."""
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in json_list if obj)
        return results
class Status(Model):
    """A single tweet, with action helpers bound to the owning API."""
    @classmethod
    def parse(cls, api, json):
        """Build a Status from a JSON payload, converting nested objects."""
        status = cls(api)
        for k, v in json.items():
            if k == 'user':
                # Delegate to whichever user model the parser is configured with.
                user_model = getattr(api.parser.model_factory, 'user')
                user = user_model.parse(api, v)
                setattr(status, 'author', user)
                setattr(status, 'user', user)  # DEPRECATED alias for .author
            elif k == 'created_at':
                setattr(status, k, parse_datetime(v))
            elif k == 'source':
                # An HTML source means an <a> tag: split into label and URL.
                if '<' in v:
                    setattr(status, k, parse_html_value(v))
                    setattr(status, 'source_url', parse_a_href(v))
                else:
                    setattr(status, k, v)
                    setattr(status, 'source_url', None)
            elif k == 'retweeted_status':
                setattr(status, k, Status.parse(api, v))
            elif k == 'place':
                if v is not None:
                    setattr(status, k, Place.parse(api, v))
                else:
                    setattr(status, k, None)
            else:
                setattr(status, k, v)
        return status
    def destroy(self):
        return self._api.destroy_status(self.id)
    def retweet(self):
        return self._api.retweet(self.id)
    def retweets(self):
        return self._api.retweets(self.id)
    def favorite(self):
        return self._api.create_favorite(self.id)
class User(Model):
    """A Twitter user profile plus convenience wrappers around the API."""

    @classmethod
    def parse(cls, api, json):
        """Build a User from a JSON payload, converting nested objects."""
        user = cls(api)
        for key, value in json.items():
            if key == 'created_at':
                setattr(user, key, parse_datetime(value))
            elif key == 'status':
                setattr(user, key, Status.parse(api, value))
            elif key == 'following':
                # Twitter sends null instead of false; normalize to a bool.
                setattr(user, key, value is True)
            else:
                setattr(user, key, value)
        return user

    @classmethod
    def parse_list(cls, api, json_list):
        # The payload is either a bare list or wrapped in {'users': [...]}.
        items = json_list if isinstance(json_list, list) else json_list['users']
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in items)
        return results

    def timeline(self, **kargs):
        return self._api.user_timeline(user_id=self.id, **kargs)

    def friends(self, **kargs):
        return self._api.friends(user_id=self.id, **kargs)

    def followers(self, **kargs):
        return self._api.followers(user_id=self.id, **kargs)

    def follow(self):
        self._api.create_friendship(user_id=self.id)
        self.following = True

    def unfollow(self):
        self._api.destroy_friendship(user_id=self.id)
        self.following = False

    def lists_memberships(self, *args, **kargs):
        return self._api.lists_memberships(user=self.screen_name,
                                           *args, **kargs)

    def lists_subscriptions(self, *args, **kargs):
        return self._api.lists_subscriptions(user=self.screen_name,
                                             *args, **kargs)

    def lists(self, *args, **kargs):
        return self._api.lists(user=self.screen_name, *args, **kargs)

    def followers_ids(self, *args, **kargs):
        return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
    """A direct message between two users."""

    @classmethod
    def parse(cls, api, json):
        dm = cls(api)
        for key, value in json.items():
            if key in ('sender', 'recipient'):
                # Both endpoints of the message are full user objects.
                setattr(dm, key, User.parse(api, value))
            elif key == 'created_at':
                setattr(dm, key, parse_datetime(value))
            else:
                setattr(dm, key, value)
        return dm

    def destroy(self):
        return self._api.destroy_direct_message(self.id)
class Friendship(Model):
    """The two sides of a follow relationship between a pair of users."""

    @classmethod
    def parse(cls, api, json):
        """Return a (source, target) pair parsed from the relationship doc."""
        relationship = json['relationship']
        source, target = cls(api), cls(api)
        for attr, value in relationship['source'].items():
            setattr(source, attr, value)
        for attr, value in relationship['target'].items():
            setattr(target, attr, value)
        return source, target
class Category(Model):
    """A suggested-user category; all payload fields are copied verbatim."""

    @classmethod
    def parse(cls, api, json):
        category = cls(api)
        for attr, value in json.items():
            setattr(category, attr, value)
        return category
class SavedSearch(Model):
    """A search query the authenticated user has saved."""

    @classmethod
    def parse(cls, api, json):
        ss = cls(api)
        for key, value in json.items():
            # Only the timestamp needs conversion; everything else is verbatim.
            converted = parse_datetime(value) if key == 'created_at' else value
            setattr(ss, key, converted)
        return ss

    def destroy(self):
        return self._api.destroy_saved_search(self.id)
class SearchResult(Model):
    """A tweet from the Search API, which uses its own date format and an
    HTML-encoded source field."""

    @classmethod
    def parse(cls, api, json):
        # Note: search results are constructed without an API reference.
        result = cls()
        for key, value in json.items():
            if key == 'created_at':
                value = parse_search_datetime(value)
            elif key == 'source':
                value = parse_html_value(unescape_html(value))
            setattr(result, key, value)
        return result

    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        results = ResultSet()
        # Copy the search/pagination metadata wholesale onto the result set.
        for meta in ('max_id', 'since_id', 'refresh_url', 'next_page',
                     'results_per_page', 'page', 'completed_in', 'query'):
            setattr(results, meta, json_list.get(meta))
        results.extend(cls.parse(api, obj) for obj in json_list['results'])
        return results
class List(Model):
    """A Twitter list, with helpers wrapping the list-related API calls."""

    @classmethod
    def parse(cls, api, json):
        """Build a list model from raw JSON.

        Fixed to instantiate ``cls`` instead of the hard-coded ``List`` so
        that subclasses registered through a custom ModelFactory are
        returned correctly (consistent with every other model's parse()).
        """
        lst = cls(api)
        for k, v in json.items():
            if k == 'user':
                setattr(lst, k, User.parse(api, v))
            elif k == 'created_at':
                setattr(lst, k, parse_datetime(v))
            else:
                setattr(lst, k, v)
        return lst

    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        """Parse either a bare JSON array or a {'lists': [...]} envelope."""
        results = ResultSet()
        if isinstance(json_list, dict):
            json_list = json_list['lists']
        for obj in json_list:
            results.append(cls.parse(api, obj))
        return results

    def update(self, **kargs):
        """Update this list's metadata."""
        return self._api.update_list(self.slug, **kargs)

    def destroy(self):
        """Delete this list."""
        return self._api.destroy_list(self.slug)

    def timeline(self, **kargs):
        """Return the timeline of statuses posted by members of this list."""
        return self._api.list_timeline(self.user.screen_name, self.slug, **kargs)

    def add_member(self, id):
        """Add the user with the given id to this list."""
        return self._api.add_list_member(self.slug, id)

    def remove_member(self, id):
        """Remove the user with the given id from this list."""
        return self._api.remove_list_member(self.slug, id)

    def members(self, **kargs):
        """Return the members of this list."""
        return self._api.list_members(self.user.screen_name, self.slug, **kargs)

    def is_member(self, id):
        """Return whether the user with the given id is a member."""
        return self._api.is_list_member(self.user.screen_name, self.slug, id)

    def subscribe(self):
        """Subscribe the authenticated user to this list."""
        return self._api.subscribe_list(self.user.screen_name, self.slug)

    def unsubscribe(self):
        """Unsubscribe the authenticated user from this list."""
        return self._api.unsubscribe_list(self.user.screen_name, self.slug)

    def subscribers(self, **kargs):
        """Return the subscribers of this list."""
        return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs)

    def is_subscribed(self, id):
        """Return whether the user with the given id subscribes to this list."""
        return self._api.is_subscribed_list(self.user.screen_name, self.slug, id)
class Relation(Model):
    """A related-results entry; 'value' payloads may be nested statuses."""

    @classmethod
    def parse(cls, api, json):
        """Build a Relation; nested 'results' recurse into Relation lists."""
        result = cls(api)
        for key, value in json.items():
            if key == 'results':
                setattr(result, key, Relation.parse_list(api, value))
            elif key == 'value' and json['kind'] in ('Tweet', 'LookedupStatus'):
                setattr(result, key, Status.parse(api, value))
            else:
                setattr(result, key, value)
        return result
class Relationship(Model):
    """Follow relationship flags between the authenticated user and another."""

    @classmethod
    def parse(cls, api, json):
        """Flatten the 'connections' array into is_following/is_followed_by."""
        result = cls(api)
        for key, value in json.items():
            if key == 'connections':
                result.is_following = 'following' in value
                result.is_followed_by = 'followed_by' in value
            else:
                setattr(result, key, value)
        return result
class JSONModel(Model):
    """Pass-through model: parse() simply returns the raw JSON payload."""

    @classmethod
    def parse(cls, api, json):
        """Return the payload unchanged."""
        return json
class IDModel(Model):
    """Model for endpoints returning a bare ID list or an {'ids': [...]} envelope."""

    @classmethod
    def parse(cls, api, json):
        """Return the list of IDs, unwrapping the cursor envelope if present."""
        if isinstance(json, list):
            return json
        return json['ids']
class BoundingBox(Model):
    """Geographic bounding box attached to a Place."""

    @classmethod
    def parse(cls, api, json):
        """Copy every attribute from the JSON payload (which may be None)."""
        box = cls(api)
        if json is not None:
            for key, value in json.items():
                setattr(box, key, value)
        return box

    def origin(self):
        """
        Return longitude, latitude of southwest (bottom, left) corner of
        bounding box, as a tuple.
        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][0])

    def corner(self):
        """
        Return longitude, latitude of northeast (top, right) corner of
        bounding box, as a tuple.
        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][2])
class Place(Model):
    """A geographic place, possibly nested inside other places."""

    @classmethod
    def parse(cls, api, json):
        """Build a Place, recursing into bounding_box and contained_within."""
        place = cls(api)
        for key, value in json.items():
            if key == 'bounding_box':
                # bounding_box may be null, e.g. "United States"
                # (id=96683cc9126741d1).
                parsed = BoundingBox.parse(api, value) if value is not None else value
                setattr(place, key, parsed)
            elif key == 'contained_within':
                # contained_within is a list of Places.
                setattr(place, key, Place.parse_list(api, value))
            else:
                setattr(place, key, value)
        return place

    @classmethod
    def parse_list(cls, api, json_list):
        """Parse either a bare array or the reverse-geocode result envelope."""
        if isinstance(json_list, list):
            items = json_list
        else:
            items = json_list['result']['places']
        results = ResultSet()
        for item in items:
            results.append(cls.parse(api, item))
        return results
class ModelFactory(object):
    """
    Used by parsers for creating instances
    of models. You may subclass this factory
    to add your own extended models.
    """
    # Each attribute maps a payload kind to the model class that parses it;
    # parsers look these up by name, so the attribute names are part of the
    # public contract.
    status = Status
    user = User
    direct_message = DirectMessage
    friendship = Friendship
    saved_search = SavedSearch
    search_result = SearchResult
    category = Category
    # Intentionally shadows the builtin `list` — the name mirrors the API's
    # "list" object type and is only ever accessed as an attribute.
    list = List
    relation = Relation
    relationship = Relationship
    json = JSONModel
    ids = IDModel
    place = Place
    bounding_box = BoundingBox
| {
"content_hash": "090eb4ed30297d16a1c5383b75c90849",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 84,
"avg_line_length": 28.728132387706857,
"alnum_prop": 0.5506912442396313,
"repo_name": "sanoju/GaeTwWpBot",
"id": "fe87adb0779d56d2cf20878d1b649f539912c2d7",
"size": "12228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tweepy/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148963"
}
],
"symlink_target": ""
} |
from pycrans.refactor.rules import match_rule
class Rewriter(object):
    """
    Rewrite a script based on a certain set of rules
    """
    def __init__(self, script, analysis):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely as soon as Rewriter is subclassed, because the
        # subclass's __init__ would resolve self.__class__ to itself.
        super(Rewriter, self).__init__()
        self.script = script
        self.analysis = analysis

    def rewrite(self):
        """
        Apply the rewritting rules to a certain script

        :raises RuntimeError: if no rule matches the analysis.
        """
        rule = match_rule(self.analysis)
        if rule is None:
            raise RuntimeError(" No rule matched the script")
        return rule.apply(self.script)
| {
"content_hash": "fe2eb470b278219cd4eef6483c028b13",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 61,
"avg_line_length": 24.565217391304348,
"alnum_prop": 0.5911504424778761,
"repo_name": "citeaalexandru/PyCrans",
"id": "c27b808ff5b4e104ca1454b20eb38027341d4d13",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/pycrans/refactor/rewriter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1093625"
}
],
"symlink_target": ""
} |
import os
import sys
import re
def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.

    :param package: path to the package directory containing __init__.py
    :raises AttributeError: if no `__version__` assignment is found
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left the open file to the garbage collector).
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
version = get_version('stored_messages')

# Prefer setuptools; fall back to the stdlib distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# `python setup.py publish` shortcut: upload an sdist and remind the
# maintainer to tag the release.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    print("You probably want to also tag the version now:")
    print(" git tag -a %s -m 'version %s'" % (version, version))
    print(" git push --tags")
    sys.exit()

# Build the long description; context managers close the files promptly
# (the original relied on the garbage collector to close them).
with open('README.rst') as f:
    readme = f.read()
with open('HISTORY.rst') as f:
    history = f.read().replace('.. :changelog:', '')

setup(
    name='django-stored-messages',
    version=version,
    description='Django contrib.messages on steroids',
    long_description=readme + '\n\n' + history,
    author='evonove',
    author_email='info@evonove.it',
    url='https://github.com/evonove/django-stored-messages',
    packages=[
        'stored_messages',
    ],
    include_package_data=True,
    install_requires=[
        'Django>=1.7',
    ],
    license="BSD",
    zip_safe=False,
    keywords='django-stored-messages',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Framework :: Django',
        'Framework :: Django :: 1.4',
        'Framework :: Django :: 1.5',
        'Framework :: Django :: 1.6',
        'Framework :: Django :: 1.7',
        'Framework :: Django :: 1.8',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
| {
"content_hash": "567db6bf9cde6110ab9a65415df495c7",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 75,
"avg_line_length": 30.227272727272727,
"alnum_prop": 0.587468671679198,
"repo_name": "nthall/django-stored-messages",
"id": "d3fc92f67f2dba9af7a8ff94d11edb0d328a1489",
"size": "2042",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "803"
},
{
"name": "Python",
"bytes": "65241"
}
],
"symlink_target": ""
} |
"""
Lookup an MX record and printout all the MX preference, target, and
associated IP addresses of the targets.
"""
import getdns, pprint, sys
extensions = { "return_both_v4_and_v6" : getdns.EXTENSION_TRUE }
def get_ip(ctx, qname):
    """Resolve qname to a list of IP addresses (both IPv4 and IPv6).

    Exits the process on a getdns API error; returns an empty list when the
    lookup status is not GOOD.
    """
    addresses = []
    try:
        results = ctx.address(name=qname, extensions=extensions)
    except getdns.error as e:
        print(str(e))
        sys.exit(1)
    if results.status != getdns.RESPSTATUS_GOOD:
        print("getdns.address() returned an error: {0}".format(results.status))
    else:
        for addr in results.just_address_answers:
            addresses.append(addr['address_data'])
    return addresses
if __name__ == '__main__':
    qname = sys.argv[1]
    # Fixed: the error branches below formatted an undefined `qtype`,
    # which raised NameError instead of printing the diagnostic.
    qtype = 'MX'
    ctx = getdns.Context()
    try:
        results = ctx.general(name=qname, request_type=getdns.RRTYPE_MX)
    except getdns.error as e:
        print(str(e))
        sys.exit(1)
    status = results.status
    hostlist = []
    if status == getdns.RESPSTATUS_GOOD:
        for reply in results.replies_tree:
            answers = reply['answer']
            for answer in answers:
                if answer['type'] == getdns.RRTYPE_MX:
                    iplist = get_ip(ctx, answer['rdata']['exchange'])
                    for ip in iplist:
                        hostlist.append((answer['rdata']['preference'],
                                         answer['rdata']['exchange'], ip))
    elif status == getdns.RESPSTATUS_NO_NAME:
        print("{0}, {1}: no such name".format(qname, qtype))
    elif status == getdns.RESPSTATUS_ALL_TIMEOUT:
        print("{0}, {1}: query timed out".format(qname, qtype))
    else:
        # Fixed: `results` is an object, not a dict — the original
        # `results["status"]` would have raised TypeError here.
        print("{0}, {1}: unknown return code: {2}".format(qname, qtype,
                                                          results.status))
    for (pref, mx, addr) in sorted(hostlist):
        print(pref, mx, addr)
| {
"content_hash": "e3a022e6f74958964655531d5c2c4f08",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 91,
"avg_line_length": 31.20689655172414,
"alnum_prop": 0.5762430939226519,
"repo_name": "getdnsapi/getdns-python-bindings",
"id": "6399debb0bd58308e0e1a3b1e73177e758d6e1eb",
"size": "1835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/get-mx-ip.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "167399"
},
{
"name": "Python",
"bytes": "12834"
}
],
"symlink_target": ""
} |
"""
Register nodes with HaaS.
This is intended to be used as a template for either creating a mock HaaS setup
for development or to be modified to register real-life nodes that follow a
particular pattern.
In the example environment for which this module is written, there are 10
nodes which have IPMI interfaces that are sequentially numbered starting with
10.0.0.0, have a username of "ADMIN_USER" and password of "ADMIN_PASSWORD".
The ports are also numbered sequentially and are named following a dell switch
scheme, which have ports that look like "R10SW1::GI1/0/5"
It could be used in an environment similar to the one which
``haas.cfg`` corresponds, though could also be used for development with the
``haas.cfg.dev*``
"""
from subprocess import check_call
N_NODES = 6
ipmi_user = "ADMIN_USER"
ipmi_pass = "ADMIN_PASSWORD"
switch = "mock01"
def haas(*args):
    """Invoke the ``haas`` CLI with the given arguments.

    Arguments are stringified first so callers may pass ints (node numbers).
    """
    # list(...) keeps this working on Python 3, where map() is lazy and
    # cannot be concatenated to a list; on Python 2 it is a no-op.
    # print(args) is also valid (and prints identically) on Python 2.
    args = list(map(str, args))
    print(args)
    check_call(['haas'] + args)
# Register the mock switch first; every port below attaches to it.
haas('switch_register', switch, 'mock', 'ip', 'user', 'pass')
# For each node: register the node with sequential IPMI addresses starting
# at 10.0.0.1, give it one NIC, register a Dell-style switch port, and wire
# the NIC to that port.
for node in range(N_NODES):
    ipmi_ip = "10.0.0." + str(node + 1)
    # Port numbering starts at 0 while IPMI addressing starts at 1.
    nic_port = "R10SW1::GI1/0/%d" % (node)
    nic_name = 'nic1'
    haas('node_register', node, "mock", ipmi_ip, ipmi_user, ipmi_pass)
    # MAC address is a placeholder — intended to be filled in for real nodes.
    haas('node_register_nic', node, nic_name, 'FillThisInLater')
    haas('port_register', switch, nic_port)
    haas('port_connect_nic', switch, nic_port, node, nic_name)
| {
"content_hash": "841355b187cb192a971fbfdd461e2824",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 32.25581395348837,
"alnum_prop": 0.7072819033886085,
"repo_name": "kylehogan/hil",
"id": "5e9da6fdf171f54130aee03068b3afad3a1495a2",
"size": "1405",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dbinit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "452"
},
{
"name": "Python",
"bytes": "414855"
},
{
"name": "Shell",
"bytes": "5541"
}
],
"symlink_target": ""
} |
'''Python requirement files classifiers tests.'''
import re
import mock
import pytest
from reqfiles import classifiers
from . import common
@pytest.mark.parametrize(('name', 'key_keyword'), common.KEY_FIXTURES)
def test_keyword_for(classifier, name, key_keyword):
    '''Tests :py:meth:`reqfiles.classifiers.Baseclassifier.keyword_for`.'''
    result = classifier.get(name)
    assert result == key_keyword
def test_classify():
    """classify() tries each plugin in turn and returns the first hit."""
    expected = ('install_requires', None)
    p1 = mock.Mock(**{'return_value.check.return_value': None})
    p2 = mock.Mock(**{'return_value.check.return_value': expected})
    with mock.patch.object(classifiers, 'Classifier') as mocked:
        mocked.plugins = iter([p1, p2])
        assert expected == classifiers.classify('name')
    p1.assert_called_once_with()
    # Fixed: 'asert_called_once_with' was a typo — on mock.Mock it silently
    # created a no-op child mock instead of asserting anything. The check()
    # call happens on the plugin *instance* (return_value), not the class.
    p1.return_value.check.assert_called_once_with('name')
    p2.assert_called_once_with()
    p2.return_value.check.assert_called_once_with('name')
class FooClassifier(classifiers.RegexClassifierMixin, object):
    """Minimal RegexClassifierMixin subclass used by the mixin tests."""

    regex = re.compile(r'requirements/(?P<name>foo).txt')

    def get(self, name):
        """Always classify as a plain install requirement."""
        return ('install_requires', None)
class TestsRegexClassifierMixin(object):
    """Exercises RegexClassifierMixin via the FooClassifier stub."""

    def setup_method(self, name):
        self.classifier = FooClassifier()

    def test_check_match(self):
        '''Tests FooClassifier match.'''
        assert ('install_requires', None) == self.classifier.check('requirements/foo.txt')

    def test_check_no_match(self):
        # Fixed: `is None` instead of `== None` (PEP 8 / flake8 E711).
        assert self.classifier.check('requirements/spam.txt') is None

    def test_no_regex_raises(self):
        self.classifier.regex = None
        with pytest.raises(ValueError):
            self.classifier.check('something')
class TestNamesClassifierMixin(object):
    """Exercises the name-based Requirements classifier."""

    def setup_method(self, name):
        self.classifier = classifiers.Requirements()

    def test_check_match(self):
        """Tests FooClassifier match."""
        assert ('install_requires', None) == self.classifier.check('requirements.txt')
        assert ('install_requires', None) == self.classifier.check('requires.txt')
        assert ('install_requires', None) == self.classifier.check('directory/requires.txt')

    def test_check_no_match(self):
        # Fixed: `is None` instead of `== None` (PEP 8 / flake8 E711).
        assert self.classifier.check('requirementss.txt') is None

    def test_no_names_raises(self):
        self.classifier.names = None
        with pytest.raises(ValueError):
            self.classifier.check('something')

    def test_no_key_keyword_raises(self):
        self.classifier.key_keyword = None
        with pytest.raises(ValueError):
            self.classifier.check('something')
@pytest.mark.parametrize(
    ('filename', 'key_keyword'),
    (('directory/tests_requirements.txt', ('tests_require', None)),
     ('ci-requirements.txt', ('extras_require', 'ci')),
     # Fixed: '\d' is an invalid escape sequence (DeprecationWarning since
     # Python 3.6, a future SyntaxError); '\\d' yields the identical string.
     ('directory\\dev-requirements.txt', ('extras_require', 'dev')),)
)
def test_requirements_files_classifier(rfc_classifier, filename, key_keyword):
    """Tests RequirementsFilesClassifier classifier."""
    assert rfc_classifier.check(filename) == key_keyword
| {
"content_hash": "5baa9f7dbc1f2c380d1214c31d0abd73",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 92,
"avg_line_length": 34.41379310344828,
"alnum_prop": 0.6780227120908484,
"repo_name": "rafaduran/reqfiles",
"id": "b8f5f07ea7f732837c7f524bbf74180f0f63323d",
"size": "2994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_classifiers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26240"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
Authors and contributors: Eric Shook (eshook@kent.edu); Zhengliang Feng (odayfans@gmail.com, zfeng2@kent.edu)
"""
from ..util.Messaging import *
from .Decomposition import *
from .BoundingBox import *
from .Iteration import *
from .PCMLPrims import *
from .PCMLConfig import *
from abc import ABCMeta, abstractmethod
class Operation(object):
    """Abstract base for PCML map-algebra operations.

    Subclasses override ``function`` (applied per location by ``executor``);
    decomposition and iteration strategies are pluggable via kwargs.
    """
    __metaclass__ = ABCMeta
    def __init__(self, name, *args, **kwargs):
        """Operations are applied to layers.
        Args:
            :param name (string): String representation of Operation name
            :param layers (tuple): Tuple of layers to apply operation on
            :param opclass (OpClass): Operation classification (local, focal, zonal, global)
        """
        # Derive class name from operation name
        self.name = name
        PCMLTODO("Only row decomposition method supported, so hardcoding for now")
        _layerstuple = kwargs.get('layers', None)
        # NOTE(review): if 'layers' is omitted, self._layers is never set and
        # later methods will raise AttributeError — confirm callers always
        # supply it.
        if _layerstuple is not None:
            self._layers = list(_layerstuple)
        self.opclass = kwargs.get('opclass', OpClass.localclass)
        self.buffersize = kwargs.get('buffersize', 0)
        self.decomposition = kwargs.get('decomposition', rowdecomposition)  # By default use row decomposition
        self.iteration = kwargs.get('iteration', rowmajoriteration)  # By default use product-based iteration
        # adding this to get the operation specified parameter
        self.kwargs = kwargs
        self.outputlayer = kwargs.get('outputlayer', None)
        if self.opclass == OpClass.localclass and self.buffersize != 0:
            raise PCMLOperationError("Buffersize should be 0 for localclass currently %s" % self.buffersize)
        # If zonal operation we want the entire layer data
        # (a huge buffer effectively gives each subdomain the whole layer)
        if self.opclass == OpClass.zonalclass:
            self.buffersize = 999999999999
    def __repr__(self):
        # Human-readable summary used in debugging output.
        return "<Operation: %s : %i layers>" % (self.name, len(self._layers))
    def getOutputLayers(self):
        """Return the output layer (currently only a single one is supported)."""
        PCMLTODO("Need to support more than one output layer")
        return self._layers[0]
    def _decompositioninit(self):
        """Create (or adopt) the output layer and prepend it to self._layers."""
        # Duplicate a layer to create an output layer with the correct dimensions
        # Get the first layer
        firstlayer = self._layers[0]
        # if outputlayer is passed as an argument, use it, else create an outputlayer from the first layer
        if self.outputlayer is None:
            outputlayer = firstlayer.duplicate()
        else:
            outputlayer = self.outputlayer
        outputlayer.title = "Output for operation %s" % self.name
        self._layers.insert(0, outputlayer)  # Add the output layer to the front of the layers list
    def _decompositionrun(self):
        """ Divides the :member:_layers into subdomains for further processing.
        The decomposition method is defined by :member:`decompositionmethod`.
        You can also define you own decomposition algorithm by overriding this method.
        """
        PCMLTODO("Need to support multiple output layers, this can be done by overriding decomposition and inserting multiple output layers")
        listofsubdomains = []
        self._decompositioninit()
        # The output layer is the first layer in the layers list (self.layers[0])
        # Decompose it with a 0 buffer
        # listofsubdomains.append(self._layers[0].decomposition(self.decomposition_method, 0))
        listofsubdomains.append(self.decomposition(self._layers[0], 0))
        if self._layers[0].data_structure == Datastructure.pointlist:
            self._layers[0].set_pointlist([])
        for layer in self._layers:
            if layer != self._layers[0]:  # Skip the output layer, because it was already decomposed and added
                # listofsubdomains.append(layer.decomposition(self.decomposition_method, self.buffersize)) # buffer size is set based on classification (L,F,Z,G)
                if self.decomposition.__name__ == 'pointrasterrowdecomposition':
                    listofsubdomains.append(self.decomposition(layer, self.buffersize, layerlist=self._layers))
                else:
                    # Create a subdomain and populate it with the correct attribute values
                    listofsubdomains.append(self.decomposition(layer, self.buffersize))  # buffer size is set based on classification (L,F,Z,G)
        # The listofsubdomains is inverted using zip and map to create a list of lists
        # so that each subdomain is grouped with the corresponding subdomain from each layer (see example below)
        # NOTE(review): this relies on Python 2 map() returning a list
        # (consistent with xrange below); on Python 3 it would be an iterator.
        subdomainlists = map(list, zip(*listofsubdomains))
        # listofsubdomains = ( (layer1subdomain1 , layer1subdomain2) , (layer2subdomain1 , layer2subdomain2) )
        # subdomainlists = ( (layer1subdomain1 , layer2subdomain1) , (layer1subdomain2 , layer2subdomain2) )
        return subdomainlists
    # By default we use rowdecomposition as our decomposition method
    # Users may override decomposition with any other method they would like using kwargs (see __init__)
    # def decomposition(layer,buffersize):
    # By default we use rowmajoriteration as our iteration method
    # Users may override iteration with any other method they would like using kwargs (see __init__)
    # def iteration(subdomain):
    def executor(self, subdomains):
        """ Executor handles processing of the function by iterating over locations in a subdomain
        :return: #TODO: Undefined return value.
        """
        PCMLTODO("executor assumes single subdomain as output, which is not universal for all operations")
        outsubdomain = subdomains.pop(0)
        if outsubdomain.data_structure == Datastructure.pointlist:
            pointlist = outsubdomain.get_pointlist()
            for i in xrange(len(pointlist)):
                val = self.function([pointlist[i]], subdomains)
                # Copy before mutating so the original point dict is untouched.
                newdict = pointlist[i].copy()
                newdict['v'] = val
                pointlist[i] = newdict
            if PCMLConfig.exectype == ExecutorType.serialpython:
                self._layers[0].get_pointlist().extend(outsubdomain.get_pointlist())
        elif outsubdomain.data_structure == Datastructure.array:
            outarr = outsubdomain.get_nparray()
            # Iterate over locations in the outsubdomain using iteration method and apply function to each location
            for loc in self.iteration(outsubdomain):
                l = []  # Create an empty list to store locations
                for sd in subdomains:
                    if sd.data_structure != Datastructure.array:  # Skip non array subdomains
                        continue
                    # Get a location in this subdomain with same coordinates as locind
                    locv = sd.get_locval(loc)
                    l.append(locv)  # append to list of locations
                val = self.function(l, subdomains)  # Apply function to all locations
                outarr[loc['r'] - outsubdomain.r][loc['c'] - outsubdomain.c] = val  # Set val to outarr at locind
    def writepointdatatooutputlayer(self, subdomainlists):
        """Append every output subdomain's points to the output layer's point list."""
        for subdomains in subdomainlists:
            self._layers[0].get_pointlist().extend(subdomains[0].get_pointlist())
    def function(self, locations, subdomains):
        """Per-location operation body; concrete subclasses must override."""
        raise PCMLOperationError("Operation function is not implemented")
| {
"content_hash": "555c80e166a2bc4c968a32099f3305fd",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 161,
"avg_line_length": 51.91724137931035,
"alnum_prop": 0.6644527098831031,
"repo_name": "HPCGISLab/pcml",
"id": "5d9f50b8613ab3484e3f82ccf81cfbc8dbdc15f6",
"size": "7528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pcml/core/Operation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "174819"
},
{
"name": "Shell",
"bytes": "652"
}
],
"symlink_target": ""
} |
import socket
import os
import sys
import time
import random
from itertools import chain
from fdfs_client.exceptions import (
FDFSError,
ConnectionError,
ResponseError,
InvaildResponse,
DataError
)
# start class Connection
class Connection(object):
    '''Manage TCP communication to and from Fastdfs Server.'''

    def __init__(self, **conn_kwargs):
        """Store connection settings; no socket is opened yet.

        Required keys: 'host_tuple' (sequence of (addr, port) pairs) and
        'timeout' (seconds).
        """
        self.pid = os.getpid()
        self.host_tuple = conn_kwargs['host_tuple']
        self.remote_port = None
        self.remote_addr = None
        self.timeout = conn_kwargs['timeout']
        self._sock = None

    def __del__(self):
        # Best-effort cleanup; never let destructor errors propagate
        # (narrowed from a bare `except:` which also swallowed SystemExit).
        try:
            self.disconnect()
        except Exception:
            pass

    def connect(self):
        '''Connect to fdfs server.'''
        if self._sock:
            return
        try:
            sock = self._connect()
        # `except X as e` works on Python 2.6+ *and* Python 3; the original
        # `except X, e` form is Python-2-only syntax.
        except socket.error as e:
            raise ConnectionError(self._errormessage(e))
        self._sock = sock
        #print '[+] Create a connection success.'
        #print '\tLocal address is %s:%s.' % self._sock.getsockname()
        #print '\tRemote address is %s:%s' % (self.remote_addr, self.remote_port)

    def sendall(self, msg):
        """Send all of msg, connecting lazily on first use."""
        if not self._sock:
            self.connect()
        self._sock.sendall(msg)

    def recv(self, len):
        """Receive up to `len` bytes from the open socket."""
        return self._sock.recv(len)

    def _connect(self):
        '''Create TCP socket. The host is random one of host_tuple.'''
        self.remote_addr, self.remote_port = random.choice(self.host_tuple)
        sock = socket.create_connection((self.remote_addr, self.remote_port), self.timeout)
        return sock

    def disconnect(self):
        '''Disconnect from fdfs server.'''
        if self._sock is None:
            return
        try:
            self._sock.close()
        except socket.error:
            # Closing a dead socket is not an error worth surfacing.
            pass
        self._sock = None

    def get_sock(self):
        """Return the underlying socket (None when disconnected)."""
        return self._sock

    def _errormessage(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "[-] Error: connect to %s:%s. %s." % \
                   (self.remote_addr, self.remote_port, exception.args[0])
        else:
            return "[-] Error: %s connect to %s:%s. %s." % \
                   (exception.args[0], self.remote_addr, self.remote_port, exception.args[1])
# start ConnectionPool
class ConnectionPool(object):
    '''Generic Connection Pool'''

    def __init__(self, name = '', conn_class = Connection,
                 max_conn = None, **conn_kwargs):
        """Create a pool that builds `conn_class(**conn_kwargs)` on demand."""
        self.pool_name = name
        self.pid = os.getpid()
        self.conn_class = conn_class
        self.max_conn = max_conn or 2**31
        self.conn_kwargs = conn_kwargs
        self._conns_created = 0
        self._conns_available = []
        self._conns_inuse = set()

    def _check_pid(self):
        # After a fork, sockets must not be shared with the parent:
        # rebuild the pool from scratch in the child process.
        if self.pid != os.getpid():
            self.destroy()
            self.__init__(self.pool_name, self.conn_class, self.max_conn, **self.conn_kwargs)

    def make_conn(self):
        '''Create a new connection.'''
        if self._conns_created >= self.max_conn:
            raise ConnectionError('[-] Error: Too many connections.')
        num_try = 10
        while True:
            try:
                if num_try <= 0:
                    sys.exit()
                conn_instance = self.conn_class(**self.conn_kwargs)
                conn_instance.connect()
                self._conns_created += 1
                break
            # `except X as e` / `print(e)` keep this valid on Python 2.6+
            # and Python 3 (originals were Python-2-only syntax).
            except ConnectionError as e:
                print(e)
                num_try -= 1
                conn_instance = None
        return conn_instance

    def get_connection(self):
        '''Get a connection from pool.'''
        self._check_pid()
        try:
            conn = self._conns_available.pop()
        except IndexError:
            # Pool exhausted: lazily create a fresh connection.
            conn = self.make_conn()
        self._conns_inuse.add(conn)
        return conn

    def remove(self, conn):
        '''Remove connection from pool.'''
        if conn in self._conns_inuse:
            self._conns_inuse.remove(conn)
            self._conns_created -= 1
        if conn in self._conns_available:
            self._conns_available.remove(conn)
            self._conns_created -= 1

    def destroy(self):
        '''Disconnect all connections in the pool.'''
        all_conns = chain(self._conns_inuse, self._conns_available)
        for conn in all_conns:
            conn.disconnect()

    def release(self, conn):
        '''Release the connection back to the pool.'''
        self._check_pid()
        # Only re-pool connections created in this process (see _check_pid).
        if conn.pid == self.pid:
            self._conns_inuse.remove(conn)
            self._conns_available.append(conn)
# end ConnectionPool class
def tcp_recv_response(conn, bytes_size, buffer_size = 4096):
    '''Receive response from server.
    It is not include tracker header.
    arguments:
    @conn: connection
    @bytes_size: int, will be received byte_stream size
    @buffer_size: int, receive buffer size
    @Return: tuple,(response, received_size)
    @raises ConnectionError: on socket failure or premature peer close
    '''
    recv_buff = []
    total_size = 0
    try:
        while bytes_size > 0:
            resp = conn._sock.recv(buffer_size)
            if not resp:
                # Peer closed the connection: recv() returned ''.  The
                # original looped forever here because an empty read never
                # decremented bytes_size.
                break
            recv_buff.append(resp)
            total_size += len(resp)
            bytes_size -= len(resp)
    except (socket.error, socket.timeout) as e:
        raise ConnectionError('[-] Error: while reading from socket: (%s)'
                              % e.args)
    if bytes_size > 0:
        raise ConnectionError('[-] Error: connection closed with %d bytes '
                              'still expected' % bytes_size)
    # b'' == '' on Python 2, so this is identical there and also correct
    # for Python 3 byte strings.
    return (b''.join(recv_buff), total_size)
def tcp_send_data(conn, bytes_stream):
    '''Send buffer to server.
    It is not include tracker header.
    arguments:
    @conn: connection
    @bytes_stream: trasmit buffer
    @Return bool
    @raises ConnectionError: on socket failure or timeout
    '''
    try:
        conn._sock.sendall(bytes_stream)
    # `except X as e` works on Python 2.6+ and Python 3 (the original
    # `except X, e` form is Python-2-only syntax).
    except (socket.error, socket.timeout) as e:
        raise ConnectionError('[-] Error: while writting to socket: (%s)'
                              % e.args)
| {
"content_hash": "e6bd81ec7162a58aca271b0e99768be2",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 93,
"avg_line_length": 33.796954314720814,
"alnum_prop": 0.557074196455392,
"repo_name": "golden-tech-native/gd_facerecognize",
"id": "ad8a5d627907ee7fe4178a5a4a549bdf3a4b6424",
"size": "6731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "external/fdfs_client_nowindows/connection.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "366195"
},
{
"name": "HTML",
"bytes": "3520040"
},
{
"name": "JavaScript",
"bytes": "4511474"
},
{
"name": "Lua",
"bytes": "50338"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "194076"
},
{
"name": "Shell",
"bytes": "3059"
}
],
"symlink_target": ""
} |
from oslo.config import cfg
from neutron.plugins.embrane.common import config # noqa
from neutron.tests.unit import test_extension_extraroute as extraroute_test
from neutron.tests.unit import test_l3_plugin as router_test
PLUGIN_NAME = ('neutron.plugins.embrane.plugins.embrane_fake_plugin.'
'EmbraneFakePlugin')
class TestEmbraneL3NatDBTestCase(router_test.L3NatDBIntTestCase):
    # Run the generic L3 NAT DB test suite against the Embrane fake plugin.
    _plugin_name = PLUGIN_NAME

    def setUp(self):
        # The heleos config section requires an admin password; set it
        # before the base class initializes the plugin.
        cfg.CONF.set_override('admin_password', "admin123", 'heleos')
        super(TestEmbraneL3NatDBTestCase, self).setUp()
class ExtraRouteDBTestCase(extraroute_test.ExtraRouteDBIntTestCase):
    # Reuse the generic extra-route test suite with the Embrane fake plugin;
    # only the plugin under test changes.
    _plugin_name = PLUGIN_NAME
| {
"content_hash": "2db671ae75e85093260be131b2005dc2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 75,
"avg_line_length": 34,
"alnum_prop": 0.7573529411764706,
"repo_name": "CingHu/neutron-ustack",
"id": "cd4dc615022d0059fd238fe1935cd6bea52c0804",
"size": "1351",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/embrane/test_embrane_l3_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1109"
},
{
"name": "Python",
"bytes": "11544804"
},
{
"name": "Shell",
"bytes": "29485"
}
],
"symlink_target": ""
} |
import cinderclient
from cinderclient import api_versions
from cinderclient import exceptions
from os_brick.initiator import connector
from oslo_utils import uuidutils
from pbr import version as pbr_version
from brick_cinderclient_ext import brick_utils
from brick_cinderclient_ext import volume_actions as actions
class Client(object):
"""Python client for os-brick
Version history:
1.0.0 - Initial version
1.1.0 - Query volume paths implementation
1.2.0 - Add --nic attribute to get-connector
1.3.0 - Added new v3 attach/detach workflow support
"""
version = '1.3.0'
# Use the legacy attach/detach workflow?
_use_legacy_attach = True
    def __init__(self, volumes_client=None):
        """Initialize the client and probe for new attach/detach support.

        :param volumes_client: an optional cinderclient volumes client; when
            absent, the legacy attach workflow is used unconditionally.
        """
        self.volumes_client = volumes_client
        # Test to see if we have a version of the cinderclient
        # that can do the new volume attach/detach API
        version_want = pbr_version.SemanticVersion(major=2)
        current_version = cinderclient.version_info.semantic_version()
        if (self.volumes_client and current_version >= version_want):
            # We have a recent enough client to test the microversion we need.
            required_version = api_versions.APIVersion("3.44")
            if self.volumes_client.api_version.matches(required_version):
                # we can use the new attach/detach API
                self._use_legacy_attach = False
def _brick_get_connector(self, protocol, driver=None,
execute=None,
use_multipath=False,
device_scan_attempts=3,
*args, **kwargs):
"""Wrapper to get a brick connector object.
This automatically populates the required protocol as well
as the root_helper needed to execute commands.
"""
return connector.InitiatorConnector.factory(
protocol,
brick_utils.get_root_helper(),
driver=driver,
execute=execute,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
*args, **kwargs)
def get_connector(self, multipath=False, enforce_multipath=False,
nic=None):
conn_prop = connector.get_connector_properties(
brick_utils.get_root_helper(),
brick_utils.get_ip(nic),
multipath=multipath,
enforce_multipath=(enforce_multipath),
execute=None)
return conn_prop
def attach(self, volume_id, hostname, mountpoint=None, mode='rw',
multipath=False, enforce_multipath=False, nic=None):
"""Main entry point for trying to attach a volume.
If the cinderclient has a recent version that can do the new attach
workflow, lets try that. Otherwise we revert to the older attach
workflow.
"""
if self._use_legacy_attach:
return self._legacy_attach(volume_id, hostname,
mountpoint=mountpoint,
mode=mode, multipath=multipath,
enforce_multipath=enforce_multipath,
nic=nic)
else:
return self._attach(volume_id, hostname,
mountpoint=mountpoint,
mode=mode, multipath=multipath,
enforce_multipath=enforce_multipath,
nic=nic)
def _legacy_attach(self, volume_id, hostname, mountpoint=None, mode='rw',
multipath=False, enforce_multipath=False, nic=None):
"""The original/legacy attach workflow."""
# Reserve volume before attachment
with actions.Reserve(self.volumes_client, volume_id) as cmd:
cmd.reserve()
with actions.InitializeConnection(
self.volumes_client, volume_id) as cmd:
connection = cmd.initialize(self, multipath, enforce_multipath,
nic)
with actions.VerifyProtocol(self.volumes_client, volume_id) as cmd:
cmd.verify(connection['driver_volume_type'])
with actions.ConnectVolume(self.volumes_client, volume_id) as cmd:
brick_connector = self._brick_get_connector(
connection['driver_volume_type'], do_local_attach=True)
device_info = cmd.connect(brick_connector,
connection['data'],
mountpoint, mode, hostname)
return device_info
def _attach(self, volume_id, hostname, mountpoint=None, mode='rw',
multipath=False, enforce_multipath=False, nic=None):
"""Attempt to use the v3 API for attach workflow.
If the cinder API microversion is good enough, we will use the new
attach workflow, otherwise we resort back to the old workflow.
"""
# We can use the new attach/detach workflow
connector_properties = self.get_connector(
multipath=multipath,
enforce_multipath=enforce_multipath,
nic=nic
)
instance_id = uuidutils.generate_uuid()
info = self.volumes_client.attachments.create(
volume_id, connector_properties, instance_id)
connection = info['connection_info']
with actions.VerifyProtocol(self.volumes_client, volume_id) as cmd:
cmd.verify(connection['driver_volume_type'])
brick_connector = self._brick_get_connector(
connection['driver_volume_type'],
do_local_attach=True,
use_multipath=multipath,
)
device_info = brick_connector.connect_volume(connection)
# MV 3.44 requires this step to move the volume to 'in-use'.
self.volumes_client.attachments.complete(
info['connection_info']['attachment_id'])
return device_info
def detach(self, volume_id, attachment_uuid=None, multipath=False,
enforce_multipath=False, device_info=None, nic=None):
if self._use_legacy_attach:
self._legacy_detach(volume_id,
attachment_uuid=attachment_uuid,
multipath=multipath,
enforce_multipath=enforce_multipath,
device_info=device_info, nic=nic)
else:
self._detach(volume_id,
attachment_uuid=attachment_uuid,
multipath=multipath,
enforce_multipath=enforce_multipath,
device_info=device_info, nic=nic)
def _legacy_detach(self, volume_id, attachment_uuid=None, multipath=False,
enforce_multipath=False, device_info=None, nic=None):
"""The original/legacy detach workflow."""
with actions.BeginDetach(self.volumes_client, volume_id) as cmd:
cmd.reserve()
with actions.InitializeConnectionForDetach(
self.volumes_client, volume_id) as cmd:
connection = cmd.initialize(self, multipath, enforce_multipath,
nic)
brick_connector = self._brick_get_connector(
connection['driver_volume_type'],
do_local_attach=True,
use_multipath=multipath,
)
with actions.DisconnectVolume(self.volumes_client, volume_id) as cmd:
cmd.disconnect(brick_connector, connection['data'], device_info)
with actions.DetachVolume(self.volumes_client, volume_id) as cmd:
cmd.detach(self, attachment_uuid, multipath, enforce_multipath)
def _detach(self, volume_id, attachment_uuid=None, multipath=False,
enforce_multipath=False, device_info=None, nic=None):
if not attachment_uuid:
# We need the specific attachment uuid to know which one to detach.
# if None was passed in we can only work if there is one and only
# one attachment for the volume.
# Get the list of attachments for the volume.
search_opts = {'volume_id': volume_id}
attachments = self.volumes_client.attachments.list(
search_opts=search_opts)
if len(attachments) == 0:
raise exceptions.NoAttachmentsFound(volume_id=volume_id)
if len(attachments) == 1:
attachment_uuid = attachments[0].id
else:
# We have more than 1 attachment and we don't know which to use
raise exceptions.NeedAttachmentUUID(volume_id=volume_id)
attachment = self.volumes_client.attachments.show(attachment_uuid)
brick_connector = self._brick_get_connector(
attachment.connection_info['driver_volume_type'],
do_local_attach=True,
use_multipath=multipath,
)
with actions.DisconnectVolume(self.volumes_client, volume_id) as cmd:
cmd.disconnect(brick_connector,
attachment.connection_info,
device_info)
self.volumes_client.attachments.delete(attachment_uuid)
def get_volume_paths(self, volume_id, use_multipath=False):
"""Gets volume paths on the system for a specific volume."""
conn_props = self.get_connector(multipath=use_multipath)
vols = self.volumes_client.volumes.list()
vol_in_use = False
vol_found = False
for vol in vols:
if (volume_id == vol.id or volume_id == vol.name):
vol_found = True
if vol.status == "in-use":
vol_in_use = True
# Make sure the volume ID is used and not the name
volume_id = vol.id
break
if not vol_found:
msg = "No volume with a name or ID of '%s' exists." % volume_id
raise exceptions.CommandError(msg)
paths = []
if vol_in_use:
conn_info = self.volumes_client.volumes.initialize_connection(
volume_id, conn_props)
protocol = conn_info['driver_volume_type']
conn = self._brick_get_connector(protocol,
use_multipath=use_multipath)
paths = conn.get_volume_paths(conn_info['data'])
return paths
def get_all_volume_paths(self, protocol, use_multipath=False):
"""Gets all volume paths on the system for a given protocol."""
conn = self._brick_get_connector(protocol, use_multipath=use_multipath)
paths = conn.get_all_available_volumes()
return paths
| {
"content_hash": "dec345d91685cd213788419fb510d27d",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 42.02325581395349,
"alnum_prop": 0.5832872163807415,
"repo_name": "openstack/python-brick-cinderclient-ext",
"id": "18b440bd9d706d8a205b4047d657c0e6aab70b3e",
"size": "11388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brick_cinderclient_ext/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "55020"
},
{
"name": "Shell",
"bytes": "299"
}
],
"symlink_target": ""
} |
"""An integration test for datastore_write_it_pipeline
This test creates entities and writes them to Cloud Datastore. Subsequently,
these entities are read from Cloud Datastore, compared to the expected value
for the entity, and deleted.
There is no output; instead, we use `assert_that` transform to verify the
results in the pipeline.
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import os
import random
import sys
import unittest
from datetime import datetime
from hamcrest.core.core.allof import all_of
from nose.plugins.attrib import attr
from apache_beam.testing.pipeline_verifiers import PipelineStateMatcher
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.io.gcp import datastore_write_it_pipeline
except TypeError:
datastore_write_it_pipeline = None # type: ignore
@unittest.skipIf(
    sys.version_info[0] == 3 and os.environ.get('RUN_SKIPPED_PY3_TESTS') != '1',
    'This test still needs to be fixed on Python 3. '
    'TODO: BEAM-4543')
class DatastoreWriteIT(unittest.TestCase):
  """Integration test that writes, reads and deletes Datastore entities.

  Delegates the actual pipeline to datastore_write_it_pipeline; this class
  only builds the pipeline options and success matchers.
  """

  NUM_ENTITIES = 1001
  LIMIT = 500

  def run_datastore_write(self, limit=None):
    """Runs the write/read/delete pipeline against a unique Datastore kind.

    Args:
      limit: optional maximum number of entities to read back.
    """
    test_pipeline = TestPipeline(is_integration_test=True)
    # Randomize the kind name so concurrent runs do not collide.
    current_time = datetime.now().strftime("%m%d%H%M%S")
    seed = random.randint(0, 100000)
    kind = 'testkind%s%d' % (current_time, seed)
    pipeline_verifiers = [PipelineStateMatcher()]
    extra_opts = {
        'kind': kind,
        'num_entities': self.NUM_ENTITIES,
        'on_success_matcher': all_of(*pipeline_verifiers)
    }
    if limit is not None:
      extra_opts['limit'] = limit
    datastore_write_it_pipeline.run(
        test_pipeline.get_full_options_as_args(**extra_opts))

  @attr('IT')
  @unittest.skipIf(
      datastore_write_it_pipeline is None, 'GCP dependencies are not installed')
  def test_datastore_write_limit(self):
    self.run_datastore_write(limit=self.LIMIT)
if __name__ == '__main__':
  # Surface pipeline progress in the console when run directly.
  logging.getLogger().setLevel(logging.INFO)
  unittest.main()
| {
"content_hash": "6795e9caa854999bf2e411619b73cbb7",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 29.130434782608695,
"alnum_prop": 0.7189054726368159,
"repo_name": "axbaretto/beam",
"id": "4961da01c142f8a50f107cd502fe77b522057f91",
"size": "2795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/gcp/datastore_write_it_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import json
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard.api import glance
from openstack_dashboard.dashboards.admin.metadata_defs \
import constants
from openstack_dashboard.dashboards.admin.metadata_defs \
import forms as admin_forms
from openstack_dashboard.dashboards.admin.metadata_defs \
import tables as admin_tables
from openstack_dashboard.dashboards.admin.metadata_defs \
import tabs as admin_tabs
class AdminIndexView(tables.DataTableView):
    """Paginated listing of all Glance metadata definition namespaces."""
    table_class = admin_tables.AdminNamespacesTable
    template_name = constants.METADATA_INDEX_TEMPLATE

    def has_prev_data(self, table):
        return self._prev

    def has_more_data(self, table):
        return self._more

    def get_data(self):
        namespaces = []
        table_meta = admin_tables.AdminNamespacesTable._meta
        prev_marker = self.request.GET.get(table_meta.prev_pagination_param,
                                           None)
        paging_backwards = prev_marker is not None
        if paging_backwards:
            # Walking back to the previous page: glance returns it in
            # reverse order.
            sort_dir = 'desc'
            marker = prev_marker
        else:
            sort_dir = 'asc'
            marker = self.request.GET.get(table_meta.pagination_param, None)
        try:
            namespaces, self._more, self._prev = \
                glance.metadefs_namespace_list(self.request,
                                               marker=marker,
                                               paginate=True,
                                               sort_dir=sort_dir)
            if paging_backwards:
                # Re-sort the reversed page so it displays consistently.
                namespaces = sorted(namespaces,
                                    key=lambda ns: ns.namespace,
                                    reverse=True)
        except Exception:
            self._prev = False
            self._more = False
            msg = _('Error getting metadata definitions.')
            exceptions.handle(self.request, msg)
        return namespaces
class CreateView(forms.ModalFormView):
    """Modal form view for creating a new metadata definition namespace."""
    form_class = admin_forms.CreateNamespaceForm
    template_name = constants.METADATA_CREATE_TEMPLATE
    context_object_name = 'namespace'
    success_url = reverse_lazy(constants.METADATA_INDEX_URL)
class DetailView(tabs.TabView):
    """Tabbed detail page for a single metadata definition namespace."""
    redirect_url = constants.METADATA_INDEX_URL
    tab_group_class = admin_tabs.NamespaceDetailTabs
    template_name = constants.METADATA_DETAIL_TEMPLATE

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        context["namespace"] = self.get_data()
        return context

    @memoized.memoized_method
    def get_data(self):
        """Fetch the namespace once; memoized across template/tab access."""
        try:
            return glance.metadefs_namespace_get(
                self.request, self.kwargs['namespace_id'], wrap=True)
        except Exception:
            url = reverse_lazy(constants.METADATA_INDEX_URL)
            exceptions.handle(self.request,
                              _('Unable to retrieve namespace details.'),
                              redirect=url)

    def get_tabs(self, request, *args, **kwargs):
        return self.tab_group_class(request,
                                    namespace=self.get_data(),
                                    **kwargs)
class ManageResourceTypes(forms.ModalFormView):
    """Modal form for editing a namespace's resource type associations."""
    template_name = constants.METADATA_MANAGE_RESOURCES_TEMPLATE
    form_class = admin_forms.ManageResourceTypesForm
    success_url = reverse_lazy(constants.METADATA_INDEX_URL)

    def get_initial(self):
        namespace_id = self.kwargs["id"]
        try:
            resource_types = glance.metadefs_namespace_resource_types(
                self.request, namespace_id)
        except Exception:
            resource_types = []
            msg = _('Error getting resource type associations.')
            exceptions.handle(self.request, msg)
        return {'id': namespace_id,
                'resource_types': resource_types}

    def get_context_data(self, **kwargs):
        context = super(ManageResourceTypes, self).get_context_data(**kwargs)
        associated = context['form'].initial['resource_types']
        associated_names = [rtype['name'] for rtype in associated]
        try:
            # Set the basic types that aren't already associated
            result = [rtype for rtype in
                      glance.metadefs_resource_types_list(self.request)
                      if rtype['name'] not in associated_names]
        except Exception:
            result = []
            msg = _('Error getting resource type associations.')
            exceptions.handle(self.request, msg)
        # Add the resource types previously associated, includes prefix, etc
        for initial_type in associated:
            selected_type = initial_type.copy()
            selected_type['selected'] = True
            result.insert(0, selected_type)
        context['id'] = self.kwargs['id']
        try:
            context["resource_types"] = json.dumps(result)
        except Exception:
            context["resource_types"] = "[]"
            msg = _('Error getting resource type associations.')
            exceptions.handle(self.request, msg)
        return context
| {
"content_hash": "11f1e0445350232c5c891809d514bb23",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 36.42567567567568,
"alnum_prop": 0.6112038582823224,
"repo_name": "CiscoSystems/avos",
"id": "dce599611db8c50010ed60a3af298b4d0da86227",
"size": "6032",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/metadata_defs/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "85008"
},
{
"name": "HTML",
"bytes": "457426"
},
{
"name": "JavaScript",
"bytes": "904618"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4318649"
},
{
"name": "Scala",
"bytes": "894"
},
{
"name": "Shell",
"bytes": "17503"
}
],
"symlink_target": ""
} |
"""ImageNet preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow.compat.v1 as tf
IMAGE_SIZE = 224
CROP_PADDING = 32
def distorted_bounding_box_crop(image_bytes,
                                bbox,
                                min_object_covered=0.1,
                                aspect_ratio_range=(0.75, 1.33),
                                area_range=(0.05, 1.0),
                                max_attempts=100,
                                scope=None):
  """Randomly crops `image_bytes` using a box distorted from `bbox`.

  Delegates the box selection to `tf.image.sample_distorted_bounding_box`
  and decodes only the selected window of the JPEG.

  Args:
    image_bytes: `Tensor` of binary image data.
    bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]`,
      each coordinate in [0, 1) and ordered `[ymin, xmin, ymax, xmax]`.
      If num_boxes is 0 the whole image is used.
    min_object_covered: minimum fraction of any supplied bounding box the
      cropped area must contain. Defaults to `0.1`.
    aspect_ratio_range: allowed width / height range for the crop.
    area_range: allowed fraction of the source image area for the crop.
    max_attempts: number of sampling attempts before falling back to the
      entire image.
    scope: Optional `str` for name scope.

  Returns:
    cropped image `Tensor`
  """
  with tf.name_scope(scope, 'distorted_bounding_box_crop',
                     [image_bytes, bbox]):
    jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
    sampled_box = tf.image.sample_distorted_bounding_box(
        jpeg_shape,
        bounding_boxes=bbox,
        min_object_covered=min_object_covered,
        aspect_ratio_range=aspect_ratio_range,
        area_range=area_range,
        max_attempts=max_attempts,
        use_image_if_no_bounding_boxes=True)
    box_begin, box_size, _ = sampled_box

    # Decode just the sampled window instead of the whole image.
    offset_y, offset_x, _ = tf.unstack(box_begin)
    target_height, target_width, _ = tf.unstack(box_size)
    crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
    return tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
def _at_least_x_are_equal(a, b, x):
  """Returns a bool `Tensor`: whether at least `x` elements of `a` and `b` match."""
  equal_mask = tf.cast(tf.equal(a, b), tf.int32)
  return tf.greater_equal(tf.reduce_sum(equal_mask), x)
def _resize_image(image, image_size, method=None):
  """Resizes `image` to a square of side `image_size`.

  Args:
    image: image `Tensor` to resize.
    image_size: target height and width.
    method: optional resize method passed to `tf.image.resize`; when `None`,
      bicubic resizing is used.

  Returns:
    The resized image `Tensor`.
  """
  if method is not None:
    # Use absl logging with lazy %-args for consistency with the rest of
    # this module, instead of the deprecated tf.logging API.
    logging.info('Use customized resize method %s', method)
    return tf.image.resize([image], [image_size, image_size], method)[0]
  logging.info('Use default resize_bicubic.')
  return tf.image.resize_bicubic([image], [image_size, image_size])[0]
def _decode_and_random_crop(image_bytes, image_size, resize_method=None):
  """Makes a random crop of image_size, falling back to a center crop."""
  whole_image_bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
                                 dtype=tf.float32, shape=[1, 1, 4])
  cropped = distorted_bounding_box_crop(
      image_bytes,
      whole_image_bbox,
      min_object_covered=0.1,
      aspect_ratio_range=(3. / 4, 4. / 3.),
      area_range=(0.08, 1.0),
      max_attempts=10,
      scope=None)
  original_shape = tf.image.extract_jpeg_shape(image_bytes)
  # When the sampler gave up it returns the full image; detect that and
  # use the deterministic center crop instead.
  sampling_failed = _at_least_x_are_equal(original_shape,
                                          tf.shape(cropped), 3)
  return tf.cond(
      sampling_failed,
      lambda: _decode_and_center_crop(image_bytes, image_size),
      lambda: _resize_image(cropped, image_size, resize_method))
def _decode_and_center_crop(image_bytes, image_size, resize_method=None):
  """Crops to center of image with padding then scales image_size."""
  jpeg_shape = tf.image.extract_jpeg_shape(image_bytes)
  height = jpeg_shape[0]
  width = jpeg_shape[1]

  # Side of the centered square, leaving CROP_PADDING pixels of margin
  # relative to the target size.
  crop_size = tf.cast(
      ((image_size / (image_size + CROP_PADDING)) *
       tf.cast(tf.minimum(height, width), tf.float32)),
      tf.int32)

  offset_height = ((height - crop_size) + 1) // 2
  offset_width = ((width - crop_size) + 1) // 2
  crop_window = tf.stack([offset_height, offset_width,
                          crop_size, crop_size])
  cropped = tf.image.decode_and_crop_jpeg(image_bytes, crop_window,
                                          channels=3)
  return _resize_image(cropped, image_size, resize_method)
def _flip(image):
  """Randomly mirrors the image left/right."""
  return tf.image.random_flip_left_right(image)
def preprocess_for_train(image_bytes,
                         use_bfloat16,
                         image_size=IMAGE_SIZE,
                         augment_name=None,
                         randaug_num_layers=None,
                         randaug_magnitude=None,
                         resize_method=None):
  """Preprocesses the given image for training.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.
    augment_name: `string` that is the name of the augmentation method
      to apply to the image. `autoaugment` if AutoAugment is to be used or
      `randaugment` if RandAugment is to be used. If the value is `None` no
      augmentation method will be applied. See autoaugment.py for more
      details.
    randaug_num_layers: 'int', if RandAug is used, what should the number of
      layers be. See autoaugment.py for detailed description.
    randaug_magnitude: 'int', if RandAug is used, what should the magnitude
      be. See autoaugment.py for detailed description.
    resize_method: resize method. If none, use bicubic.

  Returns:
    A preprocessed image `Tensor`.
  """
  image = _decode_and_random_crop(image_bytes, image_size, resize_method)
  image = _flip(image)
  image = tf.reshape(image, [image_size, image_size, 3])
  if augment_name:
    try:
      from tensorflow_examples.lite.model_maker.third_party.efficientdet.backbone import autoaugment  # pylint: disable=g-import-not-at-top
    except ImportError as e:
      logging.exception('Autoaugment is not supported in TF 2.x.')
      raise
    # Log once here instead of again inside the 'autoaugment' branch
    # (the original emitted this line twice for AutoAugment).
    logging.info('Apply AutoAugment policy %s', augment_name)
    input_image_type = image.dtype
    # The augmentation policies operate on uint8 images in [0, 255].
    image = tf.clip_by_value(image, 0.0, 255.0)
    image = tf.cast(image, dtype=tf.uint8)
    if augment_name == 'autoaugment':
      image = autoaugment.distort_image_with_autoaugment(image, 'v0')
    elif augment_name == 'randaugment':
      image = autoaugment.distort_image_with_randaugment(
          image, randaug_num_layers, randaug_magnitude)
    else:
      raise ValueError('Invalid value for augment_name: %s' % (augment_name))
    image = tf.cast(image, dtype=input_image_type)
  image = tf.image.convert_image_dtype(
      image, dtype=tf.bfloat16 if use_bfloat16 else tf.float32)
  return image
def preprocess_for_eval(image_bytes,
                        use_bfloat16,
                        image_size=IMAGE_SIZE,
                        resize_method=None):
  """Preprocesses the given image for evaluation.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.
    resize_method: if None, use bicubic.

  Returns:
    A preprocessed image `Tensor`.
  """
  image = _decode_and_center_crop(image_bytes, image_size, resize_method)
  image = tf.reshape(image, [image_size, image_size, 3])
  target_dtype = tf.bfloat16 if use_bfloat16 else tf.float32
  return tf.image.convert_image_dtype(image, dtype=target_dtype)
def preprocess_image(image_bytes,
                     is_training=False,
                     use_bfloat16=False,
                     image_size=IMAGE_SIZE,
                     augment_name=None,
                     randaug_num_layers=None,
                     randaug_magnitude=None,
                     resize_method=None):
  """Preprocesses the given image.

  Dispatches to `preprocess_for_train` or `preprocess_for_eval` depending
  on `is_training`.

  Args:
    image_bytes: `Tensor` representing an image binary of arbitrary size.
    is_training: `bool` for whether the preprocessing is for training.
    use_bfloat16: `bool` for whether to use bfloat16.
    image_size: image size.
    augment_name: `string` that is the name of the augmentation method
      to apply to the image. `autoaugment` if AutoAugment is to be used or
      `randaugment` if RandAugment is to be used. If the value is `None` no
      augmentation method will be applied. See autoaugment.py for more
      details.
    randaug_num_layers: 'int', if RandAug is used, what should the number of
      layers be. See autoaugment.py for detailed description.
    randaug_magnitude: 'int', if RandAug is used, what should the magnitude
      be. See autoaugment.py for detailed description.
    resize_method: 'string' or None. Use resize_bicubic in default.

  Returns:
    A preprocessed image `Tensor` with value range of [0, 255].
  """
  if is_training:
    return preprocess_for_train(
        image_bytes, use_bfloat16, image_size, augment_name,
        randaug_num_layers, randaug_magnitude, resize_method)
  else:
    return preprocess_for_eval(image_bytes, use_bfloat16, image_size,
                               resize_method)
| {
"content_hash": "93e7d4c04777e652a0c8559821366d1d",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 139,
"avg_line_length": 39.21862348178138,
"alnum_prop": 0.652111076700733,
"repo_name": "tensorflow/examples",
"id": "85b94e6bb03b61dff40cba509736fa890c03dd42",
"size": "10376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_examples/lite/model_maker/third_party/efficientdet/backbone/preprocessing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "106227"
},
{
"name": "CMake",
"bytes": "1553"
},
{
"name": "CSS",
"bytes": "4746"
},
{
"name": "Dockerfile",
"bytes": "467"
},
{
"name": "HTML",
"bytes": "12491"
},
{
"name": "Java",
"bytes": "305092"
},
{
"name": "JavaScript",
"bytes": "24461"
},
{
"name": "Jupyter Notebook",
"bytes": "1733035"
},
{
"name": "Kotlin",
"bytes": "631463"
},
{
"name": "Objective-C",
"bytes": "14639"
},
{
"name": "Objective-C++",
"bytes": "14293"
},
{
"name": "Python",
"bytes": "1232357"
},
{
"name": "Ruby",
"bytes": "3744"
},
{
"name": "Shell",
"bytes": "41573"
},
{
"name": "Starlark",
"bytes": "17498"
},
{
"name": "Swift",
"bytes": "553535"
}
],
"symlink_target": ""
} |
"""
BoardGameGeek.com interface for pyprototypr
Based off of the BGGGame object, for example::
{'_comments': [],
'_data': {'accessory': False,
'alternative_names': ['德国大选', '디 마허'],
'artists': ['Marcus Gschwendtner', 'Harald Lieske'],
'categories': ['Economic', 'Negotiation', 'Political'],
'description': 'Die Macher is a game about seven sequential '
'political races in different regions of Germany. '
'Players are in charge of national political '
'parties, and must manage limited resources to help '
'their party to victory. The winning party will have '
'the most victory points after all the regional '
'elections. There are four different ways of scoring '
'victory points. First, each regional election can '
'supply one to eighty victory points, depending on '
'the size of the region and how well your party does '
'in it. Second, if a party wins a regional election '
'and has some media influence in the region, then '
'the party will receive some media-control victory '
'points. Third, each party has a national party '
'membership which will grow as the game progresses '
'and this will supply a fair number of victory '
'points. Lastly, parties score some victory points '
'if their party platform matches the national '
'opinions at the end of the game.\n'
'\n'
'The 1986 edition featured four parties from the old '
'West Germany and supported 3-4 players. The 1997 '
'edition supports up to five players in the '
're-united Germany and updated several features of '
'the rules as well. The 2006 edition also supports '
'up to five players and adds a shorter five-round '
'variant and additional rules updates by the '
'original designer.\n'
'\n',
'designers': ['Karl-Heinz Schmiel'],
'expands': [],
'expansion': False,
'expansions': [],
'families': ['Country: Germany',
'Political: Elections',
'Series: Classic Line (Valley Games)'],
'id': 1,
'image': 'https://cf.geekdo-images.com/rpwCZAjYLD940NWwP3SRoA__original/img/yR0aoBVKNrAmmCuBeSzQnMflLYg=/0x0/filters:format(jpeg)/pic4718279.jpg',
'implementations': [],
'maxplayers': 5,
'maxplaytime': 240,
'mechanics': ['Alliances',
'Area Majority / Influence',
'Auction/Bidding',
'Dice Rolling',
'Hand Management',
'Simultaneous Action Selection'],
'minage': 14,
'minplayers': 3,
'minplaytime': 240,
'name': 'Die Macher',
'playingtime': 240,
'publishers': ['Hans im Glück',
'Moskito Spiele',
'Portal Games',
'Spielworxx',
'sternenschimmermeer',
'Stronghold Games',
'Valley Games, Inc.',
'YOKA Games'],
'stats': {'average': 7.61437,
'averageweight': 4.3206,
'bayesaverage': 7.10354,
'median': 0.0,
'numcomments': 2011,
'numweights': 761,
'owned': 7511,
'ranks': [{'friendlyname': 'Board Game Rank',
'id': '1',
'name': 'boardgame',
'value': 316},
{'friendlyname': 'Strategy Game Rank',
'id': '5497',
'name': 'strategygames',
'value': 180}],
'stddev': 1.58031,
'trading': 249,
'usersrated': 5356,
'wanting': 504,
'wishing': 2050},
'suggested_players': {'results': {
'1': {'best_rating': 0,
'not_recommended_rating': 83,
'recommended_rating': 1},
'2': {'best_rating': 0,
'not_recommended_rating': 85,
'recommended_rating': 1},
'3': {'best_rating': 2,
'not_recommended_rating': 73,
'recommended_rating': 26},
'4': {'best_rating': 25,
'not_recommended_rating': 9,
'recommended_rating': 84},
'5': {'best_rating': 111,
'not_recommended_rating': 2,
'recommended_rating': 12},
'5+': {'best_rating': 1,
'not_recommended_rating': 59,
'recommended_rating': 0}},
'total_votes': 132},
'thumbnail': 'https://cf.geekdo-images.com/rpwCZAjYLD940NWwP3SRoA__thumb/img/YT6svCVsWqLrDitcMEtyazVktbQ=/fit-in/200x150/filters:strip_icc()/pic4718279.jpg',
'yearpublished': 1986},
'_expands': [],
'_expands_set': set(),
'_expansions': [],
'_expansions_set': set(),
'_id': 1,
'_image': 'https://cf.geekdo-images.com/rpwCZAjYLD940NWwP3SRoA__original/img/yR0aoBVKNrAmmCuBeSzQnMflLYg=/0x0/filters:format(jpeg)/pic4718279.jpg',
'_name': 'Die Macher',
'_player_suggestion': [<boardgamegeek.objects.games.PlayerSuggestion>,
<boardgamegeek.objects.games.PlayerSuggestion>,
<boardgamegeek.objects.games.PlayerSuggestion>,
<boardgamegeek.objects.games.PlayerSuggestion>,
<boardgamegeek.objects.games.PlayerSuggestion>,
<boardgamegeek.objects.games.PlayerSuggestion>],
'_stats': <boardgamegeek.objects.games.BoardGameStats>,
'_thumbnail': 'https://cf.geekdo-images.com/rpwCZAjYLD940NWwP3SRoA__thumb/img/YT6svCVsWqLrDitcMEtyazVktbQ=/fit-in/200x150/filters:strip_icc()/pic4718279.jpg',
'_versions': [],
'_versions_set': set(),
'_videos': [],
'_videos_ids': set(),
'_year_published': 1986
}
"""
# future
from __future__ import division
# lib
# third party
from boardgamegeek import BGGClient
class BGGGameList(object):
    """Lists which are groups of multiple games' string-based properties."""

    # Every per-game property accumulated by set_values(); kept in one
    # place so __init__ and set_values cannot drift apart.
    _FIELDS = (
        'alternative_names', 'artists', 'average', 'averageweight',
        'bayesaverage', 'categories', 'description', 'designers',
        'expands', 'expansion', 'expansions', 'families', 'id', 'image',
        'implementations', 'maxplayers', 'mechanics', 'median', 'minage',
        'minplayers', 'name', 'numcomments', 'numweights', 'owned',
        'playingtime', 'publishers', 'ranks', 'stddev', 'thumbnail',
        'trading', 'usersrated', 'wanting', 'wishing', 'yearpublished',
        # custom fields
        'players', 'description_short', 'age',
    )

    def __init__(self):
        """Create an empty list per tracked property."""
        self.bgg = BGGClient()
        for field in self._FIELDS:
            setattr(self, field, [])

    def set_values(self, game):
        """Append each property of `game` to its matching list."""
        self._game = game  # BGGGame object
        if self._game:
            for field in self._FIELDS:
                getattr(self, field).append(getattr(self._game, field))
class BGGGame(object):
    """Wrapper around the `game` object from boardgamegeek.api.

    Looks up a game on BoardGameGeek (by numeric id or by name) and exposes
    both raw (``_``-prefixed) and display-string versions of its properties.
    """

    def __init__(self, game_id, short=500):
        """
        Args:
            game_id: int or str
                BGG numeric id of the game, or its name
            short: int
                number of characters to use for short description

        NOTE(review): a try/except that swallowed construction errors was
        previously commented out here; lookup failures now propagate.
        """
        self._game = None
        self.short = int(short) or 500
        self.bgg = BGGClient()
        if isinstance(game_id, int):
            self._game = self.bgg.game(game_id=game_id)
        elif isinstance(game_id, str):
            # BUG FIX: was `isinstance(game_id, "")`, which raises TypeError
            # because isinstance() requires a type, not a str instance.
            self._game = self.bgg.game(name=game_id)
        else:
            # Unsupported id type: leave self._game as None; set_properties()
            # is then a no-op and no attributes are populated.
            pass
        self.set_properties()

    def get_description_short(self):
        """Create an abbreviated description for a game.

        Truncates the full description to roughly ``self.short`` characters,
        extending the cut slightly based on punctuation density, and appends
        an ellipsis. Returns None when no game was loaded.
        """
        if self._game:
            desc = self._game.description[0:self.short]
            # Extend the cut point by half the punctuation count so dense
            # prose does not get truncated too aggressively.
            _cut = int(
                (len(desc) -
                 len(desc.replace(',', '').replace('.', '').replace(':', '')))
                / 2 + self.short)
            desc = self._game.description[0:_cut]
            return desc[0:-3] + '...'

    def set_properties(self):
        """Create both raw (_ prefix) and string formatted versions of props.

        No-op when no game was loaded (``self._game`` is None).
        """
        if self._game:
            self._alternative_names = self._game.alternative_names
            self.alternative_names = ', '.join(self._game.alternative_names)
            self._artists = self._game.artists
            self.artists = ', '.join(self._game.artists)
            self._average = self._game.stats['average']
            self.average = '%.3f' % self._game.stats['average']
            self._averageweight = self._game.stats['averageweight']
            self.averageweight = '%.3f' % self._game.stats['averageweight']
            self._bayesaverage = self._game.stats['bayesaverage']
            self.bayesaverage = '%.3f' % self._game.stats['bayesaverage']
            self._categories = self._game.categories
            self.categories = ', '.join(self._game.categories)
            self._description = self._game.description
            self.description = f"{self._game.description}"
            self._designers = self._game.designers
            self.designers = ', '.join(self._game.designers)
            self._expands = self._game.expands
            self.expands = ', '.join(self._game.expands)
            self._expansion = self._game.expansion
            # BUG FIX: the negative case used to be the string 'False',
            # which was inconsistent with the 'Yes' positive case.
            self.expansion = 'Yes' if self._game.expansion is True else 'No'
            self._expansions = self._game.expansions
            if self._expansions:
                names = []
                for exp in self._expansions:
                    names.append(exp.name)
                self.expansions = ', '.join(names)
            else:
                self.expansions = ''
            self._families = self._game.families
            self.families = ', '.join(self._game.families)
            self._id = self._game.id
            self.id = f"{self._game.id}"
            self._image = self._game.image
            self.image = f"{self._game.image}"
            self._implementations = self._game.implementations
            self.implementations = ', '.join(self._game.implementations)
            self._maxplayers = self._game.maxplayers
            self.maxplayers = f"{self._game.maxplayers}"
            self._mechanics = self._game.mechanics
            self.mechanics = ', '.join(self._game.mechanics)
            self._median = self._game.stats['median']
            self.median = '%.3f' % self._game.stats['median']
            self._minage = self._game.minage
            self.minage = f"{self._game.minage}"
            self._minplayers = self._game.minplayers
            self.minplayers = f"{self._game.minplayers}"
            self._name = self._game.name
            self.name = f"{self._game.name}"
            self._numcomments = self._game.stats['numcomments']
            self.numcomments = f"{self._game.stats['numcomments']}"
            self._numweights = self._game.stats['numweights']
            self.numweights = f"{self._game.stats['numweights']}"
            self._owned = self._game.stats['owned']
            self.owned = f"{self._game.stats['owned']}"
            self._playingtime = self._game.playingtime
            self.playingtime = f"{self._game.playingtime}"
            self._publishers = self._game.publishers
            self.publishers = ', '.join(self._game.publishers)
            self._ranks = self._game.stats['ranks']
            self.ranks = f"{self._game.stats['ranks']}"
            self._stddev = self._game.stats['stddev']
            self.stddev = '%.3f' % self._game.stats['stddev']
            self._thumbnail = self._game.thumbnail
            self.thumbnail = f"{self._game.thumbnail}"
            self._trading = self._game.stats['trading']
            self.trading = f"{self._game.stats['trading']}"
            self._usersrated = self._game.stats['usersrated']
            self.usersrated = f"{self._game.stats['usersrated']}"
            self._wanting = self._game.stats['wanting']
            self.wanting = f"{self._game.stats['wanting']}"
            self._wishing = self._game.stats['wishing']
            self.wishing = f"{self._game.stats['wishing']}"
            self._yearpublished = self._game.yearpublished
            self.yearpublished = f"{self._game.yearpublished}"
            # custom fields
            self.description_short = self.get_description_short()
            self._description_short = self.description_short
            # Collapse "N-N" player ranges to a single number.
            if self._game.minplayers == self._game.maxplayers:
                self.players = f"{self._game.maxplayers}"
            else:
                self.players = f"{self._game.minplayers}-{self._game.maxplayers}"
            self._players = (self._game.minplayers, self._game.maxplayers)
            self.age = f"{self._game.minage}+"
            self._age = self._game.minage
| {
"content_hash": "f3a841fc4f99122601bc4204b1e59476",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 172,
"avg_line_length": 48.04788732394366,
"alnum_prop": 0.5053057395790584,
"repo_name": "gamesbook/pyprototypr",
"id": "97bbb0165f1be6203fe49292b43e769198c1293f",
"size": "17118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyprototypr/bgg.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "193315"
}
],
"symlink_target": ""
} |
import numpy as np
import time
from tridesclous.waveformtools import extract_chunks
# Benchmark dimensions; the commented-out (``#~``) values are larger
# alternatives for heavier runs.
#~ size = 1000000
size = 1000  # number of samples per channel
nb_channel = 5  # number of recording channels
#~ nb_channel = 300
width = 150  # waveform chunk width, in samples
nb_peak = 20003  # number of peak indexes to extract
def test_extract_chunks_memory():
    """Time in-memory chunk extraction, without and with a preallocated buffer."""
    signals = np.random.randn(size, nb_channel).astype('float32')
    indexes = np.random.randint(low=width, high=size - width, size=nb_peak)

    # First pass: let extract_chunks allocate the output array itself.
    start = time.perf_counter()
    chunks = extract_chunks(signals, indexes, width, chunks=None)
    print('extract_chunks no buffer', time.perf_counter() - start)

    # Second pass: reuse the zeroed result as a preallocated output buffer.
    chunks[:] = 0
    start = time.perf_counter()
    chunks = extract_chunks(signals, indexes, width, chunks=chunks)
    print('extract_chunks with buffer', time.perf_counter() - start)
def test_extract_chunks_memmap():
    """Time memmap-to-memmap chunk extraction.

    FIX: the memmap backing files used to be created in the current working
    directory ('test_extract_wf_signal'/'test_extract_wf_chunks') and were
    never removed, littering the repo. They now live in a temporary
    directory that is deleted when the test finishes.
    """
    import os
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        signals = np.memmap(os.path.join(tmp_dir, 'test_extract_wf_signal'),
                            dtype='float32', mode='w+', shape=(size, nb_channel))
        indexes = np.random.randint(low=width, high=size-width, size=nb_peak)
        chunks = np.memmap(os.path.join(tmp_dir, 'test_extract_wf_chunks'),
                           dtype='float32', mode='w+', shape=(nb_peak, width, nb_channel))
        t0 = time.perf_counter()
        chunks = extract_chunks(signals, indexes, width, chunks=chunks)
        chunks.flush()
        t1 = time.perf_counter()
        print('extract_chunks memmap to memmap', t1-t0)
        # Drop references so the mapped files can be removed on all platforms.
        del signals, chunks
def test_extract_chunks_with_channel_indexes():
    """Time extraction restricted to a subset of the channels."""
    signals = np.random.randn(size, nb_channel).astype('float32')
    indexes = np.random.randint(low=width, high=size - width, size=nb_peak)

    start = time.perf_counter()
    chunks = extract_chunks(signals, indexes, width, channel_indexes=[0, 2, 3], chunks=None)
    print('extract_chunks channel_indexes', time.perf_counter() - start)
if __name__ == '__main__':
    # Run all benchmarks when executed directly (rather than under pytest).
    test_extract_chunks_memory()
    test_extract_chunks_memmap()
    test_extract_chunks_with_channel_indexes()
| {
"content_hash": "621458c3248af6cfa6b7f9d1cbf091c8",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 112,
"avg_line_length": 26.705882352941178,
"alnum_prop": 0.6657488986784141,
"repo_name": "tridesclous/tridesclous",
"id": "7273637a9b0149ce91c3ad6c1c0da08442015e42",
"size": "1816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tridesclous/tests/test_waveformtools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "5644"
},
{
"name": "Jupyter Notebook",
"bytes": "29404"
},
{
"name": "Python",
"bytes": "2160924"
}
],
"symlink_target": ""
} |
from django.contrib.auth.models import AbstractUser
from django.db import models
from dicom_models.staging.models import RadiologyStudy
from prioritizers import registry as prioritizers
from solo.models import SingletonModel
class StudyList(models.Model):
    """An optionally named collection of radiology studies."""

    name = models.CharField(max_length=100, blank=True, null=True)
    studies = models.ManyToManyField(RadiologyStudy)

    def __unicode__(self):
        # Fall back to a generic label when the list has no name.
        return u'%s' % self.name if self.name else u'Study List Object'
class Reviewer(AbstractUser):
    """Custom user model for people reviewing radiology studies."""
    # Per-reviewer prioritizer choice; presumably overrides
    # Config.default_prioritizer when set -- confirm against consuming code.
    prioritizer = models.CharField(max_length=100, blank=True, null=True,
            choices=prioritizers.choices)
    # Optional study list assigned to this reviewer.
    study_list = models.ForeignKey(StudyList, null=True, blank=True)
class Config(SingletonModel):
    """Singleton holding app-wide default prioritizer and study list."""
    default_prioritizer = models.CharField(max_length=100, blank=True, null=True,
            choices=prioritizers.choices)
    default_study_list = models.ForeignKey(StudyList, null=True, blank=True)
    def __unicode__(self):
        return u'App configuration'
    class Meta:
        # Human-readable name shown in the Django admin.
        verbose_name = "App Configuration"
        verbose_name_plural = "App Configuration"
| {
"content_hash": "a297124da0528e1002e4e1ce7ad7fba1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 81,
"avg_line_length": 35.09375,
"alnum_prop": 0.717720391807658,
"repo_name": "chop-dbhi/django-dicom-review",
"id": "ab27822b0c87deab703679ddb0a0a141e8b350e3",
"size": "1123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dicom_review/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "619097"
},
{
"name": "Python",
"bytes": "22452"
},
{
"name": "Scala",
"bytes": "2056"
}
],
"symlink_target": ""
} |
# Human-readable account name constant for this package.
ACCOUNT_NAME = 'Tiger Chef'
| {
"content_hash": "614ed4f9c511416784e0d5f5882bcb0b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.7142857142857143,
"repo_name": "ddy88958620/lib",
"id": "bce8a752bfe891fa1e65e33d66059ae52125e0aa",
"size": "28",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python/scrapy/tigerchef/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""Global Registry for the task adaptation framework.
"""
import ast
import functools
import logging
def partialclass(cls, *base_args, **base_kwargs):
  """Builds a subclass with partial application of the given args and keywords.

  Works like functools.partial for classes: ``base_args`` are prepended to the
  positional arguments supplied at instantiation time, and ``base_kwargs`` are
  merged with (and may be overridden by) the keyword arguments supplied then.

  Args:
    cls: The base class.
    *base_args: Positional arguments to be applied to the subclass.
    **base_kwargs: Keyword arguments to be applied to the subclass.

  Returns:
    A subclass of the input class.

  Author: Joan Puigcerver (jpuigcerver@)
  """

  class _Bound(cls):

    def __init__(self, *args, **kwargs):
      # dict(base_kwargs, **kwargs): call-time kwargs win over bound ones.
      merged_kwargs = dict(base_kwargs, **kwargs)
      super(_Bound, self).__init__(*(base_args + args), **merged_kwargs)

  return _Bound
def partialfactory(cls, lookup_string, *base_args, **base_kwargs):
  """Builds a factory with partial application of given args and keywords.

  Works like functools.partial, but logs a warning whenever a keyword bound at
  registration time is overridden at call time, to prevent potential future
  headaches. ``base_args`` are prepended to the positional arguments given at
  each factory call and ``base_kwargs`` are merged with the call's kwargs.

  Args:
    cls: The base class.
    lookup_string: String for logging purposes.
    *base_args: Positional arguments to be preprended to all factory calls.
    **base_kwargs: Keyword arguments to be applied to all factory calls. These
      may be overwritten by kwargs in factory calls.

  Returns:
    A factory function creating objects of class `cls`.

  Author: Joan Puigcerver (jpuigcerver@),
  modifications by Paul Rubenstein (paulrubenstein@)
  """

  def _factory_fn(*args, **kwargs):
    """A factory function that creates objects of the registered class."""
    call_args = base_args + args
    call_kwargs = dict(kwargs)
    for key, default in base_kwargs.items():
      if key in call_kwargs:
        # Warn, but let the caller's value win.
        logging.warning(
            "The default kwarg %r=%r, used in the lookup string %r, "
            "is overridden by the call to the resulting factory. "
            "Notice that this may lead to some unexpected behavior.", key,
            default, lookup_string)
      else:
        call_kwargs[key] = default
    return cls(*call_args, **call_kwargs)

  return _factory_fn
def parse_name(string_to_parse):
  """Parses input to the registry's lookup function.

  Args:
    string_to_parse: can be either an arbitrary name or function call
      (optionally with positional and keyword arguments). e.g. "multiclass",
      "resnet50_v2(filters_factor=8)".

  Returns:
    A tuple of input name, argument tuple and a keyword argument dictionary.
    Examples:
      "multiclass" -> ("multiclass", (), {})
      "resnet50_v2(9, filters_factor=4)" ->
          ("resnet50_v2", (9,), {"filters_factor": 4})

  Author: Joan Puigcerver (jpuigcerver@)
  """
  expr = ast.parse(string_to_parse, mode="eval").body  # pytype: disable=attribute-error
  if not isinstance(expr, (ast.Attribute, ast.Call, ast.Name)):
    raise ValueError(
        "The given string should be a name or a call, but a {} was parsed from "
        "the string {!r}".format(type(expr), string_to_parse))

  # A bare name ("some_name" -> ast.Name) or dotted name
  # ("module.some_name" -> ast.Attribute) carries no arguments, so the whole
  # input string is the name.
  if isinstance(expr, (ast.Name, ast.Attribute)):
    return string_to_parse, (), {}

  # Otherwise `expr` is an ast.Call: recover the (possibly dotted) function
  # name and literal-eval its arguments.
  def _dotted_name(node):
    if isinstance(node, ast.Name):
      return node.id
    if isinstance(node, ast.Attribute):
      return _dotted_name(node.value) + "." + node.attr
    raise ValueError(
        "Type {!r} is not supported in a function name, the string to parse "
        "was {!r}".format(type(node), string_to_parse))

  call_args = tuple(ast.literal_eval(arg) for arg in expr.args)
  call_kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
  return _dotted_name(expr.func), call_args, call_kwargs
class Registry(object):
  """Implements global Registry.

  Authors: Joan Puigcerver (jpuigcerver@), Alexander Kolesnikov (akolesnikov@)
  """

  _GLOBAL_REGISTRY = {}

  @staticmethod
  def global_registry():
    """Returns the process-wide name -> (item, item_type) mapping."""
    return Registry._GLOBAL_REGISTRY

  @staticmethod
  def register(name, item_type, replace=False):
    """Creates a function that registers its input."""
    if item_type not in ("object", "function", "factory", "class"):
      raise ValueError("Unknown item type: %s" % item_type)

    def _register(item):
      # Refuse to overwrite an existing entry unless replace=True.
      if name in Registry.global_registry() and not replace:
        raise KeyError(
            "The name {!r} was already registered in with type {!r}".format(
                name, item_type))
      Registry.global_registry()[name] = (item, item_type)
      return item

    return _register

  @staticmethod
  def lookup(lookup_string, kwargs_extra=None):
    """Lookup a name in the registry."""
    name, args, kwargs = parse_name(lookup_string)
    if kwargs_extra:
      kwargs.update(kwargs_extra)
    item, item_type = Registry.global_registry()[name]
    # Each item type binds the parsed args/kwargs in its own way.
    if item_type == "function":
      return functools.partial(item, *args, **kwargs)
    if item_type == "object":
      return item(*args, **kwargs)
    if item_type == "factory":
      return partialfactory(item, lookup_string, *args, **kwargs)
    if item_type == "class":
      return partialclass(item, *args, **kwargs)
| {
"content_hash": "0e3441e920bee38e6a743df7a3e4cfb1",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 88,
"avg_line_length": 32.10989010989011,
"alnum_prop": 0.6652977412731006,
"repo_name": "google-research/scenic",
"id": "3fa6e59dad169795ccf5d90ec50e512dcf02ee0b",
"size": "6428",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scenic/dataset_lib/big_transfer/registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1717873"
},
{
"name": "Python",
"bytes": "3692184"
}
],
"symlink_target": ""
} |
from typing import Union
# noinspection PyPackageRequirements
from discord import Interaction, Member, User, Embed, Color
# noinspection PyPackageRequirements
from discord.ext.commands import Bot
# noinspection PyPackageRequirements
from discord import app_commands
from minqlx import Plugin
@app_commands.context_menu(name="qlstats")
@app_commands.guild_only()
async def qlstats(interaction: Interaction, _item: Union[Member, User]) -> None:
    """Reply ephemerally with a link to the configured QL stats page."""
    # The link target comes from the qlx_discord_ext_qlstats_url cvar.
    embed = Embed(color=Color.blurple())
    embed.title = "QL stats page"
    embed.url = Plugin.get_cvar("qlx_discord_ext_qlstats_url")
    await interaction.response.send_message(embed=embed, ephemeral=True)
async def setup(bot: Bot):
    # Extension entry point: registers the context-menu command on the
    # bot's application command tree.
    bot.tree.add_command(qlstats)
| {
"content_hash": "694b2818d11cd8f108793b93e7f3989e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 80,
"avg_line_length": 30.625,
"alnum_prop": 0.763265306122449,
"repo_name": "mgaertne/minqlx-plugin-tests",
"id": "21816212220fed611054c0dee1443478241f582b",
"size": "735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experimental/python/discord_extensions/qlstats.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "963887"
}
],
"symlink_target": ""
} |
class Solution:
    def mostCommonWord(self, paragraph, banned):
        """Return the most frequent word in `paragraph` that is not banned.

        Words are matched case-insensitively and punctuation is ignored.
        Assumes (per problem statement) at least one non-banned word exists.

        :type paragraph: str
        :type banned: List[str]
        :rtype: str
        """
        import collections
        import re

        # BUG FIX: the previous implementation used
        # paragraph.strip('!?\',;.'), which only removes punctuation from the
        # two ends of the whole string, so interior tokens like "ball," were
        # counted separately from "ball". Tokenize on letter runs instead.
        words = re.findall(r'[a-z]+', paragraph.lower())
        banned_words = set(banned)
        counter = collections.Counter(w for w in words if w not in banned_words)
        return counter.most_common(1)[0][0]
"content_hash": "9266d90c00889ba4e3537dbcaab8408a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 56,
"avg_line_length": 31.916666666666668,
"alnum_prop": 0.5483028720626631,
"repo_name": "Mlieou/lXXtcode",
"id": "9eab4653cb9247be7abc2588462c78c5e75e4b28",
"size": "383",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "leetcode/python/ex_819.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "44654"
},
{
"name": "Java",
"bytes": "46838"
},
{
"name": "Python",
"bytes": "186767"
},
{
"name": "Shell",
"bytes": "127"
}
],
"symlink_target": ""
} |
"""
Django settings for tilebundler_prj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TILEBUNDLER_CONFIG = {
'tileset_dir': '/Users/s30244/dev/mbtiles'
}
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e#i*49t%&&=jbfs64hb8(fj(m8gqicz9h3+!3(y#(9k!uu#sd!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tilebundler',
'tastypie'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tilebundler_prj.urls'
WSGI_APPLICATION = 'tilebundler_prj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| {
"content_hash": "9794cbda1e8f0f236a3a65e1cdedf1ba",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 23.931818181818183,
"alnum_prop": 0.717948717948718,
"repo_name": "ROGUE-JCTD/django-tilebundler",
"id": "44e912dcd9577ad43fb4e4e739a60d6a83996cf5",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tilebundler_prj/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "26"
},
{
"name": "HTML",
"bytes": "835"
},
{
"name": "Python",
"bytes": "31771"
}
],
"symlink_target": ""
} |
import inspect
import io
import re
from contextlib import redirect_stdout
from unittest import mock
from unittest.mock import MagicMock
import pytest
from alembic.autogenerate import compare_metadata
from alembic.config import Config
from alembic.migration import MigrationContext
from alembic.runtime.environment import EnvironmentContext
from alembic.script import ScriptDirectory
from sqlalchemy import MetaData
from airflow.exceptions import AirflowException
from airflow.models import Base as airflow_base
from airflow.settings import engine
from airflow.utils.db import (
check_migrations,
compare_server_default,
compare_type,
create_default_connections,
downgrade,
resetdb,
upgradedb,
)
class TestDb:
    """Sanity checks for Airflow's DB schema, migration scripts and db utilities."""

    def test_database_schema_and_sqlalchemy_model_are_in_sync(self):
        """The live database schema must match the SQLAlchemy models, modulo known diffs."""
        all_meta_data = MetaData()
        for (table_name, table) in airflow_base.metadata.tables.items():
            all_meta_data._add_table(table_name, table.schema, table)
        # create diff between database schema and SQLAlchemy model
        mctx = MigrationContext.configure(
            engine.connect(),
            opts={'compare_type': compare_type, 'compare_server_default': compare_server_default},
        )
        diff = compare_metadata(mctx, all_meta_data)
        # known diffs to ignore
        ignores = [
            # ignore tables created by celery
            lambda t: (t[0] == 'remove_table' and t[1].name == 'celery_taskmeta'),
            lambda t: (t[0] == 'remove_table' and t[1].name == 'celery_tasksetmeta'),
            # ignore indices created by celery
            lambda t: (t[0] == 'remove_index' and t[1].name == 'task_id'),
            lambda t: (t[0] == 'remove_index' and t[1].name == 'taskset_id'),
            # from test_security unit test
            lambda t: (t[0] == 'remove_table' and t[1].name == 'some_model'),
            # MSSQL default tables
            lambda t: (t[0] == 'remove_table' and t[1].name == 'spt_monitor'),
            lambda t: (t[0] == 'remove_table' and t[1].name == 'spt_fallback_db'),
            lambda t: (t[0] == 'remove_table' and t[1].name == 'spt_fallback_usg'),
            lambda t: (t[0] == 'remove_table' and t[1].name == 'MSreplication_options'),
            lambda t: (t[0] == 'remove_table' and t[1].name == 'spt_fallback_dev'),
            # MSSQL foreign keys where CASCADE has been removed
            lambda t: (t[0] == 'remove_fk' and t[1].name == 'task_reschedule_dr_fkey'),
            lambda t: (t[0] == 'add_fk' and t[1].name == 'task_reschedule_dr_fkey'),
            # Ignore flask-session table/index
            lambda t: (t[0] == 'remove_table' and t[1].name == 'session'),
            lambda t: (t[0] == 'remove_index' and t[1].name == 'session_id'),
            # sqlite sequence is used for autoincrementing columns created with `sqlite_autoincrement` option
            lambda t: (t[0] == 'remove_table' and t[1].name == 'sqlite_sequence'),
        ]
        for ignore in ignores:
            diff = [d for d in diff if not ignore(d)]
        assert not diff, 'Database schema and SQLAlchemy model are not in sync: ' + str(diff)

    def test_only_single_head_revision_in_migrations(self):
        """Fail when the alembic migration history has diverged into multiple heads."""
        config = Config()
        config.set_main_option("script_location", "airflow:migrations")
        script = ScriptDirectory.from_config(config)
        from airflow.settings import engine
        with EnvironmentContext(
            config,
            script,
            as_sql=True,
        ) as env:
            env.configure(dialect_name=engine.dialect.name)
            # This will raise if there are multiple heads
            # To resolve, use the command `alembic merge`
            script.get_current_head()

    def test_default_connections_sort(self):
        """The conn_ids in create_default_connections must appear in sorted order."""
        pattern = re.compile('conn_id=[\"|\'](.*?)[\"|\']', re.DOTALL)
        source = inspect.getsource(create_default_connections)
        src = pattern.findall(source)
        assert sorted(src) == src

    def test_check_migrations(self):
        # Should run without error. Can't easily test the behaviour, but we can check it works
        check_migrations(0)
        check_migrations(1)

    @mock.patch('alembic.command')
    def test_upgradedb(self, mock_alembic_command):
        """upgradedb() with no arguments upgrades straight to 'heads'."""
        upgradedb()
        mock_alembic_command.upgrade.assert_called_once_with(mock.ANY, revision='heads')

    @pytest.mark.parametrize(
        'from_revision, to_revision',
        [('be2bfac3da23', 'e959f08ac86c'), ('ccde3e26fe78', '2e42bb497a22')],
    )
    def test_offline_upgrade_wrong_order(self, from_revision, to_revision):
        """An offline upgrade whose target predates the source must be rejected."""
        with mock.patch('airflow.utils.db.settings.engine.dialect'):
            with mock.patch('alembic.command.upgrade'):
                with pytest.raises(ValueError, match='to.* revision .* older than .*from'):
                    upgradedb(from_revision=from_revision, to_revision=to_revision, show_sql_only=True)

    @pytest.mark.parametrize(
        'to_revision, from_revision',
        [
            ('e959f08ac86c', 'e959f08ac86c'),
        ],
    )
    def test_offline_upgrade_revision_nothing(self, from_revision, to_revision):
        """Upgrading from a revision to itself should report nothing to do."""
        with mock.patch('airflow.utils.db.settings.engine.dialect'):
            with mock.patch('alembic.command.upgrade'):
                with redirect_stdout(io.StringIO()) as temp_stdout:
                    upgradedb(to_revision=to_revision, from_revision=from_revision, show_sql_only=True)
                stdout = temp_stdout.getvalue()
                assert 'nothing to do' in stdout

    @pytest.mark.parametrize(
        'from_revision, to_revision',
        [("90d1635d7b86", "54bebd308c5f"), ("e959f08ac86c", "587bdf053233")],
    )
    def test_offline_upgrade_revision(self, from_revision, to_revision):
        """Offline upgrade emits SQL for the given 'from:to' revision range."""
        with mock.patch('airflow.utils.db.settings.engine.dialect'):
            with mock.patch('alembic.command.upgrade') as mock_alembic_upgrade:
                upgradedb(from_revision=from_revision, to_revision=to_revision, show_sql_only=True)
        mock_alembic_upgrade.assert_called_once_with(mock.ANY, f"{from_revision}:{to_revision}", sql=True)

    @mock.patch('airflow.utils.db._offline_migration')
    @mock.patch('airflow.utils.db._get_current_revision')
    def test_offline_upgrade_no_versions(self, mock_gcr, mock_om):
        """Offline upgrade should work with no version / revision options."""
        with mock.patch('airflow.utils.db.settings.engine.dialect') as dialect:
            dialect.name = "postgresql"  # offline migration not supported with postgres
            mock_gcr.return_value = '90d1635d7b86'
            upgradedb(from_revision=None, to_revision=None, show_sql_only=True)
            actual = mock_om.call_args[0][2]
            assert re.match(r'90d1635d7b86:[a-z0-9]+', actual) is not None

    def test_offline_upgrade_fails_for_migration_less_than_2_0_0_head(self):
        """Revisions older than the 2.0.0 head are rejected for offline upgrade."""
        with mock.patch('airflow.utils.db.settings.engine.dialect'):
            with pytest.raises(ValueError, match='Check that e1a11ece99cc is a valid revision'):
                upgradedb(from_revision='e1a11ece99cc', to_revision='54bebd308c5f', show_sql_only=True)

    def test_sqlite_offline_upgrade_raises_with_revision(self):
        """SQLite does not support offline (SQL-only) migration at all."""
        with mock.patch('airflow.utils.db.settings.engine.dialect') as dialect:
            dialect.name = 'sqlite'
            with pytest.raises(AirflowException, match='Offline migration not supported for SQLite'):
                upgradedb(from_revision='e1a11ece99cc', to_revision='54bebd308c5f', show_sql_only=True)

    def test_offline_upgrade_fails_for_migration_less_than_2_2_0_head_for_mssql(self):
        """MSSQL has a later minimum revision (2.2.0 head) for offline upgrades."""
        with mock.patch('airflow.utils.db.settings.engine.dialect') as dialect:
            dialect.name = 'mssql'
            with pytest.raises(ValueError, match='Check that .* is a valid .* For dialect \'mssql\''):
                upgradedb(from_revision='e1a11ece99cc', to_revision='54bebd308c5f', show_sql_only=True)

    @mock.patch('airflow.utils.db._offline_migration')
    def test_downgrade_sql_no_from(self, mock_om):
        """SQL-only downgrade without 'from' uses the current revision as the source."""
        downgrade(to_revision='abc', show_sql_only=True, from_revision=None)
        actual = mock_om.call_args[1]['revision']
        assert re.match(r'[a-z0-9]+:abc', actual) is not None

    @mock.patch('airflow.utils.db._offline_migration')
    def test_downgrade_sql_with_from(self, mock_om):
        """SQL-only downgrade with an explicit 'from' uses the given range verbatim."""
        downgrade(to_revision='abc', show_sql_only=True, from_revision='123')
        actual = mock_om.call_args[1]['revision']
        assert actual == '123:abc'

    @mock.patch('alembic.command.downgrade')
    def test_downgrade_invalid_combo(self, mock_om):
        """can't combine `sql=False` and `from_revision`"""
        with pytest.raises(ValueError, match="can't be combined"):
            downgrade(to_revision='abc', from_revision='123')

    @mock.patch('alembic.command.downgrade')
    def test_downgrade_with_from(self, mock_om):
        """A plain (non-SQL) downgrade passes only the target revision to alembic."""
        downgrade(to_revision='abc')
        actual = mock_om.call_args[1]['revision']
        assert actual == 'abc'

    @pytest.mark.parametrize('skip_init', [False, True])
    @mock.patch('airflow.utils.db.create_global_lock', new=MagicMock)
    @mock.patch('airflow.utils.db.drop_airflow_models')
    @mock.patch('airflow.utils.db.drop_flask_models')
    @mock.patch('airflow.utils.db.drop_airflow_moved_tables')
    @mock.patch('airflow.utils.db.initdb')
    @mock.patch('airflow.settings.engine.connect')
    def test_resetdb(
        self,
        mock_connect,
        mock_init,
        mock_drop_moved,
        mock_drop_flask,
        mock_drop_airflow,
        skip_init,
    ):
        """resetdb drops all model groups and re-runs initdb unless skip_init is set."""
        session_mock = MagicMock()
        resetdb(session_mock, skip_init=skip_init)
        mock_drop_airflow.assert_called_once_with(mock_connect.return_value)
        mock_drop_flask.assert_called_once_with(mock_connect.return_value)
        mock_drop_moved.assert_called_once_with(session_mock)
        if skip_init:
            mock_init.assert_not_called()
        else:
            mock_init.assert_called_once_with(session=session_mock)
| {
"content_hash": "4884c95180768dc64d180b59677467af",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 109,
"avg_line_length": 46.49074074074074,
"alnum_prop": 0.6385182234614618,
"repo_name": "danielvdende/incubator-airflow",
"id": "1cba49b4c77efda700ffaa58e845f14ddb182551",
"size": "10830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/utils/test_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
import zipfile
import pytest
from ..checker import check
from tenable.errors import UnexpectedValueError
@pytest.mark.vcr()
def test_system_details(unauth):
    """The unauthenticated system details payload exposes all expected fields."""
    details = unauth.system.details()
    assert isinstance(details, dict)
    for field in ('ACAS', 'PasswordComplexity', 'banner', 'buildID',
                  'freshInstall', 'headerText', 'licenseStatus',
                  'loginNotifications', 'logo', 'releaseID'):
        check(details, field, str)
    check(details, 'reportTypes', list)
    for report in details['reportTypes']:
        check(report, 'attributeSets', list)
        check(report, 'enabled', str)
        check(report, 'name', str)
        check(report, 'type', str)
    for field in ('serverAuth', 'serverClassification',
                  'sessionTimeout', 'telemetryEnabled'):
        check(details, field, str)
    check(details, 'timezones', list)
    for zone in details['timezones']:
        check(zone, 'gmtOffset', (int, float))
        check(zone, 'name', str)
    check(details, 'uuid', str)
    check(details, 'version', str)
@pytest.mark.vcr()
def test_system_diagnostics_task_typeerror(admin):
    """A non-string `task` value must raise TypeError."""
    with pytest.raises(TypeError):
        admin.system.diagnostics(task=1)
@pytest.mark.vcr()
def test_system_diagnostics_type_unexpectedvalueerror(admin):
    """A `task` value outside the accepted set must raise UnexpectedValueError."""
    with pytest.raises(UnexpectedValueError):
        admin.system.diagnostics(task='something else')
@pytest.mark.vcr()
def test_system_diagnostics_options_typeerror(admin):
    """`options` must be a list; a non-list value must raise TypeError."""
    with pytest.raises(TypeError):
        admin.system.diagnostics(options=1)
@pytest.mark.vcr()
def test_system_diagnostics_options_item_typeerror(admin):
    """Each item in `options` must be a string; a non-str item raises TypeError."""
    with pytest.raises(TypeError):
        admin.system.diagnostics(options=[1])
@pytest.mark.vcr()
def test_system_diagnostics_options_item_unexpectedvalueerror(admin):
    """An `options` item outside the accepted set raises UnexpectedValueError."""
    with pytest.raises(UnexpectedValueError):
        admin.system.diagnostics(options=['something else'])
@pytest.mark.vcr()
def test_system_diagnostics_success(admin):
    """A successful diagnostics run returns a zip archive file object."""
    report = admin.system.diagnostics()
    assert zipfile.is_zipfile(report)
@pytest.mark.vcr()
def test_system_current_locale_success(admin):
    """The current locale is a dict with string code/description/name fields."""
    locale = admin.system.current_locale()
    assert isinstance(locale, dict)
    for field in ('code', 'description', 'name'):
        check(locale, field, str)
@pytest.mark.vcr()
def test_system_list_locales_success(admin):
    """Each listed locale exposes string code and name fields."""
    locales = admin.system.list_locales()
    assert isinstance(locales, dict)
    for locale in locales.values():
        check(locale, 'code', str)
        check(locale, 'name', str)
@pytest.mark.vcr()
def test_system_set_locale_locale_typeerror(admin):
    """The locale argument must be a string; a non-str value raises TypeError."""
    with pytest.raises(TypeError):
        admin.system.set_locale(1)
@pytest.mark.vcr()
@pytest.mark.skip(reason='This appears to be 1-way, need a sacrificial system to test with')
def test_system_set_locale_success(admin):
    """Setting the locale echoes back the requested locale code."""
    # FIX: the result used to be bound to an unused `locales` variable (F841).
    # The call itself is kept so the recorded VCR cassette still sees the
    # same request sequence.
    admin.system.list_locales()
    assert admin.system.set_locale('ja') == 'ja'
@pytest.mark.vcr()
def test_system_status_success(admin):
    """The system status payload exposes the expected diagnostic fields."""
    status = admin.system.status()
    assert isinstance(status, dict)
    check(status, 'diagnosticsGenerateState', str)
    check(status, 'diagnosticsGenerated', int)
    for field in ('statusDisk', 'statusJava', 'statusLastChecked',
                  'statusRPM', 'statusThresholdDisk'):
        check(status, field, str)
| {
"content_hash": "c5cee4207f399a29002aa40105821f64",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 92,
"avg_line_length": 28.353448275862068,
"alnum_prop": 0.6704165399817573,
"repo_name": "tenable/pyTenable",
"id": "45abdc2a351b864ce2cc4e411b482f1fbbde2bc7",
"size": "3289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sc/test_system.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2769266"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from awx.main.models import NotificationTemplate
def compare_with_encrypted(model_config, param_config):
    '''Given a model_config from the database, assure that this is consistent
    with the config given in the notification_configuration parameter

    this requires handling of password fields: the database stores them as
    '$encrypted$'-prefixed tokens, so they can only be checked for the
    prefix, not for equality with the submitted plaintext value.
    '''
    for key, model_val in model_config.items():
        param_val = param_config.get(key, 'missing')
        # Only call .startswith on the parameter value when it is actually a
        # string; previously a non-string param (e.g. an int timeout) paired
        # with a non-encrypted string model value raised AttributeError
        # instead of producing the intended assertion failure.
        is_encrypted_field = isinstance(model_val, str) and (
            model_val.startswith('$encrypted$')
            or (isinstance(param_val, str) and param_val.startswith('$encrypted$'))
        )
        if is_encrypted_field:
            assert model_val.startswith('$encrypted$')  # must be saved as encrypted
            assert len(model_val) > len('$encrypted$')
        else:
            assert model_val == param_val, 'Config key {0} did not match, (model: {1}, input: {2})'.format(
                key, model_val, param_val
            )
@pytest.mark.django_db
def test_create_modify_notification_template(run_module, admin_user, organization):
    """Create an email notification template, then exercise a no-op run
    and a modification run of the tower_notification module."""
    email_config = {
        'username': 'user',
        'password': 'password',
        'sender': 'foo@invalid.com',
        'recipients': ['foo2@invalid.com'],
        'host': 'smtp.example.com',
        'port': 25,
        'use_tls': False, 'use_ssl': False,
        'timeout': 4
    }

    # Initial creation must report a change.
    outcome = run_module('tower_notification', {
        'name': 'foo-notification-template',
        'organization': organization.name,
        'notification_type': 'email',
        'notification_configuration': email_config,
    }, admin_user)
    assert not outcome.get('failed', False), outcome.get('msg', outcome)
    assert outcome.pop('changed', None), outcome

    template = NotificationTemplate.objects.get(id=outcome['id'])
    compare_with_encrypted(template.notification_configuration, email_config)
    assert template.organization == organization

    # No-op check: omit notification_configuration entirely, because with it
    # present we cannot determine whether the password fields changed.
    outcome = run_module('tower_notification', {
        'name': 'foo-notification-template',
        'organization': organization.name,
        'notification_type': 'email',
    }, admin_user)
    assert not outcome.get('failed', False), outcome.get('msg', outcome)
    assert not outcome.pop('changed', None), outcome

    # Changing a configuration value must report a change again.
    email_config['timeout'] = 12
    outcome = run_module('tower_notification', {
        'name': 'foo-notification-template',
        'organization': organization.name,
        'notification_type': 'email',
        'notification_configuration': email_config,
    }, admin_user)
    assert not outcome.get('failed', False), outcome.get('msg', outcome)
    assert outcome.pop('changed', None), outcome
    template.refresh_from_db()
    compare_with_encrypted(template.notification_configuration, email_config)
@pytest.mark.django_db
def test_invalid_notification_configuration(run_module, admin_user, organization):
    """An empty notification_configuration for an email template is rejected
    with a clear error message."""
    module_params = {
        'name': 'foo-notification-template',
        'organization': organization.name,
        'notification_type': 'email',
        'notification_configuration': {},
    }
    outcome = run_module('tower_notification', module_params, admin_user)
    assert outcome.get('failed', False), outcome.get('msg', outcome)
    assert 'Missing required fields for Notification Configuration' in outcome['msg']
@pytest.mark.django_db
def test_deprecated_to_modern_no_op(run_module, admin_user, organization):
    """Running the module twice with an identical webhook configuration is a
    change on the first pass and a no-op on the second."""
    webhook_config = {
        'url': 'http://www.example.com/hook',
        'headers': {
            'X-Custom-Header': 'value123'
        }
    }
    module_params = {
        'name': 'foo-notification-template',
        'organization': organization.name,
        'notification_type': 'webhook',
        'notification_configuration': webhook_config,
    }

    for expect_change in (True, False):
        # Shallow-copy so each call gets its own top-level params dict.
        outcome = run_module('tower_notification', dict(module_params), admin_user)
        assert not outcome.get('failed', False), outcome.get('msg', outcome)
        assert bool(outcome.pop('changed', None)) is expect_change, outcome
| {
"content_hash": "9493a0755be217970779e8ef3379a13f",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 119,
"avg_line_length": 38.549549549549546,
"alnum_prop": 0.6599672820752512,
"repo_name": "GoogleCloudPlatform/sap-deployment-automation",
"id": "9d916d1dc9ef07645e8ccf388de9dc7ca9e00353",
"size": "4279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/github.com/ansible/awx/awx_collection/test/awx/test_notification.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from __future__ import absolute_import
from . import imageGradients
from . import objectDetection
# Registry of the data ingestion extensions bundled with DIGITS.
data_extensions = [
    # Set show=True if extension should be shown by default
    # on DIGITS home page. These defaults can be changed by
    # editing DIGITS config option 'data_extension_list'
    {'class': imageGradients.DataIngestion, 'show': False},
    {'class': objectDetection.DataIngestion, 'show': True},
]
def get_extensions(show_all=False):
    """Return the registered data extension classes.

    Only extensions flagged ``'show': True`` are included unless
    *show_all* is True, in which case every registered extension is
    returned.
    """
    selected = []
    for entry in data_extensions:
        if show_all or entry['show']:
            selected.append(entry['class'])
    return selected
def get_extension(extension_id):
    """Return the extension class whose ``get_id()`` equals
    *extension_id*, or None if no registered extension matches.
    """
    return next(
        (entry['class'] for entry in data_extensions
         if entry['class'].get_id() == extension_id),
        None,
    )
| {
"content_hash": "e9ebec5ad95ad9282a02b8b8f79a85b1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 59,
"avg_line_length": 29.125,
"alnum_prop": 0.6716738197424893,
"repo_name": "jmancewicz/DIGITS",
"id": "9e53b9413581d6c556cf3a9d7e2527446c77fbf5",
"size": "996",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "digits/extensions/data/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2557"
},
{
"name": "HTML",
"bytes": "224818"
},
{
"name": "JavaScript",
"bytes": "132563"
},
{
"name": "Lua",
"bytes": "12103"
},
{
"name": "Python",
"bytes": "727804"
},
{
"name": "Shell",
"bytes": "4225"
}
],
"symlink_target": ""
} |
"""Flood the 'border' label of a segmentation image with a replacement value.

Usage: prog inputFile outputFile replaceValue upperThreshold

NOTE(review): outputFileName is parsed but never used — the result is only
displayed via sitk.Show, never written to disk. Confirm whether a WriteImage
call is missing.
"""
import sys
import SimpleITK as sitk
import os
# verify that we have the correct number of arguments
if len(sys.argv) != 5:
    sys.stderr.write(
        "Usage: prog inputFile outputFile replaceValue upperThreshold\n"
    )
    exit(1)
# copy the arguments in to variables
inputFileName = sys.argv[1]
outputFileName = sys.argv[2]
replaceValue = int(sys.argv[3])
upperThreshold = float(sys.argv[4])
# Read the file into an sitkImage
image = sitk.ReadImage(inputFileName)
# Map intensities in [0, upperThreshold] to 1 and everything else to 0
# (the original comment claimed a fixed [0,2) range, but the upper bound
# is actually the command-line upperThreshold argument).
boundary = sitk.BinaryThreshold(image, 0, upperThreshold, 1, 0)
# Morphological closing with a 1-pixel radius to fill small gaps.
boundary = sitk.BinaryMorphologicalClosing(
    boundary, [1] * image.GetDimension()
)
# Remove any label pixel not connected to the border
boundary = sitk.BinaryGrindPeak(boundary)
boundary = sitk.Cast(boundary, image.GetPixelID())
# Multiply the input image by the complement of the border mask.
# This will multiply the image by 0 or 1, where 0 is the
# border, zeroing out the border pixels.
image = image * ~boundary
# add the replace value to the pixels on the border
image = image + (boundary * replaceValue)
if "SITK_NOSHOW" not in os.environ:
    sitk.Show(image, "Boarder Segmentation")
| {
"content_hash": "3f0e055956cf5d51019060a569469dbc",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 28.357142857142858,
"alnum_prop": 0.7380352644836272,
"repo_name": "SimpleITK/SimpleITK",
"id": "2f684533933a05610731ec29be4dbda362d28c85",
"size": "1955",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Examples/Python/BoarderSegmentation.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "34004"
},
{
"name": "C#",
"bytes": "5324"
},
{
"name": "C++",
"bytes": "2086338"
},
{
"name": "CMake",
"bytes": "274252"
},
{
"name": "CSS",
"bytes": "31103"
},
{
"name": "Dockerfile",
"bytes": "1074"
},
{
"name": "HTML",
"bytes": "3744"
},
{
"name": "Java",
"bytes": "7242"
},
{
"name": "Lua",
"bytes": "25805"
},
{
"name": "Makefile",
"bytes": "145"
},
{
"name": "Python",
"bytes": "202617"
},
{
"name": "R",
"bytes": "54684"
},
{
"name": "SWIG",
"bytes": "2757554"
},
{
"name": "Shell",
"bytes": "109644"
},
{
"name": "Tcl",
"bytes": "3501"
},
{
"name": "TeX",
"bytes": "102861"
}
],
"symlink_target": ""
} |
class Solver:
    """Placeholder solver class; no behaviour implemented yet."""
    pass
| {
"content_hash": "2673c6d148574c4eece68f17ab3dec09",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 13,
"avg_line_length": 11.5,
"alnum_prop": 0.6521739130434783,
"repo_name": "wenkaiqiu/petal",
"id": "94bab53b306139d1ef2a2e0cb67ba05e8ec23c44",
"size": "23",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/solver/solver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74747"
}
],
"symlink_target": ""
} |
import krpc
import asyncio
import logging
from common.autonumber import AutoNumber
import common.protocol as protocol
class KSPConnection:
    """Maintain a kRPC connection to a running Kerbal Space Program instance.

    Connects (with retry) to the kRPC server, tracks the active game scene
    by polling, and forwards scene-change events to the commander so they
    can be relayed to the coordinator.
    """
    # TODO: Detect disconnect from krpc-server

    def __init__(self, config, commander):
        """Read connection settings from *config* and keep *commander*."""
        self.name = config.get('Name', "command center")
        self.address = config.get('KSPIp', '127.0.0.1')
        self.rpc_port = int(config.get('RpcPort', 50000))
        self.stream_port = int(config.get('StreamPort', 50001))
        self.commander = commander
        self.conn = None  # krpc connection object, set once start() succeeds
        self.logger = logging.getLogger(self.get_name())
        self.ok = False  # True after a successful connection
        self.streams = {}
        self.state = {}
        self.current_scene = None  # asyncio task polling the active scene

    async def start(self):
        """Connect to the kRPC server, retrying every 5 seconds until it
        succeeds, then begin tracking the current game scene."""
        while True:
            try:
                # TODO: this does not time-out but just hangs the loop
                # if the server is up but not accepting connections
                self.conn = krpc.connect(
                    name=self.name,
                    address=self.address,
                    rpc_port=self.rpc_port,
                    stream_port=self.stream_port)
            except Exception:
                # Was a bare `except:` which also swallowed KeyboardInterrupt
                # and SystemExit; keep retrying only on ordinary failures.
                self.logger.info(
                    "Failed to connect to KSP, trying again in 5 seconds.")
                await asyncio.sleep(5)
            else:
                self.logger.info("Connected to KSP.")
                self.ok = True
                try:
                    self.commander.send_status()
                except protocol.NoConnectionError:
                    # If we connect to ksp before the coordinator
                    # status will be sent on connection event
                    pass
                finally:
                    break
        self.initialise()

    def initialise(self):
        """Start tracking whatever scene the game is currently in."""
        self.enter_new_scene(self.conn.krpc.current_game_scene)

    async def send_on_connection(self, message):
        """Send *message* to the coordinator once a connection exists."""
        await self.commander.connectionevent.wait()
        self.commander.send_data_to_coordinator(message)

    def handle_data_from_coordinator(self, message):
        """Hook for messages from the coordinator; currently a no-op."""
        pass

    def stop(self):
        """Close the kRPC connection if one was established."""
        if self.conn is not None:
            self.conn.close()

    def handle_disconnect(self):
        self.logger.error("Handle disconnect is not implemented")

    def get_name(self):
        """Return the class name; used as the logger name."""
        return self.__class__.__name__

    def get_scene(self):
        """Map the cached kRPC game scene onto the protocol enum."""
        if self.state[protocol.KrpcInfo.GAME_SCENE] == self.conn.krpc.GameScene.flight:
            return protocol.GameScene.FLIGHT
        else:
            return protocol.GameScene.SPACE_CENTER

    # State handlers
    def enter_new_scene(self, new_scene):
        """Record *new_scene*, spawn its polling task, and notify the
        coordinator (deferring the message if not yet connected)."""
        self.exit_current_scene()
        self.state[protocol.KrpcInfo.GAME_SCENE] = new_scene
        datatype = protocol.KrpcInfo.GAME_SCENE
        if new_scene == self.conn.krpc.GameScene.flight:
            self.current_scene = asyncio.ensure_future(self.flight())
            msgdata = (datatype, protocol.GameScene.FLIGHT)
        else:
            self.current_scene = asyncio.ensure_future(self.space_center())
            msgdata = (datatype, protocol.GameScene.SPACE_CENTER)
        message = protocol.create_message(
            protocol.MessageType.KRPC_INFO_MSG, msgdata)
        try:
            self.commander.send_data_to_coordinator(message)
        except protocol.NoConnectionError:
            asyncio.ensure_future(self.send_on_connection(message))

    def exit_current_scene(self):
        pass

    async def _poll_scene_change(self, interval):
        """Poll the game scene every *interval* seconds until it changes,
        then hand control to the handler for the new scene. Shared by
        flight() and space_center(), which previously duplicated this loop.
        """
        while True:
            await asyncio.sleep(interval)  # TODO: make interval configurable
            scene = self.conn.krpc.current_game_scene
            if self.state[protocol.KrpcInfo.GAME_SCENE] != scene:
                break
        self.enter_new_scene(scene)

    async def space_center(self):
        self.logger.debug("Entering space center")
        await self._poll_scene_change(0.11)

    async def flight(self):
        self.logger.debug("Entering flight")
        await self._poll_scene_change(0.1)

    def exit_scene(self):
        # Fixed: original logged "Exciting state %s" with a trailing comma and
        # no argument, leaving the placeholder unfilled (and a typo).
        self.logger.info("Exiting scene %s",
                         self.state.get(protocol.KrpcInfo.GAME_SCENE))
| {
"content_hash": "dffe482ecc42d5aca9a1cc860cbd258d",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 87,
"avg_line_length": 34.6890756302521,
"alnum_prop": 0.5869670542635659,
"repo_name": "fayoh/KSP-Control",
"id": "e58304ffa28106f4e172d5c2653e722e97ae2462",
"size": "4128",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commander/kspconn/kspconn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26822"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages

# Third-party runtime requirements; GDAL in particular is easiest to
# obtain through Conda (see inline note).
requires = [
    'affine',
    'appdirs',
    'dask',
    'gdal', #: Install w/ Conda for best results
    'geopandas',
    'mapkit>=1.2.6',
    'pangaea',
    'psycopg2',
    'pyyaml',
    'rapidpy',
    'sqlalchemy',
    'timezonefinder',
    'utm',
    'wrf-python'
]

setup(name='gsshapy',
      version='2.3.9',
      description='An SQLAlchemy ORM for GSSHA model files and a toolkit'
                  ' to convert gridded input into GSSHA input.',
      long_description='Documentation can be found at '
                       'http://gsshapy.readthedocs.io. \n\n'
                       '.. image:: https://zenodo.org/badge/26494532.svg \n'
                       ' :target: '
                       'https://zenodo.org/badge/latestdoi/26494532',
      author='Nathan Swain, Alan D. Snow, and Scott D. Christensen',
      author_email='nathan.swain@byu.net',
      url='https://github.com/CI-WATER/gsshapy',
      license='BSD 3-Clause License',
      keywords='GSSHA, database, object relational model',
      packages=find_packages(),
      # Ship the bundled land-cover lookup tables with the package.
      package_data={'': ['grid/land_cover/*.txt']},
      classifiers=[
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'Operating System :: OS Independent',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.5',
      ],
      install_requires=requires,
      # Optional dependency sets: `pip install gsshapy[tests]` / `[docs]`.
      extras_require={
          'tests': [
              'coveralls',
              'pytest',
              'pytest-cov',
          ],
          'docs': [
              'mock',
              'sphinx',
              'sphinx_rtd_theme',
              'sphinxcontrib-napoleon',
          ]
      },
      )
| {
"content_hash": "61ae31ea5a9c1ed7bc8c26a348bf4528",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 32.3,
"alnum_prop": 0.5128998968008256,
"repo_name": "CI-WATER/gsshapy",
"id": "1aa25be7db313d625ea2932629ca09406971b304",
"size": "1938",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gosu",
"bytes": "87223"
},
{
"name": "Jupyter Notebook",
"bytes": "24572"
},
{
"name": "Python",
"bytes": "755001"
},
{
"name": "SMT",
"bytes": "152"
}
],
"symlink_target": ""
} |
from django.db import models
class Area(models.Model):
    """A named geographic area."""

    name = models.CharField(max_length=140)

    def __str__(self):
        return self.name
| {
"content_hash": "6d48c5f2f3cd68f989e17273454dcadc",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 28,
"avg_line_length": 16.4,
"alnum_prop": 0.6036585365853658,
"repo_name": "aniruddha-adhikary/bookit",
"id": "a95118209730353550ebf7ebe4ccac7d64d64fed",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bookit/geo/models/area.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2145"
},
{
"name": "HTML",
"bytes": "43525"
},
{
"name": "JavaScript",
"bytes": "3902"
},
{
"name": "Python",
"bytes": "73443"
},
{
"name": "Shell",
"bytes": "4196"
}
],
"symlink_target": ""
} |
from . import context
# -----------------------------------------------------------------------------
def get_response_meta():
    """Return the metadata stored under ``response_meta`` in the current
    request context (or whatever ``context.get`` returns when unset —
    presumably None; see ``context`` module)."""
    return context.get("response_meta")
def update_response_meta(next_meta):
    """Merge *next_meta* into the response metadata for the current
    request context. Passing ``None`` is a no-op.
    """
    if next_meta is None:
        return
    merged = get_response_meta()
    if merged is None:
        merged = {}
    merged.update(next_meta)
    context.set("response_meta", merged)
| {
"content_hash": "b8a6b15449f91025da8d718709ce2176",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 79,
"avg_line_length": 20.42105263157895,
"alnum_prop": 0.49742268041237114,
"repo_name": "4Catalyzer/flask-resty",
"id": "ff784bbcf21f2bcb713d92e0f59d2bce3222136b",
"size": "388",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flask_resty/meta.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "257329"
}
],
"symlink_target": ""
} |
"""Scraper for Connecticut Supreme Court
CourtID: conn
Court Short Name: Conn.
Author: Asadullah Baig <asadullahbeg@outlook.com>
History:
- 2014-07-11: created
- 2014-08-08, mlr: updated to fix InsanityError on case_dates
- 2014-09-18, mlr: updated XPath to fix InsanityError on docket_numbers
- 2015-06-17, mlr: made it more lenient about date formatting
"""
from datetime import date, datetime
import re
from juriscraper.OpinionSite import OpinionSite
from juriscraper.lib.string_utils import clean_string
class Site(OpinionSite):
    """Scraper for the Connecticut Supreme Court opinion archive page."""
    def __init__(self, *args, **kwargs):
        super(Site, self).__init__(*args, **kwargs)
        self.crawl_date = date.today()
        # The archive page is per-year; the URL embeds a 2-digit year.
        self.url = 'http://www.jud.ct.gov/external/supapp/archiveAROsup{year}.htm'.format(
            year=self.crawl_date.strftime("%y"))
        self.court_id = self.__module__
    def _get_case_names(self):
        case_names = []
        path = '//*[@id="AutoNumber1"]/tr[2]/td/table/tr/td//ul//text()'
        for s in self.html.xpath(path):
            # Case-name lines contain a dash (en dash or hyphen) between
            # the party names; other text nodes are skipped.
            if re.search(u'[–-]', s):
                case_names.append(clean_string(s))
        return case_names
    def _get_download_urls(self):
        return list(self.html.xpath('//@href[contains(., ".pdf")]'))
    def _get_case_dates(self):
        dates = []
        for title in self.html.xpath('//table[@id="AutoNumber1"]/tr[2]/td/table/tr/td//b//text()'):
            # Each bold heading is followed by a <ul> of PDF links; every
            # opinion under the heading shares the heading's date.
            count = len(title.getparent().xpath("following::ul[1]//a/@href[contains(., 'pdf')]"))
            date_string = title.split()[-1].strip(':')
            # Try both 2- and 4-digit-year formats (see 2015-06-17 history
            # note). NOTE(review): if neither matches, the trailing None
            # sentinel makes strptime raise TypeError (not ValueError), so
            # the loop fails loudly — confirm this is intentional.
            for format in ['%m/%d/%y', '%m/%d/%Y', None]:
                try:
                    dates.extend([datetime.strptime(date_string, format).date()] * count)
                    break
                except ValueError:
                    continue
        return dates
    def _get_docket_numbers(self):
        docket_numbers = []
        for d in self.html.xpath("//a[contains(./@href, '.pdf')]//text()"):
            # Docket numbers look like SC12345 / AC1234 (3-5 digits).
            if re.search(r"(A?S?C\d{3,5})", d):
                docket_numbers.append(d)
        return docket_numbers
    def _get_precedential_statuses(self):
        return ['Published'] * len(self.case_dates)
| {
"content_hash": "10099bec65c5766a38b01d77aad0cac0",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 99,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.5857798165137614,
"repo_name": "m4h7/juriscraper",
"id": "0c56b3e53ad15297fe50674684a410d67eda7bf9",
"size": "2197",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "juriscraper/opinions/united_states/state/conn.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "27160373"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "623951"
}
],
"symlink_target": ""
} |
"""Admin classes"""
from import_export.resources import ModelResource
class InvenTreeResource(ModelResource):
    """Custom subclass of the ModelResource class provided by django-import-export.

    Ensures that exported data are escaped to prevent malicious formula injection.
    Ref: https://owasp.org/www-community/attacks/CSV_Injection
    """

    # Leading characters that spreadsheet applications may interpret as the
    # start of a formula (or that alter cell parsing). The original list
    # contained '@' twice; the duplicate added nothing to the membership test.
    ILLEGAL_START_CHARS = '@=+-\t\r\n'

    def export_resource(self, obj):
        """Custom function to override default row export behaviour.

        Specifically, strip illegal leading characters to prevent formula
        injection.
        """
        row = super().export_resource(obj)
        for idx, val in enumerate(row):
            # isinstance (rather than `type(val) is str`) also covers str
            # subclasses, which must be sanitized the same way.
            if isinstance(val, str):
                # lstrip removes *every* leading character belonging to the
                # set, exactly matching the original one-char-at-a-time loop.
                row[idx] = val.strip().lstrip(self.ILLEGAL_START_CHARS)
        return row
| {
"content_hash": "d6f43191ff9e9f48d79424c4d1b61403",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 83,
"avg_line_length": 32.36363636363637,
"alnum_prop": 0.596441947565543,
"repo_name": "inventree/InvenTree",
"id": "2d5798a9d1eec8e0995782df60ab1f4a2b8cde62",
"size": "1068",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "InvenTree/InvenTree/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246444"
},
{
"name": "Dockerfile",
"bytes": "7169"
},
{
"name": "HTML",
"bytes": "586821"
},
{
"name": "JavaScript",
"bytes": "1970070"
},
{
"name": "Procfile",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2606104"
},
{
"name": "Shell",
"bytes": "27115"
}
],
"symlink_target": ""
} |
from share.provider import ProviderAppConfig
from .harvester import SpringerHarvester
class AppConfig(ProviderAppConfig):
    """SHARE provider configuration for harvesting from Springer."""
    name = 'providers.com.springer'
    version = '0.0.1'
    title = 'springer'
    long_title = 'Springer'
    home_page = 'http://www.springer.com/us/'
    harvester = SpringerHarvester
| {
"content_hash": "d5eb439563ae4a4ce1760ed3256077eb",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 45,
"avg_line_length": 28.454545454545453,
"alnum_prop": 0.7188498402555911,
"repo_name": "zamattiac/SHARE",
"id": "5ce0671ba862dbb61098bece979a8348111ec4e9",
"size": "313",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "providers/com/springer/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "HTML",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "1517988"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import *
from jellyroll.views import tags

# URL routes for browsing jellyroll items by tag:
#   ''       -> list of all tags
#   '<tag>/' -> items carrying a single tag (tag may contain - . ' : and \w)
urlpatterns = patterns('',
    url(r'^$', tags.tag_list, {}, name='jellyroll_tag_list'),
    url(r'^(?P<tag>[-\.\'\:\w]+)/$',tags.tag_item_list, {}, name="jellyroll_tag_item_list"),
)
| {
"content_hash": "d8fdb7fed89ebffbfdc48209edc6f429",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 92,
"avg_line_length": 28.88888888888889,
"alnum_prop": 0.6230769230769231,
"repo_name": "jacobian-archive/jellyroll",
"id": "046372801ac8d9100879eac586c983fd6ba696fa",
"size": "260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/jellyroll/urls/tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "4562"
},
{
"name": "Python",
"bytes": "113908"
}
],
"symlink_target": ""
} |
import MySQLdb
import simplejson as json
from django.shortcuts import render
from django.http import HttpResponse
from django.http import Http404
from django.core.exceptions import ValidationError
from mozdns.domain.models import Domain
from core.search.compiler.django_compile import search_type
from core.utils import locked_function
from mozdns.record.utils import get_obj_meta
def record_search(request, record_type=None):
    """Render the record search page; the record type defaults to 'A'."""
    context = {'record_type': record_type or 'A'}
    return render(request, 'record/record_search.html', context)
def record(request, record_type='', record_pk=''):
    """Render the record create/edit page.

    Supplies all forward (non-reverse) domain names as JSON for the
    template's domain picker.
    """
    forward_domains = Domain.objects.filter(is_reverse=False)
    context = {
        'record_type': record_type or 'A',
        'record_pk': record_pk,
        'domains': json.dumps([d.name for d in forward_domains]),
    }
    return render(request, 'record/record.html', context)
def record_delete(request, record_type='', record_pk=''):
    """Delete the record of *record_type* with primary key *record_pk*.

    POST-only; responds with a JSON {'success': bool[, 'error': str]} body.
    """
    if request.method != 'POST':
        raise Http404
    obj_meta = get_obj_meta(record_type)
    try:
        object_ = obj_meta.Klass.objects.get(pk=record_pk)
    except obj_meta.Klass.DoesNotExist:
        error = "Could not find that object."
        return HttpResponse(json.dumps({'success': False, 'error': error}))
    try:
        object_.delete()
    except ValidationError, e:
        # Surface model-level validation failures to the client.
        error = e.messages[0]
        return HttpResponse(json.dumps({'success': False, 'error': error}))
    return HttpResponse(json.dumps({'success': True}))
def record_search_ajax(request):
    """
    This function will return a list of records matching the 'query' of type
    'record_type'. It's used for ajaxy stuff.
    """
    query = request.GET.get('query', '')
    record_type = request.GET.get('record_type', '')
    obj_meta = get_obj_meta(record_type)
    if not record_type:
        raise Http404
    # An empty query renders an empty result page rather than erroring.
    if not query and record_type:
        return render(request, 'record/record_search_results.html', {
            'objs': [],
            'record_type': record_type,
        })
    if not obj_meta.Klass:
        raise Http404
    records, error = search_type(query, record_type)
    if error:
        records = []
    else:
        try:
            # Cap results at 50; slicing triggers query evaluation.
            records = records[:50]
        except MySQLdb.OperationalError, e:
            if "Got error " in str(e) and " from regexp" in str(e):
                # An invalid user-supplied regex pattern makes MySQL raise an
                # OperationalError; treat it as "no results" rather than a 500.
                records = []
            else:
                raise
    return render(request, 'record/record_search_results.html', {
        'objs': records,
        'record_type': record_type,
    })
def record_ajax(request):
    """Route record AJAX calls: POST creates/updates, anything else fetches."""
    if request.method == 'POST':
        return _record_post(request)
    record_type = request.GET.get('record_type', '')
    record_pk = request.GET.get('record_pk', '')
    handler = get_obj_meta(record_type)()
    return handler.get(request, record_type, record_pk)
@locked_function('inventory.record_lock', 10)
def _record_post(request):
    """Create or update a record from POST data.

    Serialized under the 'inventory.record_lock' lock (second argument is
    presumably a timeout in seconds — confirm against locked_function) so
    concurrent record writes cannot interleave.
    """
    record_type = request.POST.get('record_type', '')
    record_pk = request.POST.get('record_pk', '')
    obj_meta = get_obj_meta(record_type)()
    return obj_meta.post(request, record_type, record_pk)
| {
"content_hash": "9d604de3b1aa1f9b4d33a535d34d4091",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 30.889908256880734,
"alnum_prop": 0.626967626967627,
"repo_name": "rtucker-mozilla/mozilla_inventory",
"id": "a5581b88570952f64eee8f4cf3e53e330d20694a",
"size": "3367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mozdns/record/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "JavaScript",
"bytes": "1485560"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "1960271"
},
{
"name": "Ruby",
"bytes": "1459"
},
{
"name": "Shell",
"bytes": "8766"
}
],
"symlink_target": ""
} |
"""
====================================================
Extracting AFQ tract profiles from segmented bundles
====================================================
In this example, we will extract the values of a statistic from a
volume, using the coordinates along the length of a bundle. These are called
`tract profiles`
One of the challenges of extracting tract profiles is that some of the
streamlines in a bundle may diverge significantly from the bundle in some
locations. To overcome this challenge, we will use a strategy similar to that
described in [Yeatman2012]_: We will weight the contribution of each streamline
to the bundle profile based on how far this streamline is from the mean
trajectory of the bundle at that location.
"""
import dipy.stats.analysis as dsa
import dipy.tracking.streamline as dts
from dipy.segment.clustering import QuickBundles
from dipy.segment.metric import (AveragePointwiseEuclideanMetric,
ResampleFeature)
from dipy.data.fetcher import get_two_hcp842_bundles
import dipy.data as dpd
from dipy.io.streamline import load_trk
from dipy.io.image import load_nifti
import matplotlib.pyplot as plt
import numpy as np
import os.path as op
"""
To get started, we will grab the bundles that were extracted in the bundle
extraction example. If the example has not been run yet, these files don't
yet exist, and we'll need to run that example:
"""
if not (op.exists("CST_L.trk") and
op.exists("AF_L.trk") and
op.exists("slr_transform.npy")):
import bundle_extraction
"""
Either way, we can use the `dipy.io` API to read in the bundles from file.
`load_trk` returns both the streamlines, as well as header information.
"""
cst_l = load_trk("CST_L.trk", "same", bbox_valid_check=False).streamlines
af_l = load_trk("AF_L.trk", "same", bbox_valid_check=False).streamlines
transform = np.load("slr_transform.npy")
"""
In the next step, we need to make sure that all the streamlines in each bundle
are oriented the same way. For example, for the CST, we want to make sure that
all the bundles have their cortical termination at one end of the streamline.
This is that when we later extract values from a volume, we won't have
different streamlines going in opposite directions.
To orient all the streamlines in each bundles, we will create standard
streamlines, by finding the centroids of the left AF and CST bundle models.
The advantage of using the model bundles is that we can use the same standard
for different subjects, which means that we'll get roughly the same orientation
"""
model_af_l_file, model_cst_l_file = get_two_hcp842_bundles()
model_af_l = load_trk(model_af_l_file, "same",
bbox_valid_check=False).streamlines
model_cst_l = load_trk(model_cst_l_file, "same",
bbox_valid_check=False).streamlines
feature = ResampleFeature(nb_points=100)
metric = AveragePointwiseEuclideanMetric(feature)
"""
Since we are going to include all of the streamlines in the single cluster
from the streamlines, we set the threshold to `np.inf`. We pull out the
centroid as the standard.
"""
qb = QuickBundles(np.inf, metric=metric)
cluster_cst_l = qb.cluster(model_cst_l)
standard_cst_l = cluster_cst_l.centroids[0]
cluster_af_l = qb.cluster(model_af_l)
standard_af_l = cluster_af_l.centroids[0]
"""
We use the centroid streamline for each atlas bundle as the standard to orient
all of the streamlines in each bundle from the individual subject. Here, the
affine used is the one from the transform between the atlas and individual
tractogram. This is so that the orienting is done relative to the space of the
individual, and not relative to the atlas space.
"""
oriented_cst_l = dts.orient_by_streamline(cst_l, standard_cst_l)
oriented_af_l = dts.orient_by_streamline(af_l, standard_af_l)
"""
Read volumetric data from an image corresponding to this subject.
For the purpose of this, we've extracted only the FA within the bundles in
question, but in real use, this is where you would add the FA map of your
subject.
"""
files, folder = dpd.fetch_bundle_fa_hcp()
fa, fa_affine = load_nifti(op.join(folder, "hcp_bundle_fa.nii.gz"))
"""
Calculate weights for each bundle:
"""
w_cst_l = dsa.gaussian_weights(oriented_cst_l)
w_af_l = dsa.gaussian_weights(oriented_af_l)
"""
And then use the weights to calculate the tract profiles for each bundle
"""
profile_cst_l = dsa.afq_profile(fa, oriented_cst_l, fa_affine,
weights=w_cst_l)
profile_af_l = dsa.afq_profile(fa, oriented_af_l, fa_affine,
weights=w_af_l)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot(profile_cst_l)
ax1.set_ylabel("Fractional anisotropy")
ax1.set_xlabel("Node along CST")
ax2.plot(profile_af_l)
ax2.set_xlabel("Node along AF")
fig.savefig("tract_profiles")
"""
.. figure:: tract_profiles.png
:align: center
Bundle profiles for the fractional anisotropy in the left CST (left) and left
AF (right).
"""
"""
References
----------
.. [Yeatman2012] Yeatman, Jason D., Robert F. Dougherty, Nathaniel J. Myall,
Brian A. Wandell, and Heidi M. Feldman. 2012. "Tract Profiles of White
Matter Properties: Automating Fiber-Tract Quantification" PloS One 7 (11):
e49790.
.. [Garyfallidis17] Garyfallidis et al. Recognition of white matter bundles
using local and global streamline-based registration and clustering,
Neuroimage, 2017.
.. [Garyfallidis12] Garyfallidis E. et al., QuickBundles a method for
tractography simplification, Frontiers in Neuroscience, vol 6, no 175, 2012.
"""
| {
"content_hash": "b1369fc677fc4abd7cc2bc5d95db2cd3",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 80,
"avg_line_length": 32.748538011695906,
"alnum_prop": 0.7194642857142857,
"repo_name": "FrancoisRheaultUS/dipy",
"id": "0eaf66168b0982f68fa635dcb7a59f76750e33f8",
"size": "5600",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc/examples/afq_tract_profiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2932"
},
{
"name": "Makefile",
"bytes": "3686"
},
{
"name": "Python",
"bytes": "3246086"
}
],
"symlink_target": ""
} |
'''
Created on 2014-07-16

@summary: Packaging metadata for pytea, Trellis TEA implemented in
          pure Python.

@author: fiefdx
'''
from distutils.core import setup

# NOTE(review): distutils is deprecated and removed in Python 3.12;
# setuptools.setup is the drop-in replacement — confirm before upgrading.
setup(name='pytea',
      version='1.0.2',
      author = 'fiefdx',
      author_email = 'fiefdx@gmail.com',
      package_dir={'pytea': 'src'},
      packages=['pytea'],
      )
"content_hash": "f6a428916f4284709d92cfb49b151df0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 19.8,
"alnum_prop": 0.6026936026936027,
"repo_name": "fiefdx/pytea",
"id": "38bbd6e96323d1dc65e0352cdedb7d3a7f70fe9a",
"size": "321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7047"
}
],
"symlink_target": ""
} |
""" eaf3D.py
This python script performs Algorithm 4 described in C.M. Fonseca et. al.,
"On the Computation of the Empirical Attainment Function," (2011):
Algorithm 4. EAF computation in three dimensions
Basic operations defined in article:
- top(Q) returns the element at the top of a queue
- pop(Q) retrieves the element at the top and removes it from Q
--> same as python list function pop()
- input_set(p) returns the index of the input set containing p
Search operations defined in article:
- floor_x(p, X*): the point q belonging to X* with the greatest q_x <= p_x
- lower_x(p, X*): the point q belonging to X* with the greatest q_x < p_x
- ceiling_x(p, X*): the point q belonging to X* with the least q_x >= p_x
- higher_x(p, X*): the point q belonging to X* with the least q_x > p_x
These and their y-coordinate partners can be performed in logarithmic time
using 2n data structures on a height-balanced binary search tree. These are
implemented in avltree_eaf3d.py.
Indices in the algorithm are translated here from (1,...,n) to (0,...,n-1).
kwargs:
- SuperLevels: list of integers corresponding to the desired
attainment surfaces to be present in the graph
- MethodName: string naming the method being considered
- opcat: list of 3 strings naming each objective
- opal: integer correlating to color position in colors array
Refer to license.txt for permissions.
"""
from __future__ import print_function
import avltree_eaf3d as bst
import numpy as np
import pandas as pd
import seaborn
from copy import deepcopy
from glob import glob
from matplotlib import pyplot, ticker
from operator import attrgetter
from stack import Stack
# Set environment for graphs
colors = ['#49ADA2', '#7797F4', '#C973F4', '#EF6E8B', '#FFAA6C']
class EAF_3D:
    """Compute summary attainment surfaces from nondominated point sets.

    Implements Algorithm 4 of Fonseca et al. (2011) in three dimensions:
    a sequence of input sets is merged, swept in ascending z order, and
    distributed into per-level attainment surfaces.

    Attributes:
        n -- number of input sets
        x -- multiset sum of all input points (id -> ApproxPoint)
        qstack -- all points, popped in ascending z order
        lsa -- per-level lists accumulating summary attainment points
        lstar -- per-level AVL trees (current attainment surfaces L*_t)
        xstar -- per-input-set AVL trees (attained regions X*_j)
    """
    def __init__(self, sets):
        self.n = len(sets)
        self.x, m = multiset_sum(sets)
        self.a_tracker = []
        self.tmax = 0
        # Q is X sorted in ascending order of the z coordinate; pushing the
        # sorted list onto a stack means pop() yields ascending z.
        self.qstack = Stack()
        xintoq = sorted(self.x.values(), key=attrgetter('z'))
        for i in range(len(xintoq)):
            self.qstack.push(xintoq[i])
        # Set initial points for sentinels (to simulate infinity); they are
        # inserted into every search tree so floor/ceiling queries always
        # find a neighbor.
        big_pos_value = 10E10
        big_neg_value = -1 * big_pos_value
        p0_array = np.array([big_neg_value, big_pos_value, big_neg_value])
        self.p0 = ApproxPoint(None, None, p0_array)
        p1_array = np.array([big_pos_value, big_neg_value, big_neg_value])
        self.p1 = ApproxPoint(None, None, p1_array)
        lsa, lstar, xstar = init_surface_sentinels(self.n, self.p0, self.p1)
        self.lsa = lsa
        self.lstar = lstar
        self.xstar = xstar
        self.initialize()
    def initialize(self):
        """Perform the initial steps of EAF-3D.

        Pops points (in ascending z) until the first nondominated one,
        then seeds X*_j of its input set and the first surface L*_1.
        """
        p = self.qstack.pop()
        nondominated = verify_nondominated(p.point, self.x)
        while not nondominated:
            p = self.qstack.pop()
            nondominated = verify_nondominated(p.point, self.x)
        j = p.input_set()
        # insert p into X*_j
        self.xstar[j].insert(p)
        # insert p into L*_1
        self.lstar[0].insert(p)
        self.a_tracker.append(j)
    def transform(self):
        """Run the main while loop of Algorithm 4 over the whole queue.

        Each popped point p is compared against the surfaces and the
        attained region of its own input set, then submitted to the
        appropriate L*_t and X*_j trees. Afterwards the trees are dumped
        into the summary attainment surfaces (self.lsa).
        """
        while not self.qstack.isEmpty():
            p = self.qstack.pop()
            j = p.input_set()
            q = self.xstar[j].floor_x(p)
            # p contributes only if it improves on what X_j already attains
            # at this x (strictly smaller y than its floor neighbor q).
            if p.y < q.y:
                t, tmin = self.tmax, 0
                s, tmin = self.find_attainment_point(p, q, t, tmin)
                s = self.compare_p_to_surfaces(s, p, q, j, tmin)
                self.submit_points_lstar(s, p, q, tmin)
                self.submit_to_xstar(p, j)
                if j not in self.a_tracker:
                    self.a_tracker.append(j)
                    # tmax grows by one per newly-seen input set, capped at
                    # n-2 (the highest level index Algorithm 4 sweeps).
                    self.tmax = min(self.tmax + 1, self.n - 2)
        self.fill_attainment_surfaces()
    def find_attainment_point(self, p, q, t, tmin):
        """Collect candidate points s[t] for each level t in [tmin, t].

        Seeks output points r that X_j has not attained such that
        (px, pz) >= (rx, rz) and py < ry; then s = (px, ry, pz) is an
        element of J_t+1. Returns (s, tmin) where tmin is raised past any
        level whose floor neighbor p already dominates in y.
        """
        s = [None for _ in range(self.n)]
        while t >= tmin:
            r = self.lstar[t].floor_x(p)
            if r.y <= p.y:
                tmin = t + 1
            elif r.y < q.y:
                s[t] = ApproxPoint(None, None, np.array([p.x, r.y, p.z]))
            else:
                s[t] = self.lstar[t].lower_y(q)
            t -= 1
        return s, tmin
    def compare_p_to_surfaces(self, s, p, q, j, tmin):
        """Sweep q rightwards along X*_j, emitting L_t+1 points.

        Seeks all output points belonging to L_t that X_j has not attained
        and determines elements of L_t+1. The loop repeats until
        q.y <= p.y.
        """
        while q.y > p.y:
            q = self.xstar[j].higher_x(q)
            b = max(p.y, q.y)
            for t in range(self.tmax, tmin - 1, -1):
                while s[t].y >= b and (s[t].y > b or b > p.y):
                    if s[t].x >= q.x:
                        s[t] = self.lstar[t].lower_y(q)
                    else:
                        # Make new point for submission at level t+1, at
                        # height p.z (the current sweep plane).
                        combo_point = ApproxPoint(None, None, np.array([s[t].x, s[t].y, p.z]))
                        self.submit_to_lstar(combo_point, t+1)
                        s[t] = self.lstar[t].higher_x(s[t])
        return s
    def submit_points_lstar(self, s, p, q, tmin):
        """Submit the remaining candidates plus p itself to the surfaces.

        Finds output points similar to compare_p_to_surfaces, except the
        roles of x and y are reversed; finally p joins L*_tmin.
        """
        for t in range(self.tmax, tmin - 1, -1):
            if s[t].x < q.x:
                # Make new point for submission
                point = ApproxPoint(None, None, np.array([s[t].x, p.y, p.z]))
                self.submit_to_lstar(point, t+1)
        self.submit_to_lstar(p, tmin)
    def submit_to_lstar(self, u, t):
        """Submit point u to L*_t (Algorithm 5 in the article).

        u is inserted only if it improves on its floor neighbor; any
        points u dominates in (x, y) are removed, and those with smaller z
        are archived into the summary surface lsa[t].
        """
        v = self.lstar[t].floor_x(u)
        if u.y < v.y:
            omegas = self.lstar[t].list_nodes_domxy(u)
            while omegas:
                if u.z > omegas[0].point.z:
                    self.lsa[t].append(omegas[0].point)
                self.lstar[t].remove_node(omegas[0])
                omegas = self.lstar[t].list_nodes_domxy(u)
            self.lstar[t].insert(u)
    def submit_to_xstar(self, u, j):
        """Submit point u to X*_j, removing any points u dominates in (x, y)."""
        v = self.xstar[j].floor_x(u)
        if u.y < v.y:
            omegas = self.xstar[j].list_nodes_domxy(u)
            while omegas:
                self.xstar[j].remove_node(omegas[0])
                omegas = self.xstar[j].list_nodes_domxy(u)
            self.xstar[j].insert(u)
    def fill_attainment_surfaces(self):
        """Dump every surviving tree point into lsa (line 48 of Algorithm 4).

        Walks each L*_t tree breadth-first and appends every non-sentinel
        point to the corresponding summary attainment surface.
        """
        for t in range(self.n):
            leaves = [self.lstar[t].root]
            while any(leaves):
                for f in range(len(leaves)):
                    if leaves[f]:
                        approxpoint = leaves[f].point
                        sent1 = np.array_equal(approxpoint.point, self.p0.point)
                        sent2 = np.array_equal(approxpoint.point, self.p1.point)
                        # Do not include sentinels
                        if not sent1 and not sent2:
                            self.lsa[t].append(approxpoint)
                leaves = self.lstar[t].next_tree_row(leaves)
    def make_lsa_dataframe(self, **kwargs):
        """Build a pandas DataFrame from the attainment surfaces.

        Columns are the three objective names plus 'SuperLevel t/n [%]'.
        kwargs:
            SuperLevels -- list of level indices to include (default: all)
            opcat -- list of 3 objective column names
        """
        # If superlevel set numbers are not requested in kwargs, select all
        superlevels = list(range(self.n))
        opcat = ['Objective 1', 'Objective 2', 'Objective 3']
        if 'SuperLevels' in kwargs:
            superlevels = kwargs['SuperLevels']
        if 'opcat' in kwargs:
            opcat = kwargs['opcat']
        dlist_lsa = []
        for t in superlevels:
            for p in range(len(self.lsa[t])):
                # Add point to dict; level index t is reported as the
                # percentage (t+1)/n.
                dlist_lsa.append({
                    opcat[0]: self.lsa[t][p].point[0],
                    opcat[1]: self.lsa[t][p].point[1],
                    opcat[2]: self.lsa[t][p].point[2],
                    'SuperLevel t/n [%]': int(100 * (t + 1.0) / self.n)
                    })
        df_lsa = pd.DataFrame(dlist_lsa)
        return df_lsa
    def graph_eaf(self, **kwargs):
        """Save a 2D scatter-matrix plot of the EAF as .eps and .pdf.

        kwargs:
            folder -- output directory prefix for the plot files
            MethodName -- string appended to the plot file name
            opcat -- list of 3 objective column names
            opal -- index into the module-level colors palette
        (plus any kwargs forwarded to make_lsa_dataframe)
        """
        # Sort through keyword arguments
        opcat = ['Objective 1', 'Objective 2', 'Objective 3']
        plotname = 'eaf_ScatterMatrix'
        opal = 0
        if 'folder' in kwargs:
            plotname = kwargs['folder'] + plotname
        if 'MethodName' in kwargs:
            plotname = plotname + '_' + kwargs['MethodName']
        if 'opcat' in kwargs:
            opcat = kwargs['opcat']
        if 'opal' in kwargs:
            opal = kwargs['opal']
        df_lsa = self.make_lsa_dataframe(**kwargs)
        pal = seaborn.light_palette(colors[opal], reverse=True)
        scat = seaborn.PairGrid(df_lsa, vars=opcat, hue='SuperLevel t/n [%]',
                                palette=pal)
        scat = scat.map_diag(pyplot.hist)
        scat = scat.map_offdiag(pyplot.scatter, linewidths=1, edgecolor="w", s=40)
        # Set the tick labels to be at a 45 degree angle for better fit
        for ax in scat.axes.flat:
            ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(lambda xax, p: format(int(xax))))
            pyplot.setp(ax.get_xticklabels(), rotation=45)
        scat.add_legend(frameon=True)
        scat.fig.get_children()[-1].set_bbox_to_anchor((0.995, 0.925, 0, 0))
        pyplot.savefig(plotname + '.eps', format='eps', dpi=4000)
        pyplot.savefig(plotname + '.pdf', format='pdf', dpi=4000)
        pyplot.close()
    def get_attainment_surfaces(self, t=None):
        """Return surface t when given (0 is valid), else all surfaces."""
        if t or t == 0:
            return self.lsa[t]
        else:
            return self.lsa
def multiset_sum(sets):
    """Merge a sequence of nondominated point sets into one multiset.

    Duplicate points are kept. Returns (x, m) where x maps a running
    integer id to an ApproxPoint and m is the total number of points.
    """
    x = {}
    m = 0
    idnum = 0
    for set_index, approxset in enumerate(sets):
        m += len(approxset)
        for key, fitvals in approxset.items():
            x[idnum] = ApproxPoint(set_index, key, fitvals)
            idnum += 1
    return x, m
def verify_nondominated(v, x):
    """Return True iff no point in x dominates fitness vector v."""
    return not any(dom1(other.point, v) for other in x.values())
def dom1(u, v):
    """Return True iff fitness vector u weakly dominates v (minimization).

    u dominates v when each of its three components is <= the matching
    component of v and the two vectors differ in at least one objective.
    """
    nobj = 3
    if all(u[i] == v[i] for i in range(nobj)):
        return False
    return u[0] <= v[0] and u[1] <= v[1] and u[2] <= v[2]
def init_surface_sentinels(n, p0, p1):
    """Create n empty attainment surfaces plus sentinel-seeded AVL trees.

    Returns (lsa, lstar, xstar): lsa is n empty lists that will collect
    summary attainment points; lstar and xstar are n independent AVL
    trees, each pre-loaded with the two sentinel points p0 and p1.
    """
    lsa = [[] for _ in range(n)]
    # Build one template tree holding both sentinels, then hand out
    # independent deep copies so the trees never share nodes.
    template = bst.AVLTree()
    template.set_newroot(p0)
    template.insert(p1)
    lstar = [deepcopy(template) for _ in range(n)]
    xstar = [deepcopy(template) for _ in range(n)]
    return lsa, lstar, xstar
def retrieve_input_sequences(folder):
    """Import every data file found in folder.

    Returns a list with one approximate set (dict of numpy arrays) per
    file matched by glob(folder + '*').
    """
    return [import_approximate_set(fname) for fname in glob(folder + '*')]
def import_approximate_set(fname):
    """Load one tab-separated approximate-set file.

    The file is read with its first column as the row index; each row is
    converted to a length-3 object ndarray holding
    [int f[1] (B), float f[2] (W), float f[3] (s)], keyed by that row's
    index value.
    """
    frame = pd.read_csv(fname, sep='\t', index_col=0)
    columns = ('f[1] (B)', 'f[2] (W)', 'f[3] (s)')
    casts = (int, float, float)
    approxset = {}
    for key, row in frame.to_dict(orient='index').items():
        fitvals = np.empty(3, dtype=object)
        for slot, (col, cast) in enumerate(zip(columns, casts)):
            fitvals[slot] = cast(row.get(col))
        approxset[key] = fitvals
    return approxset
class ApproxPoint:
    """A single objective vector tied back to its approximate input set.

    Exposes the three objective values both as the raw vector (.point)
    and as individual .x/.y/.z coordinates for the tree search routines.
    """
    def __init__(self, xi, key, fitvals):
        self.set = xi
        self.key = key
        self.point = fitvals
        self.x, self.y, self.z = fitvals[0], fitvals[1], fitvals[2]
    def input_set(self):
        """Return the index of the input set this point came from."""
        return self.set
    def __str__(self):
        return 'ApproxPoint at {0}.'.format(self.point)
def main(folder='example/'):
    """Run the full EAF pipeline on the point sets found in *folder*.

    Generalized from a hard-coded input directory: *folder* names the
    directory containing one tab-separated file per nondominated point
    set (default 'example/', preserving the original behavior).

    Prints the resulting attainment-surface DataFrame and writes the
    scatter-matrix plot files.
    """
    sets = retrieve_input_sequences(folder)
    eaf_maker = EAF_3D(sets)
    eaf_maker.transform()
    df_lsa = eaf_maker.make_lsa_dataframe()
    print(df_lsa)
    eaf_maker.graph_eaf()
if __name__ == '__main__':
    main()
| {
"content_hash": "34081a2484bc6675c82d765e979720dd",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 101,
"avg_line_length": 37.417582417582416,
"alnum_prop": 0.5681350954478708,
"repo_name": "kyspencer/3D_Empirical_Attainment_Function",
"id": "7c4e878fc985b087dc95cafefbfe443123b5e5a7",
"size": "13620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eaf3D.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "180800"
},
{
"name": "Python",
"bytes": "76666"
}
],
"symlink_target": ""
} |
import unittest
from yar.control import decode_errors, ProgrammerError
class ErrorDecodeTest(unittest.TestCase):
    """Checks that a raw status word decodes to the expected message."""
    def test_error_decoding(self):
        expected = ("Error: Programming error Start line not set high "
                    "Device not blank RAM error RAM end not on 1K boundary")
        decoded = decode_errors(0x80C80081)
        self.assertEqual(decoded, expected)
class ErrorClassTest(unittest.TestCase):
    """Checks that ProgrammerError carries the decoded error message."""
    def test_programmer_error(self):
        err = ProgrammerError(0x80C80081)
        expected = ('Error: Programming error Start line not set high '
                    'Device not blank RAM error RAM end not on 1K boundary')
        self.assertEqual(err.message, expected)
# Allow running this module directly: discover and run the tests above.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ec9ccfc0733a49440f1a84e8165c9109",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 141,
"avg_line_length": 30.761904761904763,
"alnum_prop": 0.7058823529411765,
"repo_name": "ieure/yar",
"id": "542ee6159351d54a7d5f8f80d5e1684b41517f4d",
"size": "734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yar/tests/test_control.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "264969"
}
],
"symlink_target": ""
} |
"""Fichier contenant le masque <groupe_existant>."""
from primaires.interpreteur.masque.masque import Masque
from primaires.interpreteur.masque.fonctions import *
from primaires.interpreteur.masque.exceptions.erreur_validation \
import ErreurValidation
class GroupeExistant(Masque):
    """Masque <groupe_existant>.

    Parses and validates the name of an existing user group from the
    entered command. The validated name (lower-cased) is stored in
    ``self.nom_groupe``.
    """
    nom = "groupe_existant"
    nom_complet = "nom d'un groupe existant"
    def init(self):
        """Initialize the masque's attributes."""
        self.nom_groupe = ""
    def repartir(self, personnage, masques, commande):
        """Split this masque off the command buffer.

        Takes the first whitespace-delimited word of ``commande`` as the
        candidate group name, consumes it from the buffer and registers
        this masque. Raises ErreurValidation when the command is empty.
        """
        nom_groupe = liste_vers_chaine(commande)
        if not nom_groupe:
            raise ErreurValidation(
                "Précisez un nom de groupe existant.")
        nom_groupe = nom_groupe.split(" ")[0]
        self.a_interpreter = nom_groupe
        # Consume exactly the parsed word from the command buffer
        # (in-place slice assignment keeps the same list object).
        commande[:] = commande[len(nom_groupe):]
        masques.append(self)
        return True
    def valider(self, personnage, dic_masques):
        """Validate the masque.

        Checks the parsed name against all known group names and raises
        ErreurValidation if it is unknown.
        """
        Masque.valider(self, personnage, dic_masques)
        nom_groupe = self.a_interpreter
        noms_groupes = [groupe.nom for groupe in \
                type(self).importeur.interpreteur.groupes._groupes.values()]
        if nom_groupe not in noms_groupes:
            raise ErreurValidation(
                "|err|Ce groupe est inconnu.|ff|")
        # NOTE(review): the membership test above is case-sensitive while
        # the stored name is lower-cased -- presumably group names are
        # always lower case upstream; confirm.
        self.nom_groupe = nom_groupe.lower()
        return True
| {
"content_hash": "2653bf10a768224a439478135344b61a",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 72,
"avg_line_length": 32.42553191489362,
"alnum_prop": 0.6213910761154856,
"repo_name": "vlegoff/tsunami",
"id": "b733ff6e9cc206fdf994577757549510f306add4",
"size": "3097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/joueur/masques/groupe_existant/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes as attrs
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import portsecurity as psec
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class PortSecurityBinding(model_base.BASEV2):
    """DB model binding a port to its port-security-enabled flag.

    Rows are deleted automatically with their port (ON DELETE CASCADE).
    """
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be to able to
    # instruct SQLAlchemy to eagerly load port security binding
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))
class NetworkSecurityBinding(model_base.BASEV2):
    """DB model binding a network to its default port-security flag.

    Rows are deleted automatically with their network (ON DELETE CASCADE).
    """
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    port_security_enabled = sa.Column(sa.Boolean(), nullable=False)

    # Add a relationship to the Port model in order to be able to instruct
    # SQLAlchemy to eagerly load default port security setting for ports
    # on this network
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("port_security", uselist=False,
                            cascade='delete', lazy='joined'))
class PortSecurityDbMixin(object):
    """Mixin class to add port security.

    Persists and retrieves the port-security-enabled flag for networks
    and ports, and mirrors it into API request/response dicts.
    """
    def _process_network_port_security_create(
        self, context, network_req, network_res):
        """Create the port-security binding row for a new network.

        Copies the flag from the request into the response dict and
        returns the binding as a dict.
        """
        with context.session.begin(subtransactions=True):
            db = NetworkSecurityBinding(
                network_id=network_res['id'],
                port_security_enabled=network_req[psec.PORTSECURITY])
            context.session.add(db)
        network_res[psec.PORTSECURITY] = network_req[psec.PORTSECURITY]
        return self._make_network_port_security_dict(db)
    def _process_port_port_security_create(
        self, context, port_req, port_res):
        """Create the port-security binding row for a new port.

        Copies the flag from the request into the response dict and
        returns the binding as a dict.
        """
        with context.session.begin(subtransactions=True):
            db = PortSecurityBinding(
                port_id=port_res['id'],
                port_security_enabled=port_req[psec.PORTSECURITY])
            context.session.add(db)
        port_res[psec.PORTSECURITY] = port_req[psec.PORTSECURITY]
        return self._make_port_security_dict(db)
    def _extend_port_security_dict(self, response_data, db_data):
        """Add the port-security flag to an API response dict.

        No-op unless the plugin advertises the 'port-security' extension.
        """
        if ('port-security' in
            getattr(self, 'supported_extension_aliases', [])):
            psec_value = db_data['port_security'][psec.PORTSECURITY]
            response_data[psec.PORTSECURITY] = psec_value
    def _get_network_security_binding(self, context, network_id):
        """Return the port-security flag stored for network_id.

        Raises PortSecurityBindingNotFound when no binding exists.
        """
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]
    def _get_port_security_binding(self, context, port_id):
        """Return the port-security flag stored for port_id.

        Raises PortSecurityBindingNotFound when no binding exists.
        """
        try:
            query = self._model_query(context, PortSecurityBinding)
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
        return binding[psec.PORTSECURITY]
    def _process_port_port_security_update(
        self, context, port_req, port_res):
        """Update a port's binding from the request dict, if present.

        Silently returns when the request does not carry the flag; raises
        PortSecurityBindingNotFound when the port has no binding row.
        """
        if psec.PORTSECURITY in port_req:
            port_security_enabled = port_req[psec.PORTSECURITY]
        else:
            return
        try:
            query = self._model_query(context, PortSecurityBinding)
            port_id = port_res['id']
            binding = query.filter(
                PortSecurityBinding.port_id == port_id).one()

            binding.port_security_enabled = port_security_enabled
            port_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
    def _process_network_port_security_update(
        self, context, network_req, network_res):
        """Update a network's binding from the request dict, if present.

        Silently returns when the request does not carry the flag; raises
        PortSecurityBindingNotFound when the network has no binding row.
        """
        if psec.PORTSECURITY in network_req:
            port_security_enabled = network_req[psec.PORTSECURITY]
        else:
            return
        try:
            query = self._model_query(context, NetworkSecurityBinding)
            network_id = network_res['id']
            binding = query.filter(
                NetworkSecurityBinding.network_id == network_id).one()

            binding.port_security_enabled = port_security_enabled
            network_res[psec.PORTSECURITY] = port_security_enabled
        except exc.NoResultFound:
            raise psec.PortSecurityBindingNotFound()
    def _make_network_port_security_dict(self, port_security, fields=None):
        """Serialize a network binding, optionally filtered to fields."""
        res = {'network_id': port_security['network_id'],
               psec.PORTSECURITY: port_security[psec.PORTSECURITY]}
        return self._fields(res, fields)
    def _determine_port_security_and_has_ip(self, context, port):
        """Returns a tuple of booleans (port_security_enabled, has_ip).

        Port_security is the value associated with the port if one is
        present otherwise the value associated with the network is
        returned. has_ip is if the port is associated with an ip or not.
        """
        has_ip = self._ip_on_port(port)
        # we don't apply security groups for dhcp, router
        if (port.get('device_owner') and
                port['device_owner'].startswith('network:')):
            return (False, has_ip)

        if (psec.PORTSECURITY in port and
            isinstance(port[psec.PORTSECURITY], bool)):
            port_security_enabled = port[psec.PORTSECURITY]
        else:
            # Fall back to the network-level default when the port does
            # not carry an explicit boolean flag.
            port_security_enabled = self._get_network_security_binding(
                context, port['network_id'])

        return (port_security_enabled, has_ip)
    def _make_port_security_dict(self, port, fields=None):
        """Serialize a port binding, optionally filtered to fields."""
        res = {'port_id': port['port_id'],
               psec.PORTSECURITY: port[psec.PORTSECURITY]}
        return self._fields(res, fields)
    def _ip_on_port(self, port):
        """Return True if the port dict has at least one fixed IP."""
        return bool(port.get('fixed_ips'))
# Register dict extend functions for ports and networks so that API
# responses for both resource types include the port-security attribute
# (via PortSecurityDbMixin._extend_port_security_dict).
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
    attrs.NETWORKS, ['_extend_port_security_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
    attrs.PORTS, ['_extend_port_security_dict'])
| {
"content_hash": "e6a779e9b73642f61c1ecfa09eba668d",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 78,
"avg_line_length": 41.21951219512195,
"alnum_prop": 0.6356508875739645,
"repo_name": "oeeagle/quantum",
"id": "e5ad6b19d42c551492b7d49ad40f62b96490edce",
"size": "7480",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "neutron/db/portsecurity_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1497"
},
{
"name": "Python",
"bytes": "7346644"
},
{
"name": "Shell",
"bytes": "8983"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: add Profile.bio and Profile.website.

    The frozen ``models`` dict below is generated by South and must not
    be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: add the two new columns."""
        # Adding field 'Profile.bio'
        db.add_column('users_profile', 'bio',
                      self.gf('django.db.models.fields.TextField')(default=''),
                      keep_default=False)

        # Adding field 'Profile.website'
        db.add_column('users_profile', 'website',
                      self.gf('django.db.models.fields.URLField')(default='', max_length=255),
                      keep_default=False)

    def backwards(self, orm):
        """Reverse the migration: drop the two columns again."""
        # Deleting field 'Profile.bio'
        db.delete_column('users_profile', 'bio')

        # Deleting field 'Profile.website'
        db.delete_column('users_profile', 'website')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'users.profile': {
            'Meta': {'object_name': 'Profile'},
            'bio': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '255'})
        }
    }

    complete_apps = ['users']
"content_hash": "adf7666fe13f401848c7978b55fce7c5",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 182,
"avg_line_length": 59.77333333333333,
"alnum_prop": 0.5500780727191613,
"repo_name": "mozilla/gameon",
"id": "f3e4bf50617c37d679d3355746e554f406519541",
"size": "4507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gameon/users/migrations/0002_auto__add_field_profile_bio__add_field_profile_website.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "7140"
},
{
"name": "Puppet",
"bytes": "6677"
},
{
"name": "Python",
"bytes": "636706"
},
{
"name": "Ruby",
"bytes": "1462"
},
{
"name": "Shell",
"bytes": "3065"
}
],
"symlink_target": ""
} |
def f():
    """Raise deliberately; exercises exception propagation in the host."""
    message = 'Silverlight test for throwing exception.'
    raise AssertionError(message)
| {
"content_hash": "8a5257f8fc723cbee1496377c41358f7",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 68,
"avg_line_length": 39,
"alnum_prop": 0.7435897435897436,
"repo_name": "tempbottle/dlr",
"id": "36a6f5e7c2e154bf98274769b23871f313f1c535",
"size": "79",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Src/Hosts/Silverlight/Tests/tests/regressions/fixtures/module_throw.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "4887"
},
{
"name": "Batchfile",
"bytes": "11220"
},
{
"name": "C",
"bytes": "1231"
},
{
"name": "C#",
"bytes": "7367932"
},
{
"name": "C++",
"bytes": "106677"
},
{
"name": "CSS",
"bytes": "8115"
},
{
"name": "HTML",
"bytes": "195477"
},
{
"name": "JavaScript",
"bytes": "99301"
},
{
"name": "Makefile",
"bytes": "449"
},
{
"name": "PowerShell",
"bytes": "2619"
},
{
"name": "Python",
"bytes": "282073"
},
{
"name": "Ruby",
"bytes": "84462"
},
{
"name": "Visual Basic",
"bytes": "9936"
}
],
"symlink_target": ""
} |
class RepresentationMixin(object):
    """Marks an object as renderable by the React template tags.

    Subclasses must override to_react_representation() to return the
    props passed to the React component.
    """
    def to_react_representation(self, context=None):
        """Must be overridden; always raises in this base class."""
        raise NotImplementedError("Missing property to_react_representation in class")
| {
"content_hash": "e6e1701aca530ab3d768249259c1d008",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 86,
"avg_line_length": 58.333333333333336,
"alnum_prop": 0.7771428571428571,
"repo_name": "Frojd/django-react-templatetags",
"id": "a1f17dd718bbc0560bbaf7b9e2efd8fa489fa85e",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "django_react_templatetags/mixins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "469"
},
{
"name": "HTML",
"bytes": "955"
},
{
"name": "Python",
"bytes": "54914"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
} |
import PythonQt
from PythonQt import QtCore, QtGui
from ddapp.timercallback import TimerCallback
from ddapp.asynctaskqueue import *
def startApplication(enableQuitTimer=False):
    """Enter the Qt event loop on the existing QApplication instance.

    When enableQuitTimer is True, a single-shot timer quits the
    application 0.1s after startup (useful for automated test runs).
    """
    app = QtGui.QApplication.instance()
    if enableQuitTimer:
        timer = TimerCallback()
        timer.callback = app.quit
        timer.singleShot(0.1)
    app.exec_()
def main():
    """Build a demo AsyncTaskQueue, start it, then enter the Qt loop."""
    # Disable interactive prompts so UserPromptTask falls back to its
    # testingValue argument.
    UserPromptTask.promptsEnabled = False
    q = AsyncTaskQueue()
    q.addTask(PrintTask('start'))
    q.addTask(DelayTask(0.1))
    q.addTask(UserPromptTask('Continue?', testingValue=False))
    #q.addTask(PauseTask())
    q.addTask(PrintTask('done'))
    q.addTask(QuitTask())
    q.start()
    # Export this function's locals to module scope for interactive
    # inspection; renaming any local above would change the exported names.
    globals().update(locals())
    #_console.show()
    startApplication(enableQuitTimer=False)
if __name__ == '__main__':
    main()
| {
"content_hash": "64e4047a54e72145d715135c3b62ecd6",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 62,
"avg_line_length": 22.72972972972973,
"alnum_prop": 0.6872770511296076,
"repo_name": "empireryan/director",
"id": "aba1b5bcb91631bbb9adf67a161a69565c8c7bb7",
"size": "841",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/python/tests/testTaskQueue.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "119719"
},
{
"name": "C++",
"bytes": "386403"
},
{
"name": "CMake",
"bytes": "48084"
},
{
"name": "GLSL",
"bytes": "15443"
},
{
"name": "MATLAB",
"bytes": "144018"
},
{
"name": "Makefile",
"bytes": "4876"
},
{
"name": "Python",
"bytes": "1993315"
},
{
"name": "Shell",
"bytes": "1337"
}
],
"symlink_target": ""
} |
class Node(object):
    """Singly linked list node carrying an extra 'random' pointer."""
    def __init__(self, val, next, random):
        self.val, self.next, self.random = val, next, random
class Solution(object):
    """Deep-copies a linked list whose nodes carry random pointers.

    Uses the O(1)-extra-space interleaving technique: clones are spliced
    between original nodes, random pointers are wired through the
    interleaving, and finally the two lists are unzipped.
    """
    def copyRandomList(self, head):
        """
        :type head: RandomListNode
        :rtype: RandomListNode
        """
        if not head:
            return None
        # Pass 1: splice a clone of each node directly after the original.
        cur = head
        while cur:
            clone = Node(cur.val, cur.next, None)
            cur.next = clone
            cur = clone.next
        # Pass 2: each clone's random is the clone of the original's random,
        # which sits right after it in the interleaved list.
        cur = head
        while cur:
            if cur.random:
                cur.next.random = cur.random.next
            cur = cur.next.next
        # Pass 3: unzip the interleaved list, restoring the original list.
        cur, copied = head, head.next
        while cur:
            clone = cur.next
            cur.next = clone.next
            if clone.next:
                clone.next = clone.next.next
            cur = cur.next
        return copied
| {
"content_hash": "b2abcc16fb4dfd4b39f70ba0cf5b4db1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 45,
"avg_line_length": 23.833333333333332,
"alnum_prop": 0.4568764568764569,
"repo_name": "Lanceolata/code-problems",
"id": "cfb6e565847640d151be16d9124a706b6c30f8fb",
"size": "918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/leetcode/Question_138_Copy_List_with_Random_Pointer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "127"
},
{
"name": "C++",
"bytes": "130299"
},
{
"name": "Java",
"bytes": "149575"
},
{
"name": "Python",
"bytes": "106289"
}
],
"symlink_target": ""
} |
"""Driver method for server_report.
Functions:
main -- main method
"""
import server_report.func as func
import sys
import getopt
import logging
helpstring = ("report [-h|--help] [-v|--verbose] "
"[-d|--debug] [-l|--list]\n"
" [-u|--users] [-j|--jobs] [-a|--active] [-e|--error]\n"
" [-E <uuid>|--endpoint=<uuid>]\n"
" [--start=<isodate>]\n"
" [--end=<isodate>]")
shortops = "hlE:ujeavd"
longops = ["help", "list", "endpoint=", "users", "jobs", "start=",
"end=", "active", "error", "verbose", "debug"]
def main():
    """Parse options and call appropriate method from func.

    First pass over the options collects the date range, endpoint and
    verbosity; second pass runs each requested report with those
    settings. Exits 1 on usage errors, 0 after --help.
    """
    arguments = sys.argv[1:]
    startdate = ''
    enddate = ''
    endpoint = ''
    logging_level = logging.WARNING
    try:
        opts, args = getopt.getopt(arguments, shortops, longops)
    except getopt.GetoptError:
        print("{}".format(helpstring))
        sys.exit(1)
    if not opts:
        print("{}".format(helpstring))
        sys.exit(1)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            print("{}".format(helpstring))
            sys.exit(0)
        elif opt in ("-E", "--endpoint"):
            endpoint = arg
        # BUG FIX: the originals tested `opt in ("--start")` / ("--end"),
        # but ("--start") is a plain string (missing comma), so `in` did a
        # substring match -- e.g. "-e" in "--end" was True, clobbering
        # enddate. Use exact equality instead.
        elif opt == "--start":
            startdate = arg
        elif opt == "--end":
            enddate = arg
        elif opt in ("-v", "--verbose"):
            logging_level = logging.INFO
        elif opt in ("-d", "--debug"):
            logging_level = logging.DEBUG
    logging.basicConfig(level=logging_level)
    for opt, arg in opts:
        if opt in ("-u", "--users"):
            func.user_frequency(endpoint, startdate, enddate)
        elif opt in ("-j", "--jobs"):
            func.job_count(endpoint, startdate, enddate)
        elif opt in ("-l", "--list"):
            func.list_endpoints()
        elif opt in ("-a", "--active"):
            func.running_count(endpoint)
        elif opt in ("-e", "--error"):
            func.errored_jobs(endpoint, startdate, enddate)
if __name__ == "__main__":
    main()
| {
"content_hash": "748d2efacf3e72b3dd2474bc476bb96e",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 31.238805970149254,
"alnum_prop": 0.5064500716674629,
"repo_name": "bsurc/server_report",
"id": "e0f97903a77c6d7bd5eca2d3399ed4d17694ee96",
"size": "2106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server_report/report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12307"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
from json import loads
import sys
# Python 3 removed the `long` builtin; alias it to int so generated code
# that references `long` runs on both major versions.
if sys.version_info[0] >= 3:
  long = int
from .ttypes import UTF8STRINGS, def_PY_RESERVED_KEYWORD
| {
"content_hash": "15c92702691a771db90e339a399470d1",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 184,
"avg_line_length": 33.714285714285715,
"alnum_prop": 0.8220338983050848,
"repo_name": "facebook/fbthrift",
"id": "d600b178661d7f9cf397c3a272f789e4f5438e0b",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "thrift/compiler/test/fixtures/py-reserved/gen-py/test/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15608"
},
{
"name": "C++",
"bytes": "10658844"
},
{
"name": "CMake",
"bytes": "147347"
},
{
"name": "CSS",
"bytes": "4028"
},
{
"name": "Cython",
"bytes": "339005"
},
{
"name": "Emacs Lisp",
"bytes": "11229"
},
{
"name": "Go",
"bytes": "447092"
},
{
"name": "Hack",
"bytes": "313122"
},
{
"name": "Java",
"bytes": "1990062"
},
{
"name": "JavaScript",
"bytes": "38872"
},
{
"name": "Mustache",
"bytes": "1269560"
},
{
"name": "Python",
"bytes": "1623026"
},
{
"name": "Ruby",
"bytes": "6111"
},
{
"name": "Rust",
"bytes": "283392"
},
{
"name": "Shell",
"bytes": "6615"
},
{
"name": "Thrift",
"bytes": "1859041"
},
{
"name": "Vim Script",
"bytes": "2887"
}
],
"symlink_target": ""
} |
"""Invocation-side implementation of gRPC Python."""
import sys
import threading
import time
import logging
import grpc
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
# User-agent string advertised on every channel created by this module.
_USER_AGENT = 'Python-gRPC-{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_INFINITE_FUTURE = cygrpc.Timespec(float('+inf'))
_EMPTY_METADATA = cygrpc.Metadata(())
# Per-cardinality sets of cygrpc operations that are initially outstanding
# ("due") on a new RPC; _RPCState.due starts from one of these and drains
# as completion-queue events arrive.
_UNARY_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                            cygrpc.OperationType.send_message,
                            cygrpc.OperationType.send_close_from_client,
                            cygrpc.OperationType.receive_initial_metadata,
                            cygrpc.OperationType.receive_message,
                            cygrpc.OperationType.receive_status_on_client,)
_UNARY_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                             cygrpc.OperationType.send_message,
                             cygrpc.OperationType.send_close_from_client,
                             cygrpc.OperationType.receive_initial_metadata,
                             cygrpc.OperationType.receive_status_on_client,)
_STREAM_UNARY_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                             cygrpc.OperationType.receive_initial_metadata,
                             cygrpc.OperationType.receive_message,
                             cygrpc.OperationType.receive_status_on_client,)
_STREAM_STREAM_INITIAL_DUE = (cygrpc.OperationType.send_initial_metadata,
                              cygrpc.OperationType.receive_initial_metadata,
                              cygrpc.OperationType.receive_status_on_client,)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')
def _deadline(timeout):
    """Convert an optional timeout in seconds to a (deadline, Timespec) pair.

    A None timeout maps to (None, _INFINITE_FUTURE); otherwise the deadline
    is an absolute wall-clock time computed from time.time().
    """
    if timeout is not None:
        deadline = time.time() + timeout
        return deadline, cygrpc.Timespec(deadline)
    return None, _INFINITE_FUTURE
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
    """Wait on `condition` once, bounded by the absolute time `until`.

    A None `until` waits indefinitely; a deadline already in the past
    raises grpc.FutureTimeoutError without waiting.
    """
    if until is None:
        condition.wait()
        return
    remaining = until - time.time()
    if remaining < 0:
        raise grpc.FutureTimeoutError()
    condition.wait(timeout=remaining)
# %-format template for cygrpc.CallError values we have no better text for.
_INTERNAL_CALL_ERROR_MESSAGE_FORMAT = (
    'Internal gRPC call error %d. ' +
    'Please report to https://github.com/grpc/grpc/issues')
def _check_call_error(call_error, metadata):
    """Raise ValueError if starting a client batch reported an error."""
    if call_error == cygrpc.CallError.ok:
        return
    if call_error == cygrpc.CallError.invalid_metadata:
        raise ValueError('metadata was invalid: %s' % metadata)
    raise ValueError(_INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error)
def _call_error_set_RPCstate(state, call_error, metadata):
    """Record a cygrpc call error on the RPC state instead of raising."""
    if call_error == cygrpc.CallError.invalid_metadata:
        details = 'metadata was invalid: %s' % metadata
    else:
        details = _INTERNAL_CALL_ERROR_MESSAGE_FORMAT % call_error
    _abort(state, grpc.StatusCode.INTERNAL, details)
class _RPCState(object):
def __init__(self, due, initial_metadata, trailing_metadata, code, details):
self.condition = threading.Condition()
# The cygrpc.OperationType objects representing events due from the RPC's
# completion queue.
self.due = set(due)
self.initial_metadata = initial_metadata
self.response = None
self.trailing_metadata = trailing_metadata
self.code = code
self.details = details
# The semantics of grpc.Future.cancel and grpc.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC.
self.cancelled = False
self.callbacks = []
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = _EMPTY_METADATA
state.trailing_metadata = _EMPTY_METADATA
def _handle_event(event, state, response_deserializer):
    """Fold one completion-queue batch event into the RPC state.

    Must be called with state.condition held.  Drains the completed
    operation types from state.due, records metadata/response/status, and
    returns the done-callbacks to be invoked outside the lock.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.received_metadata
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.received_message.bytes()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    # Deserializer failure terminates the RPC locally.
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.received_metadata
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.received_status_code)
                if code is None:
                    # Untranslatable wire codes surface as UNKNOWN with an
                    # explanatory details string.
                    state.code = grpc.StatusCode.UNKNOWN
                    state.details = _unknown_code_details(
                        batch_operation.received_status_code,
                        batch_operation.received_status_details)
                else:
                    state.code = code
                    state.details = batch_operation.received_status_details
            # Status receipt ends the RPC: hand back the registered
            # callbacks and refuse any further registrations.
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
def _event_handler(state, call, response_deserializer):
    """Build a completion-queue callback bound to one RPC's state and call.

    The returned callable applies the event under state.condition, wakes
    waiters, runs done-callbacks outside the lock, and returns the call
    object when no more events are due (so the spin thread can retire it).
    """
    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        for callback in callbacks:
            callback()
        if done:
            return call
        return None
    return handle_event
def _consume_request_iterator(request_iterator, state, call,
                              request_serializer):
    """Drain `request_iterator` onto `call` from a cleanup-managed thread.

    Each request is serialized and sent as its own batch; the thread waits
    for the previous send to complete before pulling the next request, and
    stops on iterator exhaustion, iteration/serialization failure, or RPC
    termination.
    """
    event_handler = _event_handler(state, call, None)
    def consume_request_iterator():
        while True:
            try:
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception as e:
                # A faulty user iterator cancels the RPC rather than
                # crashing the thread.
                logging.exception("Exception iterating requests!")
                call.cancel()
                _abort(state, grpc.StatusCode.UNKNOWN,
                       "Exception iterating requests!")
                return
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        call.cancel()
                        details = 'Exception serializing request!'
                        _abort(state, grpc.StatusCode.INTERNAL, details)
                        return
                    else:
                        operations = (cygrpc.operation_send_message(
                            serialized_request, _EMPTY_FLAGS),)
                        call.start_client_batch(
                            cygrpc.Operations(operations), event_handler)
                        state.due.add(cygrpc.OperationType.send_message)
                        # Block until this send_message completes (or the
                        # RPC terminates) before sending the next request.
                        while True:
                            state.condition.wait()
                            if state.code is None:
                                if cygrpc.OperationType.send_message not in state.due:
                                    break
                            else:
                                return
                else:
                    return
        # Iterator exhausted: half-close the client side if still running.
        with state.condition:
            if state.code is None:
                operations = (
                    cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),)
                call.start_client_batch(
                    cygrpc.Operations(operations), event_handler)
                state.due.add(cygrpc.OperationType.send_close_from_client)
    def stop_consumption_thread(timeout):
        # Cleanup hook: cancel the RPC so the consumption thread unblocks.
        with state.condition:
            if state.code is None:
                call.cancel()
                state.cancelled = True
                _abort(state, grpc.StatusCode.CANCELLED, 'Cancelled!')
                state.condition.notify_all()
    consumption_thread = _common.CleanupThread(
        stop_consumption_thread, target=consume_request_iterator)
    consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
    """Client-side view of an RPC: simultaneously an error, a future, and
    a call/response-iterator.

    All mutable state lives in the shared _RPCState; every accessor
    synchronizes on state.condition.  A terminated-with-error RPC is
    reported by raising the _Rendezvous itself (it is a grpc.RpcError).
    """
    def __init__(self, state, call, response_deserializer, deadline):
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline
    def cancel(self):
        """Request cancellation; unconditionally returns False."""
        with self._state.condition:
            if self._state.code is None:
                self._call.cancel()
                self._state.cancelled = True
                _abort(self._state, grpc.StatusCode.CANCELLED, 'Cancelled!')
                self._state.condition.notify_all()
            return False
    def cancelled(self):
        with self._state.condition:
            return self._state.cancelled
    def running(self):
        with self._state.condition:
            return self._state.code is None
    def done(self):
        with self._state.condition:
            return self._state.code is not None
    def result(self, timeout=None):
        """Block for the response; raise on cancellation or RPC failure."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    raise self
    def exception(self, timeout=None):
        """Block for termination; return self on failure, None on success."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self
    def traceback(self, timeout=None):
        """Block for termination; return a traceback for a failed RPC."""
        until = None if timeout is None else time.time() + timeout
        with self._state.condition:
            while True:
                if self._state.code is None:
                    _wait_once_until(self._state.condition, until)
                elif self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Raise-and-catch to materialize a traceback object.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]
    def add_done_callback(self, fn):
        with self._state.condition:
            if self._state.code is None:
                self._state.callbacks.append(lambda: fn(self))
                return
        # Already terminated: invoke immediately, outside the lock.
        fn(self)
    def _next(self):
        """Fetch the next streamed response or raise StopIteration/self."""
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state, self._call,
                                               self._response_deserializer)
                self._call.start_client_batch(
                    cygrpc.Operations(
                        (cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
                    event_handler)
                self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
            while True:
                self._state.condition.wait()
                if self._state.response is not None:
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self
    def __iter__(self):
        return self
    def __next__(self):
        return self._next()
    def next(self):
        # Python 2 iterator protocol.
        return self._next()
    def is_active(self):
        with self._state.condition:
            return self._state.code is None
    def time_remaining(self):
        if self._deadline is None:
            return None
        else:
            return max(self._deadline - time.time(), 0)
    def add_callback(self, callback):
        """Register a termination callback; False if already terminated."""
        with self._state.condition:
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True
    def initial_metadata(self):
        # Blocks until initial metadata has been received.
        with self._state.condition:
            while self._state.initial_metadata is None:
                self._state.condition.wait()
            return _common.application_metadata(self._state.initial_metadata)
    def trailing_metadata(self):
        # Blocks until the RPC has terminated.
        with self._state.condition:
            while self._state.trailing_metadata is None:
                self._state.condition.wait()
            return _common.application_metadata(self._state.trailing_metadata)
    def code(self):
        with self._state.condition:
            while self._state.code is None:
                self._state.condition.wait()
            return self._state.code
    def details(self):
        with self._state.condition:
            while self._state.details is None:
                self._state.condition.wait()
            return _common.decode(self._state.details)
    def _repr(self):
        with self._state.condition:
            if self._state.code is None:
                return '<_Rendezvous object of in-flight RPC>'
            else:
                return '<_Rendezvous of RPC that terminated with ({}, {})>'.format(
                    self._state.code, _common.decode(self._state.details))
    def __repr__(self):
        return self._repr()
    def __str__(self):
        return self._repr()
    def __del__(self):
        # Dropping the last reference to an in-flight RPC cancels it.
        with self._state.condition:
            if self._state.code is None:
                self._call.cancel()
                self._state.cancelled = True
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
    """Serialize a unary request and compute its deadline.

    Returns (deadline, deadline_timespec, serialized_request, rendezvous):
    on success the rendezvous slot is None; on serialization failure the
    payload slot is None and a terminated _Rendezvous describes the error.
    """
    deadline, deadline_timespec = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is not None:
        return deadline, deadline_timespec, serialized_request, None
    state = _RPCState((), _EMPTY_METADATA, _EMPTY_METADATA,
                      grpc.StatusCode.INTERNAL,
                      'Exception serializing request!')
    rendezvous = _Rendezvous(state, None, None, deadline)
    return deadline, deadline_timespec, None, rendezvous
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Return the unary response (optionally paired with a grpc.Call).

    A non-OK terminal status raises a _Rendezvous describing the failure.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _Rendezvous(state, None, None, deadline)
    if with_call:
        rendezvous = _Rendezvous(state, call, None, deadline)
        return state.response, rendezvous
    return state.response
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Invoker for unary-request/unary-response RPCs on one method."""
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
    def _prepare(self, request, timeout, metadata):
        # Serialize the request and assemble the full unary-unary batch;
        # on serialization failure the rendezvous slot carries the error.
        deadline, deadline_timespec, serialized_request, rendezvous = (
            _start_unary_request(request, timeout, self._request_serializer))
        if serialized_request is None:
            return None, None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_send_message(serialized_request, _EMPTY_FLAGS),
                cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
                cygrpc.operation_receive_message(_EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            return state, operations, deadline, deadline_timespec, None
    def _blocking(self, request, timeout, metadata, credentials):
        # Run the RPC on a private completion queue, polling inline until
        # all operations complete.
        state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
            request, timeout, metadata)
        if rendezvous:
            raise rendezvous
        else:
            completion_queue = cygrpc.CompletionQueue()
            call = self._channel.create_call(None, 0, completion_queue,
                                             self._method, None,
                                             deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), None)
            _check_call_error(call_error, metadata)
            _handle_event(completion_queue.poll(), state,
                          self._response_deserializer)
            return state, call, deadline
    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Invoke synchronously and return the response message."""
        state, call, deadline = self._blocking(request, timeout, metadata,
                                               credentials)
        return _end_unary_response_blocking(state, call, False, deadline)
    def with_call(self, request, timeout=None, metadata=None, credentials=None):
        """Invoke synchronously; return (response, grpc.Call)."""
        state, call, deadline = self._blocking(request, timeout, metadata,
                                               credentials)
        return _end_unary_response_blocking(state, call, True, deadline)
    def future(self, request, timeout=None, metadata=None, credentials=None):
        """Invoke asynchronously via the channel's managed-call machinery."""
        state, operations, deadline, deadline_timespec, rendezvous = self._prepare(
            request, timeout, metadata)
        if rendezvous:
            return rendezvous
        else:
            call, drive_call = self._managed_call(None, 0, self._method, None,
                                                  deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            event_handler = _event_handler(state, call,
                                           self._response_deserializer)
            with state.condition:
                call_error = call.start_client_batch(
                    cygrpc.Operations(operations), event_handler)
                if call_error != cygrpc.CallError.ok:
                    _call_error_set_RPCstate(state, call_error, metadata)
                    return _Rendezvous(state, None, None, deadline)
                # Batch accepted: the managed-call contract requires
                # drive_call() be invoked exactly in this case.
                drive_call()
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Invoker for unary-request/stream-response RPCs on one method."""
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
    def __call__(self, request, timeout=None, metadata=None, credentials=None):
        """Invoke and return a response-iterating _Rendezvous."""
        deadline, deadline_timespec, serialized_request, rendezvous = (
            _start_unary_request(request, timeout, self._request_serializer))
        if serialized_request is None:
            raise rendezvous
        else:
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            call, drive_call = self._managed_call(None, 0, self._method, None,
                                                  deadline_timespec)
            if credentials is not None:
                call.set_credentials(credentials._credentials)
            event_handler = _event_handler(state, call,
                                           self._response_deserializer)
            with state.condition:
                # Initial metadata is received in its own batch; the send
                # side and status receipt form a second batch.
                call.start_client_batch(
                    cygrpc.Operations((
                        cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),
                    )), event_handler)
                operations = (
                    cygrpc.operation_send_initial_metadata(
                        _common.cygrpc_metadata(metadata),
                        _EMPTY_FLAGS), cygrpc.operation_send_message(
                            serialized_request, _EMPTY_FLAGS),
                    cygrpc.operation_send_close_from_client(_EMPTY_FLAGS),
                    cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
                call_error = call.start_client_batch(
                    cygrpc.Operations(operations), event_handler)
                if call_error != cygrpc.CallError.ok:
                    _call_error_set_RPCstate(state, call_error, metadata)
                    return _Rendezvous(state, None, None, deadline)
                drive_call()
            return _Rendezvous(state, call, self._response_deserializer,
                               deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Invoker for stream-request/unary-response RPCs on one method."""
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
    def _blocking(self, request_iterator, timeout, metadata, credentials):
        # Feed requests from a background thread while this thread polls a
        # private completion queue until all operations are done.
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        completion_queue = cygrpc.CompletionQueue()
        call = self._channel.create_call(None, 0, completion_queue,
                                         self._method, None, deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        with state.condition:
            call.start_client_batch(
                cygrpc.Operations(
                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
                None)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_receive_message(_EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), None)
            _check_call_error(call_error, metadata)
            _consume_request_iterator(request_iterator, state, call,
                                      self._request_serializer)
        while True:
            event = completion_queue.poll()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    break
        return state, call, deadline
    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None):
        """Invoke synchronously and return the response message."""
        state, call, deadline = self._blocking(request_iterator, timeout,
                                               metadata, credentials)
        return _end_unary_response_blocking(state, call, False, deadline)
    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None):
        """Invoke synchronously; return (response, grpc.Call)."""
        state, call, deadline = self._blocking(request_iterator, timeout,
                                               metadata, credentials)
        return _end_unary_response_blocking(state, call, True, deadline)
    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None):
        """Invoke asynchronously via the channel's managed-call machinery."""
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        call, drive_call = self._managed_call(None, 0, self._method, None,
                                              deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        event_handler = _event_handler(state, call, self._response_deserializer)
        with state.condition:
            call.start_client_batch(
                cygrpc.Operations(
                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
                event_handler)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_receive_message(_EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), event_handler)
            if call_error != cygrpc.CallError.ok:
                _call_error_set_RPCstate(state, call_error, metadata)
                return _Rendezvous(state, None, None, deadline)
            drive_call()
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Invoker for stream-request/stream-response RPCs on one method."""
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None):
        """Invoke and return a response-iterating _Rendezvous; requests are
        consumed from `request_iterator` on a background thread."""
        deadline, deadline_timespec = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        call, drive_call = self._managed_call(None, 0, self._method, None,
                                              deadline_timespec)
        if credentials is not None:
            call.set_credentials(credentials._credentials)
        event_handler = _event_handler(state, call, self._response_deserializer)
        with state.condition:
            call.start_client_batch(
                cygrpc.Operations(
                    (cygrpc.operation_receive_initial_metadata(_EMPTY_FLAGS),)),
                event_handler)
            operations = (
                cygrpc.operation_send_initial_metadata(
                    _common.cygrpc_metadata(metadata), _EMPTY_FLAGS),
                cygrpc.operation_receive_status_on_client(_EMPTY_FLAGS),)
            call_error = call.start_client_batch(
                cygrpc.Operations(operations), event_handler)
            if call_error != cygrpc.CallError.ok:
                _call_error_set_RPCstate(state, call_error, metadata)
                return _Rendezvous(state, None, None, deadline)
            drive_call()
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer)
        return _Rendezvous(state, call, self._response_deserializer, deadline)
class _ChannelCallState(object):
    """Bookkeeping for a channel's managed calls and their shared queue."""
    def __init__(self, channel):
        self.lock = threading.Lock()
        self.channel = channel
        self.completion_queue = cygrpc.CompletionQueue()
        # Set of in-flight managed calls, or None when no spin thread is
        # running (the spin thread exits when the set empties).
        self.managed_calls = None
def _run_channel_spin_thread(state):
    """Start the thread that drains the channel's shared completion queue.

    The thread runs each event's tag (the handler returned by
    _event_handler), retires completed calls, and exits once no managed
    calls remain.
    """
    def channel_spin():
        while True:
            event = state.completion_queue.poll()
            completed_call = event.tag(event)
            if completed_call is not None:
                with state.lock:
                    state.managed_calls.remove(completed_call)
                    if not state.managed_calls:
                        # Last call retired: mark the spin thread gone.
                        state.managed_calls = None
                        return
    def stop_channel_spin(timeout):
        # Cleanup hook: cancel all in-flight calls so the queue drains.
        with state.lock:
            if state.managed_calls is not None:
                for call in state.managed_calls:
                    call.cancel()
    channel_spin_thread = _common.CleanupThread(
        stop_channel_spin, target=channel_spin)
    channel_spin_thread.start()
def _channel_managed_call_management(state):
    """Return a factory of (call, drive) pairs bound to `state`'s queue."""
    def create(parent, flags, method, host, deadline):
        """Creates a managed cygrpc.Call and a function to call to drive it.
        If operations are successfully added to the returned cygrpc.Call, the
        returned function must be called. If operations are not successfully added
        to the returned cygrpc.Call, the returned function must not be called.
        Args:
          parent: A cygrpc.Call to be used as the parent of the created call.
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A cygrpc.Timespec to be the deadline of the created call.
        Returns:
          A cygrpc.Call with which to conduct an RPC and a function to call if
            operations are successfully started on the call.
        """
        call = state.channel.create_call(parent, flags, state.completion_queue,
                                         method, host, deadline)
        def drive():
            # Register the call; the first registration also starts the
            # spin thread that services the shared completion queue.
            with state.lock:
                if state.managed_calls is None:
                    state.managed_calls = set((call,))
                    _run_channel_spin_thread(state)
                else:
                    state.managed_calls.add(call)
        return call, drive
    return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Deliver connectivity updates to callbacks until none are pending.

    Runs on its own thread; clears state.delivering under the lock before
    returning so a new delivery thread can be spawned later.
    """
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            callable_util.call_logging_exceptions(
                callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
                connectivity)
        with state.lock:
            callbacks = _deliveries(state)
            if not callbacks:
                state.delivering = False
                return
            connectivity = state.connectivity
def _spawn_delivery(state, callbacks):
    """Start a thread delivering `callbacks` and mark delivery in progress."""
    delivery_args = (state, state.connectivity, callbacks,)
    delivering_thread = threading.Thread(target=_deliver, args=delivery_args)
    delivering_thread.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Poll channel connectivity and fan updates out to subscribers.

    Runs on a cleanup-managed thread; exits when all subscriptions are
    gone and no connect attempt is pending.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                connectivity])
        callbacks = tuple(callback
                          for callback, unused_but_known_to_be_none_connectivity
                          in state.callbacks_and_connectivities)
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    completion_queue = cygrpc.CompletionQueue()
    while True:
        # Watch with a short deadline so the thread periodically re-checks
        # whether it should keep running.
        channel.watch_connectivity_state(connectivity,
                                         cygrpc.Timespec(time.time() + 0.2),
                                         completion_queue, None)
        event = completion_queue.poll()
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # No subscribers and no pending connect attempt: retire.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _moot(state):
with state.lock:
del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
    """Register a connectivity callback, starting helpers as needed.

    Three cases under the lock: (1) first subscriber — start the polling
    thread; (2) connectivity already known and no delivery in flight —
    deliver immediately to the new callback; (3) otherwise just record it.
    """
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            def cancel_all_subscriptions(timeout):
                _moot(state)
            polling_thread = _common.CleanupThread(
                cancel_all_subscriptions,
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.start()
            state.polling = True
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity
) in enumerate(state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _options(options):
    """Append the gRPC Python user-agent channel arg to the user's options."""
    augmented = list(options)
    augmented.append(
        (cygrpc.ChannelArgKey.primary_user_agent_string, _USER_AGENT))
    return augmented
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""
    def __init__(self, target, options, credentials):
        """Constructor.
        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
        """
        self._channel = cygrpc.Channel(
            _common.encode(target),
            _common.channel_args(_options(options)), credentials)
        # Separate bookkeeping for managed RPCs and connectivity watching.
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
    def subscribe(self, callback, try_to_connect=None):
        # Register callback for connectivity updates (grpc.Channel API).
        _subscribe(self._connectivity_state, callback, try_to_connect)
    def unsubscribe(self, callback):
        _unsubscribe(self._connectivity_state, callback)
    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        return _UnaryUnaryMultiCallable(
            self._channel,
            _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)
    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        return _UnaryStreamMultiCallable(
            self._channel,
            _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)
    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        return _StreamUnaryMultiCallable(
            self._channel,
            _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)
    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        return _StreamStreamMultiCallable(
            self._channel,
            _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)
    def __del__(self):
        # Drop all connectivity subscriptions so helper threads can exit.
        _moot(self._connectivity_state)
| {
"content_hash": "5f619fc985bed15624bf67d4c139d497",
"timestamp": "",
"source": "github",
"line_count": 948,
"max_line_length": 86,
"avg_line_length": 40.88291139240506,
"alnum_prop": 0.5789921820574347,
"repo_name": "infinit/grpc",
"id": "26d93faf75287e6d91ece853118257a5ad8426a4",
"size": "40285",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/grpcio/grpc/_channel.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "23184"
},
{
"name": "C",
"bytes": "6529924"
},
{
"name": "C#",
"bytes": "1491805"
},
{
"name": "C++",
"bytes": "1884122"
},
{
"name": "CMake",
"bytes": "330222"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "318460"
},
{
"name": "M4",
"bytes": "39111"
},
{
"name": "Makefile",
"bytes": "768062"
},
{
"name": "Objective-C",
"bytes": "329623"
},
{
"name": "PHP",
"bytes": "152422"
},
{
"name": "Protocol Buffer",
"bytes": "114909"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1333921"
},
{
"name": "Ruby",
"bytes": "627618"
},
{
"name": "Shell",
"bytes": "56763"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
"""
The purpose of this script is to generate a clean directory
for upload to Arxiv. The script has several steps:
1. read the tex file
2. strip the comments (leaving a %)
3. flatten the file for input
4. re-strip the comments
5. find figures
6. make an arxiv directory with a timestamp
7. copy relevant class/style files
8. copy figures
9. copy the bbl file (or generating the bbl file)
10. copy extra files
usage:
python parxiv.py file.tex
"""
from __future__ import print_function
import glob
import re
import os
import io
import sys
import time
import shutil
import tempfile
import subprocess
import errno
import ply.lex
__version__ = '0.2.0'
# Python2 FileNotFoundError support: Python 2 has no FileNotFoundError,
# so alias it to its Python-3 base class (IOError/OSError) so the
# except-clauses below work on both interpreters.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError  # no FileNotFoundError in Python 2
def strip_comments(source):
    """Strip LaTeX comments from *source*, collapsing each to a bare '%'.

    Implemented as a ply.lex tokenizer so '%' is handled correctly after
    escaped backslashes, behind \\% escapes, inside verbatim and comment
    environments, and within \\makeatletter...\\makeatother blocks
    (where every '%' is preserved).

    from https://gist.github.com/dzhuang/dc34cdd7efa43e5ecc1dc981cc906c85
    """
    tokens = (
        'PERCENT', 'BEGINCOMMENT', 'ENDCOMMENT',
        'BACKSLASH', 'CHAR', 'BEGINVERBATIM',
        'ENDVERBATIM', 'NEWLINE', 'ESCPCT',
        'MAKEATLETTER', 'MAKEATOTHER',
    )
    # Exclusive lexer states: while in one, only that state's rules apply.
    states = (
        ('makeatblock', 'exclusive'),
        ('makeatlinecomment', 'exclusive'),
        ('linecomment', 'exclusive'),
        ('commentenv', 'exclusive'),
        ('verbatim', 'exclusive')
    )

    # NOTE: in ply.lex the docstring of each t_* rule IS that token's
    # regex, and precedence follows definition order -- keep both as-is.

    # Deal with escaped backslashes, so we don't
    # think they're escaping %
    def t_BACKSLASH(t):
        r"\\\\"
        return t

    # Leaving all % in makeatblock
    def t_MAKEATLETTER(t):
        r"\\makeatletter"
        t.lexer.begin("makeatblock")
        return t

    # One-line comments
    def t_PERCENT(t):
        r"\%"
        t.lexer.begin("linecomment")
        return t  # keep the % as a blank comment

    # Escaped percent signs
    def t_ESCPCT(t):
        r"\\\%"
        return t

    # Comment environment, as defined by verbatim package
    def t_BEGINCOMMENT(t):
        r"\\begin\s*{\s*comment\s*}"
        t.lexer.begin("commentenv")
        # no `return t`: the \begin{comment} marker itself is dropped

    # Verbatim environment (different treatment of comments within)
    def t_BEGINVERBATIM(t):
        r"\\begin\s*{\s*verbatim\s*}"
        t.lexer.begin("verbatim")
        return t

    # Any other character in initial state we leave alone
    def t_CHAR(t):
        r"."
        return t

    def t_NEWLINE(t):
        r"\n"
        return t

    # End comment environment
    def t_commentenv_ENDCOMMENT(t):
        r"\\end\s*{\s*comment\s*}"
        # Anything after \end{comment} on a line is ignored!
        t.lexer.begin('linecomment')

    # Ignore comments of comment environment
    def t_commentenv_CHAR(t):
        r"."
        pass

    def t_commentenv_NEWLINE(t):
        r"\n"
        pass

    # End of verbatim environment
    def t_verbatim_ENDVERBATIM(t):
        r"\\end\s*{\s*verbatim\s*}"
        t.lexer.begin('INITIAL')
        return t

    # Leave contents of verbatim environment alone
    def t_verbatim_CHAR(t):
        r"."
        return t

    def t_verbatim_NEWLINE(t):
        r"\n"
        return t

    # End a % comment when we get to a new line
    def t_linecomment_ENDCOMMENT(t):
        r"\n"
        t.lexer.begin("INITIAL")
        # Newline at the end of a line comment is preserved.
        return t

    # Ignore anything after a % on a line
    def t_linecomment_CHAR(t):
        r"."
        pass

    def t_makeatblock_MAKEATOTHER(t):
        r"\\makeatother"
        t.lexer.begin('INITIAL')
        return t

    def t_makeatblock_BACKSLASH(t):
        r"\\\\"
        return t

    # Escaped percent signs in makeatblock
    def t_makeatblock_ESCPCT(t):
        r"\\\%"
        return t

    # Preserve % in makeatblock
    def t_makeatblock_PERCENT(t):
        r"\%"
        t.lexer.begin("makeatlinecomment")
        return t

    def t_makeatlinecomment_NEWLINE(t):
        r"\n"
        t.lexer.begin('makeatblock')
        return t

    # Leave contents of makeatblock alone
    def t_makeatblock_CHAR(t):
        r"."
        return t

    def t_makeatblock_NEWLINE(t):
        r"\n"
        return t

    # For bad characters, we just skip over it
    def t_ANY_error(t):
        t.lexer.skip(1)

    lexer = ply.lex.lex()
    lexer.input(source)
    return u"".join([tok.value for tok in lexer])
def find_class(source):
    r"""Return the document-class filename referenced by *source*.

    (unused)
    Looks for ``\documentclass[options]{name}`` and returns ``'name.cls'``,
    or None when no \documentclass line is found.
    """
    match = re.search(r'\\documentclass.*{(.*)}', source)
    if match is None:
        return None
    return match.group(1) + '.cls'
def find_bibstyle(source):
    r"""Return the bibliography-style filename referenced by *source*.

    Looks for ``\bibliographystyle{name}`` and returns ``'name.bst'``,
    or None when no \bibliographystyle line is found.
    """
    match = re.search(r'\\bibliographystyle{(.*)}', source)
    if match is None:
        return None
    return match.group(1) + '.bst'
def find_figs(source):
    r"""Flatten \includegraphics paths and collect the referenced figures.

    Reads ``\graphicspath{{dir1}{dir2}...}`` for candidate figure
    directories, then rewrites every
    ``\includegraphics[opts]{path/name.ext}`` to reference a flat filename
    (``path_name.ext``) suitable for a single arXiv upload directory.

    Returns ``(figlist, source, graphicspaths)``: a list of
    ``(figname, figpath, newfigname)`` triples, the rewritten source, and
    the (possibly empty) list of graphics directories.
    """
    findgraphicspath = re.search(r'\\graphicspath{(.*)}', source)
    if findgraphicspath:
        graphicspaths = findgraphicspath.group(1)
        graphicspaths = re.findall('{(.*?)}', graphicspaths)
    else:
        graphicspaths = []

    # keep a list of (figname, figpath, newfigname)
    figlist = []

    def repl(m):
        figname = os.path.basename(m.group(2))
        figpath = os.path.dirname(m.group(2))
        # Strip only a leading './' (possibly repeated).  The previous
        # str.lstrip('./') removed *any* run of leading '.' and '/'
        # characters, mangling paths such as '.hidden/fig.png'
        # (-> 'hidden') or '../figs/fig.png' (-> 'figs').
        figpath = re.sub(r'^(\./)+', '', figpath)
        if figpath:
            newfigname = figpath.replace(' ', '_').replace('/', '_') + '_' + figname
        else:
            newfigname = figname
        newincludegraphics = m.group(1) + newfigname + m.group(3)
        figlist.append((figname, figpath, newfigname))
        return newincludegraphics

    source = re.sub(r'(\\includegraphics.*?{)(.*?)(})', repl, source)

    return figlist, source, graphicspaths
def flatten(source):
    r"""Recursively inline \input{} and \include{} files into *source*.

    ``\input{name}`` may be nested and is replaced verbatim by the file's
    contents; ``\include{name}`` is replaced by the contents wrapped in
    ``\clearpage`` (mirroring LaTeX's behaviour) and may itself contain
    ``\input``.  ``\includeonly`` is not supported.  A ``.tex`` extension
    is appended when the literal name is not an existing file.
    """
    def repl(m):
        inputname = m.group(2)
        if not os.path.isfile(inputname):
            inputname = inputname + '.tex'
        with io.open(inputname, encoding='utf-8') as f:
            newtext = f.read()
        # \input may be nested; expand recursively
        newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
        return newtext

    def repl_include(m):
        inputname = m.group(2)
        if not os.path.isfile(inputname):
            inputname = inputname + '.tex'
        with io.open(inputname, encoding='utf-8') as f:
            newtext = f.read()
        # \include implies page breaks around the included material
        newtext = '\\clearpage\n' + newtext
        newtext = re.sub(r'(\\input{)(.*?)(})', repl, newtext)
        newtext += '\\clearpage\n'
        return newtext

    # BUG FIX: the 4th argument to re.sub was previously ``True``, i.e.
    # count=1, so only the *first* \include in the document was flattened.
    # Use the default count=0 so every \include is expanded.
    dest = re.sub(r'(\\include{)(.*?)(})', repl_include, source)
    dest = re.sub(r'(\\input{)(.*?)(})', repl, dest)
    return dest
def main():
    """Command-line entry point: build a clean arXiv upload directory.

    Reads the given .tex file, strips comments, inlines \\input/\\include
    files, flattens figure paths, then copies class/style files, figures,
    the .bbl file (generating one via pdflatex+bibtex when missing) and
    any extra files listed in 'extra.txt' into a timestamped 'arxiv-*'
    directory.  Returns the processed LaTeX source.
    """
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("fname", metavar="filename.tex", help="name of texfile to arxiv")
    args = parser.parse_args()
    fname = args.fname

    print('[parxiv] reading %s' % fname)
    with io.open(fname, encoding='utf-8') as f:
        source = f.read()

    print('[parxiv] stripping comments')
    source = strip_comments(source)

    print('[parxiv] flattening source')
    source = flatten(source)

    print('[parxiv] stripping comments again')
    source = strip_comments(source)

    print('[parxiv] finding figures...')
    figlist, source, graphicspaths = find_figs(source)

    # print('[parxiv] finding article class and bib style')
    # localbibstyle = find_bibstyle(source)

    print('[parxiv] making directory', end='')
    dirname = 'arxiv-' + time.strftime('%c').replace(' ', '-')
    dirname = dirname.replace(':', '-')
    print(' %s' % dirname)
    os.makedirs(dirname)

    print('[parxiv] copying class/style files')
    # shutil.copy2(localclass, os.path.join(dirname, localclass))
    # if localbibstyle is not None:
    #     shutil.copy2(localbibstyle, os.path.join(dirname, localbibstyle))
    for bst in glob.glob('*.bst'):
        shutil.copy2(bst, os.path.join(dirname, bst))
    for sty in glob.glob('*.sty'):
        shutil.copy2(sty, os.path.join(dirname, sty))
    for cls in glob.glob('*.cls'):
        shutil.copy2(cls, os.path.join(dirname, cls))

    print('[parxiv] copying figures')
    for figname, figpath, newfigname in figlist:
        # Build a fresh candidate list per figure.  (Previously this did
        # ``allpaths = graphicspaths`` followed by ``allpaths += ['./']``,
        # which mutated *graphicspaths* in place and appended another
        # './' entry on every loop iteration.)
        allpaths = list(graphicspaths) + ['./']
        _, ext = os.path.splitext(figname)
        if ext == '':
            # no extension given: pdflatex defaults to .pdf, so copy that
            figname += '.pdf'
            newfigname += '.pdf'
        if figpath:
            allpaths = [os.path.join(p, figpath) for p in allpaths]
        for p in allpaths:
            src = os.path.join(p, figname)
            dest = os.path.join(dirname, os.path.basename(newfigname))
            try:
                shutil.copy2(src, dest)
            except IOError:
                # figure not under this candidate path; try the next one
                pass

    # copy bbl file
    print('[parxiv] copying bbl file')
    bblfile = fname.replace('.tex', '.bbl')
    newbblfile = fname.replace('.tex', '_strip.bbl')
    bblflag = False
    try:
        shutil.copy2(bblfile, os.path.join(dirname, newbblfile))
        bblflag = True
    except FileNotFoundError:
        print(' ...skipping, not found')

    # copy extra files listed (one per line) in extra.txt, if present
    try:
        with io.open('extra.txt', encoding='utf-8') as f:
            inputsource = f.read()
    except IOError:
        print('[parxiv] copying no extra files')
    else:
        print('[parxiv] copying extra file(s): ', end='')
        for f in inputsource.split('\n'):
            if os.path.isfile(f):
                localname = os.path.basename(f)
                print(' %s' % localname, end='')
                shutil.copy2(f, os.path.join(dirname, localname))
        print('\n')

    newtexfile = fname.replace('.tex', '_strip.tex')
    print('[parxiv] writing %s' % newtexfile)
    # Write with an explicit encoding: the sources were read as UTF-8, so
    # relying on the platform default here could raise UnicodeEncodeError
    # (e.g. on Windows) for any non-ASCII content.
    with io.open(
            os.path.join(dirname, newtexfile), 'w', encoding='utf-8') as fout:
        fout.write(source)

    print('[parxiv] attempting to generate bbl file')
    if not bblflag:
        # attempt to generate the .bbl in a scratch directory
        # (tempfile.TemporaryDirectory would be nicer, but this keeps
        # python2 support)
        try:
            d = tempfile.mkdtemp()
            try:
                cmd = ['pdflatex',
                       '-interaction', 'nonstopmode',
                       '-recorder',
                       '-output-directory', d,
                       newtexfile]
                # python2 support
                try:
                    from subprocess import DEVNULL
                except ImportError:
                    DEVNULL = open(os.devnull, 'wb')
                p = subprocess.Popen(cmd,
                                     cwd=dirname,
                                     stdin=DEVNULL,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
                p.communicate()

                # copy .bib/.bst files so bibtex can find them
                for bib in glob.glob('*.bib'):
                    shutil.copy2(bib, os.path.join(d, bib))
                for bib in glob.glob('*.bst'):
                    shutil.copy2(bib, os.path.join(d, bib))

                cmd = ['bibtex', newtexfile.replace('.tex', '.aux')]
                p = subprocess.Popen(cmd,
                                     cwd=d,
                                     stdin=DEVNULL,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
                p.communicate()
            except OSError as e:
                raise RuntimeError(e)

            bblfile = newtexfile.replace('.tex', '.bbl')
            if os.path.isfile(os.path.join(d, bblfile)):
                print(' ... generated')
                shutil.copy2(os.path.join(d, bblfile),
                             os.path.join(dirname, bblfile))
            else:
                print(' ... could not generate')
        finally:
            try:
                shutil.rmtree(d)
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

    return source
if __name__ == '__main__':
    # Keep the processed source bound at module scope so it can be
    # inspected after e.g. ``python -i parxiv.py paper.tex``.
    source = main()
| {
"content_hash": "98c180325b3b78f642aa21b97ce48aca",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 89,
"avg_line_length": 28.427293064876956,
"alnum_prop": 0.5474148107342409,
"repo_name": "lukeolson/clean-latex-to-arxiv",
"id": "7557201e596864b8f514a9d45122e61a5284f834",
"size": "12730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parxiv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12890"
},
{
"name": "TeX",
"bytes": "62480"
}
],
"symlink_target": ""
} |
import mock
import requests_mock
from unittest import TestCase
from resello import ReselloClient
from resello.managers.domain import DomainManager
class ReselloDomainManagerTestCase(TestCase):
    """Tests for the ``DomainManager`` exposed as ``client.domain``."""

    def setUp(self):
        client = ReselloClient(api_key='test', reseller_reference='test')
        client.BASE_PATH = 'mock://test'
        self.client = client

    def test_if_domain_contacts_submanager_initiated(self):
        """
        DomainManager should have contacts submanager.
        """
        from resello.managers.domain_contacts import DomainContactsManager

        domain_manager = DomainManager(mock.MagicMock())
        self.assertIsInstance(domain_manager.contacts, DomainContactsManager)

    @requests_mock.mock()
    def test_if_tld_details_can_be_fetched(self, request_mock):
        """
        Domain manager can retrieve TLD details for any supported TLD.
        """
        tld_payload = {'success': 'true', 'result': {'name': 'com'}}
        request_mock.get('mock://test/domain-tld/com',
                         json=tld_payload)

        response = self.client.domain.tld(tld_name='com')
        self.assertEqual('com', response.name)
| {
"content_hash": "19995e99cb1cc161f799cd0b1adfd750",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 34.088235294117645,
"alnum_prop": 0.6712683347713546,
"repo_name": "duct-tape/resello",
"id": "8b081bcd999c04d961a844156b04916de25b707b",
"size": "1159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/managers/test_domain_manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18683"
}
],
"symlink_target": ""
} |
import sys
import cherrypy
from cherrypy._cpcompat import ntou
from cherrypy._cptree import Application
from cherrypy.test import helper
script_names = ["", "/foo", "/users/fred/blog", "/corp/blog"]
class ObjectMappingTest(helper.CPWebCase):
    """Functional tests for URL-to-handler object mapping and dispatchers."""

    @staticmethod
    def setup_server():
        """Build the handler tree and mount it under each ``script_names`` prefix."""
        class Root:

            @cherrypy.expose
            def index(self, name="world"):
                return name

            @cherrypy.expose
            def foobar(self):
                return "bar"

            @cherrypy.expose
            def default(self, *params, **kwargs):
                return "default:" + repr(params)

            @cherrypy.expose
            def other(self):
                return "other"

            @cherrypy.expose
            def extra(self, *p):
                return repr(p)

            @cherrypy.expose
            def redirect(self):
                raise cherrypy.HTTPRedirect('dir1/', 302)

            def notExposed(self):
                return "not exposed"

            @cherrypy.expose
            def confvalue(self):
                return cherrypy.request.config.get("user")

            @cherrypy.expose
            def redirect_via_url(self, path):
                raise cherrypy.HTTPRedirect(cherrypy.url(path))

            @cherrypy.expose
            def translate_html(self):
                return "OK"

        # Handler mapped under a non-identifier, non-ASCII attribute name.
        @cherrypy.expose
        def mapped_func(self, ID=None):
            return "ID is %s" % ID
        setattr(Root, "Von B\xfclow", mapped_func)

        class Exposing:

            @cherrypy.expose
            def base(self):
                return "expose works!"
            cherrypy.expose(base, "1")
            cherrypy.expose(base, "2")

        class ExposingNewStyle(object):

            @cherrypy.expose
            def base(self):
                return "expose works!"
            cherrypy.expose(base, "1")
            cherrypy.expose(base, "2")

        class Dir1:

            @cherrypy.expose
            def index(self):
                return "index for dir1"

            @cherrypy.expose
            @cherrypy.config(**{'tools.trailing_slash.extra': True})
            def myMethod(self):
                return "myMethod from dir1, path_info is:" + repr(
                    cherrypy.request.path_info)

            @cherrypy.expose
            def default(self, *params):
                return "default for dir1, param is:" + repr(params)

        class Dir2:

            @cherrypy.expose
            def index(self):
                return "index for dir2, path is:" + cherrypy.request.path_info

            @cherrypy.expose
            def script_name(self):
                return cherrypy.tree.script_name()

            @cherrypy.expose
            def cherrypy_url(self):
                return cherrypy.url("/extra")

            @cherrypy.expose
            def posparam(self, *vpath):
                return "/".join(vpath)

        class Dir3:

            def default(self):
                return "default for dir3, not exposed"

        class Dir4:

            def index(self):
                return "index for dir4, not exposed"

        class DefNoIndex:

            @cherrypy.expose
            def default(self, *args):
                raise cherrypy.HTTPRedirect("contact")

        # MethodDispatcher code
        @cherrypy.expose
        class ByMethod:

            def __init__(self, *things):
                self.things = list(things)

            def GET(self):
                return repr(self.things)

            def POST(self, thing):
                self.things.append(thing)

        class Collection:
            default = ByMethod('a', 'bit')

        Root.exposing = Exposing()
        Root.exposingnew = ExposingNewStyle()
        Root.dir1 = Dir1()
        Root.dir1.dir2 = Dir2()
        Root.dir1.dir2.dir3 = Dir3()
        Root.dir1.dir2.dir3.dir4 = Dir4()
        Root.defnoindex = DefNoIndex()
        Root.bymethod = ByMethod('another')
        Root.collection = Collection()

        d = cherrypy.dispatch.MethodDispatcher()
        for url in script_names:
            conf = {'/': {'user': (url or "/").split("/")[-2]},
                    '/bymethod': {'request.dispatch': d},
                    '/collection': {'request.dispatch': d},
                    }
            cherrypy.tree.mount(Root(), url, conf)

        class Isolated:

            @cherrypy.expose
            def index(self):
                return "made it!"

        cherrypy.tree.mount(Isolated(), "/isolated")

        @cherrypy.expose
        class AnotherApp:

            def GET(self):
                return "milk"

        cherrypy.tree.mount(AnotherApp(), "/app",
                            {'/': {'request.dispatch': d}})

    def testObjectMapping(self):
        """Exercise default-dispatcher mapping under every mount prefix."""
        for url in script_names:
            prefix = self.script_name = url

            self.getPage('/')
            self.assertBody('world')

            self.getPage("/dir1/myMethod")
            self.assertBody(
                "myMethod from dir1, path_info is:'/dir1/myMethod'")

            self.getPage("/this/method/does/not/exist")
            self.assertBody(
                "default:('this', 'method', 'does', 'not', 'exist')")

            self.getPage("/extra/too/much")
            self.assertBody("('too', 'much')")
            self.getPage("/other")
            self.assertBody('other')
            self.getPage("/notExposed")
            self.assertBody("default:('notExposed',)")
            self.getPage("/dir1/dir2/")
            self.assertBody('index for dir2, path is:/dir1/dir2/')

            # Test omitted trailing slash (should be redirected by default).
            self.getPage("/dir1/dir2")
            self.assertStatus(301)
            self.assertHeader('Location', '%s/dir1/dir2/' % self.base())

            # Test extra trailing slash (should be redirected if configured).
            self.getPage("/dir1/myMethod/")
            self.assertStatus(301)
            self.assertHeader('Location', '%s/dir1/myMethod' % self.base())

            # Test that default method must be exposed in order to match.
            self.getPage("/dir1/dir2/dir3/dir4/index")
            self.assertBody(
                "default for dir1, param is:('dir2', 'dir3', 'dir4', 'index')")

            # Test *vpath when default() is defined but not index()
            # This also tests HTTPRedirect with default.
            self.getPage("/defnoindex")
            self.assertStatus((302, 303))
            self.assertHeader('Location', '%s/contact' % self.base())
            self.getPage("/defnoindex/")
            self.assertStatus((302, 303))
            self.assertHeader('Location', '%s/defnoindex/contact' %
                              self.base())
            self.getPage("/defnoindex/page")
            self.assertStatus((302, 303))
            self.assertHeader('Location', '%s/defnoindex/contact' %
                              self.base())

            self.getPage("/redirect")
            self.assertStatus('302 Found')
            self.assertHeader('Location', '%s/dir1/' % self.base())

            if not getattr(cherrypy.server, "using_apache", False):
                # Test that we can use URL's which aren't all valid Python
                # identifiers
                # This should also test the %XX-unquoting of URL's.
                self.getPage("/Von%20B%fclow?ID=14")
                self.assertBody("ID is 14")

                # Test that %2F in the path doesn't get unquoted too early;
                # that is, it should not be used to separate path components.
                # See ticket #393.
                self.getPage("/page%2Fname")
                self.assertBody("default:('page/name',)")

            self.getPage("/dir1/dir2/script_name")
            self.assertBody(url)
            self.getPage("/dir1/dir2/cherrypy_url")
            self.assertBody("%s/extra" % self.base())

            # Test that configs don't overwrite each other from different apps
            self.getPage("/confvalue")
            self.assertBody((url or "/").split("/")[-2])

        self.script_name = ""

        # Test absoluteURI's in the Request-Line
        self.getPage('http://%s:%s/' % (self.interface(), self.PORT))
        self.assertBody('world')

        self.getPage('http://%s:%s/abs/?service=http://192.168.0.1/x/y/z' %
                     (self.interface(), self.PORT))
        self.assertBody("default:('abs',)")

        self.getPage('/rel/?service=http://192.168.120.121:8000/x/y/z')
        self.assertBody("default:('rel',)")

        # Test that the "isolated" app doesn't leak url's into the root app.
        # If it did leak, Root.default() would answer with
        # "default:('isolated', 'doesnt', 'exist')".
        self.getPage("/isolated/")
        self.assertStatus("200 OK")
        self.assertBody("made it!")
        self.getPage("/isolated/doesnt/exist")
        self.assertStatus("404 Not Found")

        # Make sure /foobar maps to Root.foobar and not to the app
        # mounted at /foo. See
        # https://github.com/cherrypy/cherrypy/issues/573
        self.getPage("/foobar")
        self.assertBody("bar")

    def test_translate(self):
        """Dots and dashes in a URL segment map to underscores in handler names."""
        self.getPage("/translate_html")
        self.assertStatus("200 OK")
        self.assertBody("OK")

        self.getPage("/translate.html")
        self.assertStatus("200 OK")
        self.assertBody("OK")

        self.getPage("/translate-html")
        self.assertStatus("200 OK")
        self.assertBody("OK")

    def test_redir_using_url(self):
        """cherrypy.url-based redirects resolve correctly under every prefix."""
        for url in script_names:
            prefix = self.script_name = url

            # Test the absolute path to the parent (leading slash)
            self.getPage('/redirect_via_url?path=./')
            self.assertStatus(('302 Found', '303 See Other'))
            self.assertHeader('Location', '%s/' % self.base())

            # Test the relative path to the parent (no leading slash)
            self.getPage('/redirect_via_url?path=./')
            self.assertStatus(('302 Found', '303 See Other'))
            self.assertHeader('Location', '%s/' % self.base())

            # Test the absolute path to the parent (leading slash)
            self.getPage('/redirect_via_url/?path=./')
            self.assertStatus(('302 Found', '303 See Other'))
            self.assertHeader('Location', '%s/' % self.base())

            # Test the relative path to the parent (no leading slash)
            self.getPage('/redirect_via_url/?path=./')
            self.assertStatus(('302 Found', '303 See Other'))
            self.assertHeader('Location', '%s/' % self.base())

    def testPositionalParams(self):
        """Extra path segments arrive as positional args (*vpath)."""
        self.getPage("/dir1/dir2/posparam/18/24/hut/hike")
        self.assertBody("18/24/hut/hike")

        # intermediate index methods should not receive posparams;
        # only the "final" index method should do so.
        self.getPage("/dir1/dir2/5/3/sir")
        self.assertBody("default for dir1, param is:('dir2', '5', '3', 'sir')")

        # test that extra positional args raises an 404 Not Found
        # See https://github.com/cherrypy/cherrypy/issues/733.
        self.getPage("/dir1/dir2/script_name/extra/stuff")
        self.assertStatus(404)

    def testExpose(self):
        # Test the cherrypy.expose function/decorator
        self.getPage("/exposing/base")
        self.assertBody("expose works!")

        self.getPage("/exposing/1")
        self.assertBody("expose works!")

        self.getPage("/exposing/2")
        self.assertBody("expose works!")

        self.getPage("/exposingnew/base")
        self.assertBody("expose works!")

        self.getPage("/exposingnew/1")
        self.assertBody("expose works!")

        self.getPage("/exposingnew/2")
        self.assertBody("expose works!")

    def testMethodDispatch(self):
        """MethodDispatcher routes by HTTP verb and sets the Allow header."""
        self.getPage("/bymethod")
        self.assertBody("['another']")
        self.assertHeader('Allow', 'GET, HEAD, POST')

        self.getPage("/bymethod", method="HEAD")
        self.assertBody("")
        self.assertHeader('Allow', 'GET, HEAD, POST')

        self.getPage("/bymethod", method="POST", body="thing=one")
        self.assertBody("")
        self.assertHeader('Allow', 'GET, HEAD, POST')

        self.getPage("/bymethod")
        self.assertBody(repr(['another', ntou('one')]))
        self.assertHeader('Allow', 'GET, HEAD, POST')

        self.getPage("/bymethod", method="PUT")
        self.assertErrorPage(405)
        self.assertHeader('Allow', 'GET, HEAD, POST')

        # Test default with posparams
        self.getPage("/collection/silly", method="POST")
        self.getPage("/collection", method="GET")
        self.assertBody("['a', 'bit', 'silly']")

        # Test custom dispatcher set on app root (see #737).
        self.getPage("/app")
        self.assertBody("milk")

    def testTreeMounting(self):
        """Application/script_name combinations accepted by tree.mount."""
        class Root(object):

            @cherrypy.expose
            def hello(self):
                return "Hello world!"

        # When mounting an application instance,
        # we can't specify a different script name in the call to mount.
        a = Application(Root(), '/somewhere')
        self.assertRaises(ValueError, cherrypy.tree.mount, a, '/somewhereelse')

        # When mounting an application instance...
        a = Application(Root(), '/somewhere')
        # ...we MUST allow in identical script name in the call to mount...
        cherrypy.tree.mount(a, '/somewhere')
        self.getPage('/somewhere/hello')
        self.assertStatus(200)

        # ...and MUST allow a missing script_name.
        del cherrypy.tree.apps['/somewhere']
        cherrypy.tree.mount(a)
        self.getPage('/somewhere/hello')
        self.assertStatus(200)

        # In addition, we MUST be able to create an Application using
        # script_name == None for access to the wsgi_environ.
        a = Application(Root(), script_name=None)
        # However, this does not apply to tree.mount
        self.assertRaises(TypeError, cherrypy.tree.mount, a, None)

    def testKeywords(self):
        if sys.version_info < (3,):
            return self.skip("skipped (Python 3 only)")
        # Keyword-only parameters are py3-only syntax, hence the exec.
        exec("""class Root(object):
    @cherrypy.expose
    def hello(self, *, name='world'):
        return 'Hello %s!' % name
cherrypy.tree.mount(Application(Root(), '/keywords'))""")

        self.getPage('/keywords/hello')
        self.assertStatus(200)
        self.getPage('/keywords/hello/extra')
        self.assertStatus(404)
| {
"content_hash": "d8f05a58a60387b6501a13e83bbf2377",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 79,
"avg_line_length": 33.769767441860466,
"alnum_prop": 0.5498243922594863,
"repo_name": "heytcass/homeassistant-config",
"id": "c2b90bdc6d61b0f2d7f9a8a9ea89d4425100f56e",
"size": "14521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deps/cherrypy/test/test_objectmapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1007"
}
],
"symlink_target": ""
} |
import types
from functools import partial
import pytest
from stp_core.loop.eventually import eventually
from plenum.common.messages.node_messages import Commit
from stp_core.common.util import adict
from plenum.server.suspicion_codes import Suspicions
from plenum.test.helper import getNodeSuspicions, whitelistNode
from plenum.test.malicious_behaviors_node import makeNodeFaulty, \
sendDuplicate3PhaseMsg
from plenum.test.test_node import getNonPrimaryReplicas, getPrimaryReplica
from plenum.test import waits
@pytest.fixture(scope="module")
def setup(txnPoolNodeSet):
    """Make one non-primary replica send duplicate COMMITs on instance 0.

    Whitelists the faulty node so peers raise (rather than blacklist on)
    DUPLICATE_CM_SENT, and stubs out ordering on every replica so the
    request is never ordered (an ordered request would reject the extra
    COMMITs much earlier).

    NOTE: the scope was previously passed positionally
    (``pytest.fixture("module")``); positional arguments to ``fixture``
    are deprecated/removed in modern pytest, so the keyword form is used.
    """
    primaryRep, nonPrimaryReps = getPrimaryReplica(txnPoolNodeSet, 0), \
        getNonPrimaryReplicas(txnPoolNodeSet, 0)

    faultyRep = nonPrimaryReps[0]
    makeNodeFaulty(faultyRep.node, partial(sendDuplicate3PhaseMsg,
                                           msgType=Commit, count=3,
                                           instId=0))

    # The node of the primary replica above should not be blacklisted by any
    # other node since we are simulating multiple COMMIT messages and
    # want to check for a particular suspicion
    whitelistNode(faultyRep.node.name,
                  [node for node in txnPoolNodeSet if node != faultyRep.node],
                  Suspicions.DUPLICATE_CM_SENT.code)

    # If the request is ordered then COMMIT will be rejected much earlier
    for r in [primaryRep, *nonPrimaryReps]:
        def do_nothing(self, commit):
            pass

        # NOTE(review): the no-op is bound to the replica ``r`` but
        # installed on ``r._ordering_service`` -- confirm the receiver
        # passed to MethodType is the intended one.
        r._ordering_service._do_order = types.MethodType(do_nothing, r)

    return adict(primaryRep=primaryRep, nonPrimaryReps=nonPrimaryReps,
                 faultyRep=faultyRep)
# noinspection PyIncorrectDocstring,PyUnusedLocal,PyShadowingNames
def testMultipleCommit(setup, looper, sent1):
    """
    A replica sends multiple COMMIT messages to all other replicas. Other
    replicas should raise suspicion for each duplicate COMMIT seen and it
    should count only one COMMIT from that sender
    """
    primary_rep = setup.primaryRep
    non_primary_reps = setup.nonPrimaryReps
    faulty_rep = setup.faultyRep

    def verify_suspicions():
        # Every replica except the faulty sender must have raised exactly
        # two suspicions -- one per duplicate COMMIT received.
        for replica in (primary_rep, *non_primary_reps):
            if replica.name == faulty_rep.name:
                continue
            suspicions = getNodeSuspicions(
                replica.node, Suspicions.DUPLICATE_CM_SENT.code)
            assert len(suspicions) == 2

    node_count = len(primary_rep.node.nodeReg)
    timeout = waits.expectedTransactionExecutionTime(node_count)
    looper.run(eventually(verify_suspicions, retryWait=1, timeout=timeout))
| {
"content_hash": "93ab4a27af8dd0f3658b8067760a8e70",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 81,
"avg_line_length": 40.2463768115942,
"alnum_prop": 0.6726683471371984,
"repo_name": "evernym/zeno",
"id": "b23100c258010ea8abd77254d7b084bfa82e1a1a",
"size": "2777",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plenum/test/instances/test_multiple_commit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "531061"
}
],
"symlink_target": ""
} |
import sys
import os
from pprint import pformat
from six.moves import input
import linguistica as lxa
from linguistica.util import (ENCODING, PARAMETERS)
try:
FileNotFoundError
except NameError:
FileNotFoundError = OSError # no FileNotFoundError in Python 2
lxa_version = lxa.__version__
def determine_use_wordlist():
    """Ask interactively whether the input file is a wordlist.

    Returns True only when the answer starts with 'y' or 'Y'; an empty or
    any other answer means a corpus text (False).
    """
    answer = None
    while answer is None:
        answer = input(
            '\nAre you using a wordlist file? [N/y] ')
    return bool(answer) and answer[0].lower() == 'y'
def get_file_abspath():
    """Prompt for a path to an existing file and return its absolute form.

    Path separators are normalized for the current platform; the prompt
    repeats until the path names an existing regular file.
    """
    while True:
        raw_path = input('\nPath to your file: ')
        if sys.platform.startswith('win'):
            raw_path = raw_path.replace('/', os.sep)
        else:
            raw_path = raw_path.replace('\\', os.sep)
        candidate = os.path.abspath(raw_path)
        if os.path.isfile(candidate):
            return candidate
        print('Invalid file path!')
def get_output_dir(output_dir_):
    """Optionally let the user override the default output directory.

    Asks whether to change *output_dir_*; on a 'y' answer, prompts for a
    new directory (created on demand, one level deep) and returns its
    absolute path.  Otherwise the original value is returned unchanged.
    """
    answer = None
    while answer is None:
        answer = input('Change it? [N/y] ')
    if not (answer and answer[0].lower() == 'y'):
        return output_dir_

    chosen = None
    while chosen is None:
        chosen = input('Specify output directory: ')
        if sys.platform.startswith('win'):
            chosen = chosen.replace('/', os.sep)
        else:
            chosen = chosen.replace('\\', os.sep)
        chosen = os.path.abspath(chosen)
        if not os.path.isdir(chosen):
            try:
                os.mkdir(chosen)
            except FileNotFoundError:
                print('Cannot make a new directory in a non-existing one!')
                chosen = None
    return chosen
def get_encoding():
    """Report the default file encoding and let the user override it.

    Returns the default ``ENCODING`` unless the user answers 'y' and then
    supplies a non-empty replacement.
    """
    encoding = ENCODING
    print('\nDefault encoding for input and output files:', encoding)

    answer = None
    while answer is None:
        answer = input('Change encoding? [N/y] ')
    if answer and answer[0].lower() == 'y':
        replacement = None
        while not replacement:
            replacement = input('New encoding: ')
        encoding = replacement
    return encoding
def get_new_parameters():
    """Interactively collect parameter overrides as (name, int) pairs.

    Asks whether any parameters should be changed; on 'y', repeatedly
    prompts for a whitespace-separated ``name=value`` list until an
    entirely valid line is entered (every name must be in ``PARAMETERS``
    and every value must parse as an integer).

    Returns a (possibly empty) list of (parameter, value) tuples.
    """
    change_parameters_ans = None
    while change_parameters_ans is None:
        change_parameters_ans = input('\nChange any parameters? [N/y] ')

    new_parameter_value_pairs = list()

    if change_parameters_ans and change_parameters_ans[0].lower() == 'y':
        print('\nEnter parameter-value pairs\n'
              '(e.g. "min_stem_length=3 max_affix_length=3" without quotes):')

        parameter_value_str = None
        while not parameter_value_str:
            parameter_value_str = input()

            # Parse into a scratch list so a half-valid line that gets
            # rejected below leaves nothing behind when the user is
            # re-prompted.  (Previously pairs were appended directly to
            # the result list and accumulated across attempts, producing
            # duplicate/stale entries.)
            pending_pairs = list()

            for parameter_value in parameter_value_str.split():
                try:
                    parameter, value = parameter_value.split('=')
                except ValueError:
                    print('Invalid parameter-value pair: ' + parameter_value)
                    parameter_value_str = None
                    break

                if parameter not in PARAMETERS:
                    print('Unknown parameter: ', parameter)
                    parameter_value_str = None
                    break

                try:
                    value_int = int(value)
                except ValueError:
                    print('Cannot parse {} as an integer for parameter {}'
                          .format(value, parameter))
                    parameter_value_str = None
                    break

                pending_pairs.append((parameter, value_int))
            else:
                # Entire line parsed cleanly -- commit the pairs.
                new_parameter_value_pairs = pending_pairs

    return new_parameter_value_pairs
def main():
    """Interactive command-line front end for Linguistica.

    Prompts for: wordlist-vs-corpus, input file path, output directory,
    file encoding, and optional parameter overrides; then runs every
    Linguistica module on the file and writes all result files to the
    chosen output directory.
    """
    print('\n================================================================'
          '\nWelcome to Linguistica {}!'
          '\n================================================================'
          .format(lxa_version))

    # --------------------------------------------------------------------------
    # determine if file is a wordlist or a corpus text
    use_wordlist = determine_use_wordlist()
    print('--------------------------------------------')

    # --------------------------------------------------------------------------
    # get file path
    file_abspath = get_file_abspath()
    print('\nFull file path:\n{}'.format(file_abspath))
    print('--------------------------------------------')

    # --------------------------------------------------------------------------
    # determine output directory
    # (default: an 'lxa_outputs' folder next to the input file)
    output_dir = os.path.join(os.path.dirname(file_abspath), 'lxa_outputs')
    print('\nDefault output directory:\n{}'.format(output_dir))
    output_dir = get_output_dir(output_dir)
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    print('--------------------------------------------')

    # --------------------------------------------------------------------------
    # change encoding, if instructed
    encoding = get_encoding()
    print('--------------------------------------------')

    # --------------------------------------------------------------------------
    # create the Linguistica object
    if use_wordlist:
        lxa_object = lxa.read_wordlist(file_abspath, encoding=encoding)
    else:
        lxa_object = lxa.read_corpus(file_abspath, encoding=encoding)

    # --------------------------------------------------------------------------
    # change parameters, if instructed
    print('\nParameters:\n{}'.format(pformat(lxa_object.parameters())))
    new_parameter_value_pairs = get_new_parameters()
    if new_parameter_value_pairs:
        lxa_object.change_parameters(**dict(new_parameter_value_pairs))
        print('\nParameters after the changes:\n{}'
              .format(pformat(lxa_object.parameters())))
    print('--------------------------------------------')

    # --------------------------------------------------------------------------
    # run all Linguistica modules on the given file
    print('\nRunning all Linguistica modules on the given file:')
    lxa_object.run_all_modules(verbose=True)
    print('--------------------------------------------')

    # --------------------------------------------------------------------------
    # output results as files
    print('\nGenerating output files...\n')
    lxa_object.output_all_results(directory=output_dir, verbose=True)
    print('\nResults are in ' + output_dir)
| {
"content_hash": "b7cf869479bd6c71ae35308902d88d3d",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 80,
"avg_line_length": 31.130044843049326,
"alnum_prop": 0.5047536732929991,
"repo_name": "linguistica-uchicago/lxa5",
"id": "63061c4ae9c06e12c622e6e269c6c5f86cb43d09",
"size": "6968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linguistica/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155340"
},
{
"name": "Shell",
"bytes": "307"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from google.appengine.ext import ndb
from ferris.core.controller import Controller, route, route_with
from oauth2client.client import OAuth2WebServerFlow
from ferris.core.oauth2.user_credentials import UserCredentials as OAuth2UserCredentials
from ferris.core import settings
class Oauth(Controller):
    """Handles the OAuth2 web-server authorization flow for user credentials."""

    @route
    def start(self, session):
        """Kick off the OAuth2 dance for the session identified by the
        urlsafe key string *session*."""
        session_entity = ndb.Key(urlsafe=session).get()
        oauth_config = settings.get('oauth2')
        # Send the provider back to our callback action below.
        flow = OAuth2WebServerFlow(
            client_id=oauth_config['client_id'],
            client_secret=oauth_config['client_secret'],
            scope=session_entity.scopes,
            redirect_uri=self.uri(action='callback', _full=True))
        # Round-trip the session key through the provider via the state param.
        flow.params['state'] = session_entity.key.urlsafe()
        if session_entity.admin or session_entity.force_prompt:
            flow.params['approval_prompt'] = 'force'
        authorize_url = flow.step1_get_authorize_url()
        session_entity.flow = flow
        session_entity.put()
        return self.redirect(authorize_url)

    @route_with(template='/oauth2callback')
    def callback(self):
        """Exchange the provider's authorization code for credentials,
        persist them, and redirect to the session's target URL."""
        session_entity = ndb.Key(urlsafe=self.request.params['state']).get()
        credentials = session_entity.flow.step2_exchange(self.request.params['code'])
        OAuth2UserCredentials.create(
            user=self.user,
            scopes=session_entity.scopes,
            credentials=credentials,
            admin=session_entity.admin
        )
        session_entity.key.delete()  # No need for the session any longer
        return self.redirect(str(session_entity.redirect))
| {
"content_hash": "dde115f8058ef0207a4a0f9f156ac31b",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 88,
"avg_line_length": 30.431372549019606,
"alnum_prop": 0.6527061855670103,
"repo_name": "markEarvin/sl-contest-tracker",
"id": "b391e1dcf55a8792f4b2a7b65eefb9b385e989a1",
"size": "1552",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ferris/controllers/oauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24412"
},
{
"name": "JavaScript",
"bytes": "2510"
},
{
"name": "Python",
"bytes": "301848"
},
{
"name": "Shell",
"bytes": "2268"
}
],
"symlink_target": ""
} |
"""Test the Legrand Home+ Control integration."""
from unittest.mock import patch
from homeassistant import config_entries, setup
from homeassistant.components.home_plus_control.const import (
CONF_SUBSCRIPTION_KEY,
DOMAIN,
)
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from tests.components.home_plus_control.conftest import (
CLIENT_ID,
CLIENT_SECRET,
SUBSCRIPTION_KEY,
)
async def test_loading(hass, mock_config_entry):
    """Test component loading."""
    mock_config_entry.add_to_hass(hass)

    conf = {
        "home_plus_control": {
            CONF_CLIENT_ID: CLIENT_ID,
            CONF_CLIENT_SECRET: CLIENT_SECRET,
            CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
        },
    }
    # Stub the module fetch so setup succeeds without network access.
    with patch(
        "homeassistant.components.home_plus_control.api.HomePlusControlAsyncApi.async_get_modules",
        return_value={},
    ) as modules_mock:
        await setup.async_setup_component(hass, DOMAIN, conf)
        await hass.async_block_till_done()
        assert len(modules_mock.mock_calls) == 1
    assert mock_config_entry.state is config_entries.ConfigEntryState.LOADED
async def test_loading_with_no_config(hass, mock_config_entry):
    """Test that component setup fails when no YAML configuration is present."""
    mock_config_entry.add_to_hass(hass)
    await setup.async_setup_component(hass, DOMAIN, {})
    # Component setup fails because the oauth2 implementation could not be registered
    assert mock_config_entry.state is config_entries.ConfigEntryState.SETUP_ERROR
async def test_unloading(hass, mock_config_entry):
    """Test component unloading."""
    mock_config_entry.add_to_hass(hass)

    conf = {
        "home_plus_control": {
            CONF_CLIENT_ID: CLIENT_ID,
            CONF_CLIENT_SECRET: CLIENT_SECRET,
            CONF_SUBSCRIPTION_KEY: SUBSCRIPTION_KEY,
        },
    }
    # Stub the module fetch so setup succeeds without network access.
    with patch(
        "homeassistant.components.home_plus_control.api.HomePlusControlAsyncApi.async_get_modules",
        return_value={},
    ) as modules_mock:
        await setup.async_setup_component(hass, DOMAIN, conf)
        await hass.async_block_till_done()
        assert len(modules_mock.mock_calls) == 1
        assert mock_config_entry.state is config_entries.ConfigEntryState.LOADED

    # We now unload the entry
    assert await hass.config_entries.async_unload(mock_config_entry.entry_id)
    assert mock_config_entry.state is config_entries.ConfigEntryState.NOT_LOADED
| {
"content_hash": "e8cabc1579a057635e85d1e422a67aeb",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 99,
"avg_line_length": 34.653333333333336,
"alnum_prop": 0.6448634090034628,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "4da913047a212afce5f1304b4e44a20cbd5c8c71",
"size": "2599",
"binary": false,
"copies": "11",
"ref": "refs/heads/dev",
"path": "tests/components/home_plus_control/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""
Main handling code for Amazon Lambda
"""
from __future__ import print_function
import json
import random
import networking
import private
import re
TRY_AGAIN = "Please try again."
def lc_keys(mapping):
    """Return a copy of *mapping* with every key lowercased.

    Uses ``items()`` instead of the Python-2-only ``iteritems()`` so the
    helper also runs under Python 3 (on Python 2 the behavior is identical).
    """
    return {k.lower(): v for k, v in mapping.items()}
def lambda_handler(event, context):
    """Entry point: route the incoming Alexa request based on its type
    (LaunchRequest, IntentRequest, SessionEndedRequest).

    The JSON body of the request is provided in ``event``; ``context`` is
    the Lambda runtime context.
    """
    session = event['session']
    request = event['request']
    print("event.session.application.applicationId=" + session['application']['applicationId'])

    # To prevent someone else from configuring a skill that sends requests
    # to this function, validate the application ID here:
    # if (session['application']['applicationId'] !=
    #         "amzn1.echo-sdk-ams.app.[unique-value-here]"):
    #     raise ValueError("Invalid Application ID")

    if session['new']:
        on_session_started({'requestId': request['requestId']}, session)

    request_type = request['type']
    if request_type == "LaunchRequest":
        return on_launch(request, session)
    if request_type == "IntentRequest":
        return on_intent(request, session)
    if request_type == "SessionEndedRequest":
        return on_session_ended(request, session)
def on_session_started(session_started_request, session):
    """Log the start of a new session."""
    print("on_session_started requestId={}, sessionId={}".format(
        session_started_request['requestId'], session['sessionId']))
def on_launch(launch_request, session):
    """Handle a bare skill launch (no intent), e.g. "Alexa open Ask a
    maester" — responds with the welcome/help message."""
    print("on_launch requestId={}, sessionId={}".format(
        launch_request['requestId'], session['sessionId']))
    return get_welcome_response()
def on_intent(intent_request, session):
    """Dispatch an IntentRequest to its handler, e.g. "Alexa ask a maester
    who is Jon Snow"."""
    print("on_intent sessionId={}, request={}".format(
        session["sessionId"], json.dumps(intent_request)))

    intent = intent_request['intent']
    intent_name = intent['name']

    # Skill-specific intents take (intent, session); Amazon built-ins don't.
    skill_handlers = {
        "GetCharacterInfo": get_character_info,
        "GetHouseWords": get_house_words,
        "GetActor": get_actor,
        "GetOtherRoles": get_other_roles,
    }
    handler = skill_handlers.get(intent_name)
    if handler is not None:
        return handler(intent, session)
    if intent_name == "AMAZON.HelpIntent":
        return get_welcome_response()
    if intent_name in ("AMAZON.CancelIntent", "AMAZON.StopIntent"):
        return handle_session_end_request()
    raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
    """Log the end of a session.

    Not called when the skill itself returns should_end_session=true.
    """
    print("on_session_ended requestId={}, sessionId={}".format(
        session_ended_request['requestId'], session['sessionId']))
    # No cleanup logic is needed at the moment.
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
    """Build the welcome/help speechlet played on launch or for
    AMAZON.HelpIntent."""
    reprompt = "Ask about a character such as, who is Jon Snow."
    speech = "Hello there. You can ask who is Jon Snow, what are the words of House Stark, who plays Arya Stark, or what else has Lena Headey starred in."
    # Keep the session open so the user can reply with a question.
    return build_response({}, build_speechlet_response(
        "Welcome", speech, reprompt, False))
def handle_session_end_request():
    """Close the session in response to Cancel/Stop intents."""
    # should_end_session=True ends the session and exits the skill.
    return build_response({}, build_speechlet_response(
        "Session Ended", "Valar dohaeris", None, True))
# Hard-coded character summaries used as fallback speech content.
# Keys are canonical character names; lookups go through CHAR_INFO below,
# which lowercases the keys so matching is case-insensitive.
_CHAR_INFO = {
    "Catelyn Stark": "Lady Catelyn Stark also called Catelyn Tully is the wife of Lord Eddard Stark and Lady of Winterfell. Together they have five children: Robb, Sansa, Arya, Bran, and Rickon. Catelyn was born into House Tully of Riverrun, the liege lords of the riverlands. She is the daughter of Lord Hoster Tully and Lady Minisa Whent, and her siblings are Lysa and Edmure.",
    "Jon Snow": "Jon Snow is the bastard son of Eddard Stark, by a mother whose identity is a source of speculation. He was raised by his father alongside his true-born half-siblings, but joins the Night's Watch when he nears adulthood. He is constantly accompanied by his albino direwolf Ghost. At the beginning of A Game of Thrones, Jon is fourteen years old. He is one of the major POV characters in the books. In the television adaptation Game of Thrones, Jon is portrayed by Kit Harington.",
    "Eddard Stark": "Eddard Stark, also called Ned, is the head of House Stark, Lord of Winterfell, and Warden of the North. He is a close friend to King Robert I Baratheon, with whom he was raised. Eddard is one of the major POV characters in the books. In the television adaptation Game of Thrones, he is played by Sean Bean in Season 1 and by Sebastian Croft and Robert Aramayo during flashbacks in Season 6.",
    "Daenerys Targaryen": "Princess Daenerys Targaryen, known as Daenerys Stormborn and Dany, is one of the last confirmed members of House Targaryen, along with her older brother Viserys, and she is one of the major POV characters in A Song of Ice and Fire. In the television adaptation Game of Thrones, Daenerys is played by Emilia Clarke.",
    "Tyrion Lannister": "Tyrion Lannister is a member of House Lannister and is the third and youngest child of Lord Tywin Lannister and the late Joanna Lannister. His older siblings are Cersei Lannister, the queen of King Robert Baratheon, and Ser Jaime Lannister, a knight of Robert's Kingsguard. Tyrion is a dwarf; because of this he is sometimes mockingly called the Imp and the Halfman. He is one of the major POV characters in the books. In the television adaptation Game of Thrones, Tyrion is played by Peter Dinklage.",
    "Arya Stark": "Arya Stark is the third child and second daughter of Lord Eddard Stark and Lady Catelyn Tully. A member of House Stark, she has five siblings: brothers Robb, Bran, Rickon, half-brother Jon Snow, and older sister Sansa. She is a POV character in A Song of Ice and Fire and is portrayed by Maisie Williams in the television adaptation, Game of Thrones. Like some of her siblings, Arya sometimes dreams that she is a direwolf. Her own direwolf is Nymeria, who is named in reference to the Rhoynar warrior-queen of the same name."
}
# Case-insensitive lookup table derived from _CHAR_INFO.
CHAR_INFO = lc_keys(_CHAR_INFO)
def get_character_info(intent, session):
    """Answer a "who is <character>" question.

    Looks the character slot up in the ``char_summary`` search index and
    speaks the stored summary, or an apology (with a random example
    question) when the slot is missing or the character is unknown.
    """
    card_title = intent['name']
    title_format = "About {}"
    session_attributes = {}
    should_end_session = True

    character = get_slot_value(intent, "character")
    # list() is required on Python 3, where dict.keys() is a view that
    # random.choice cannot index; on Python 2 it is a harmless copy.
    example = "You can ask by saying, who is {}.".format(
        random.choice(list(CHAR_INFO.keys())))
    if not character:
        base_error = "I'm not sure who you mean."
        speech_output = base_error + " " + TRY_AGAIN
        reprompt_text = base_error + " " + example
    else:
        hits = search("char_summary", {"name": character})
        if hits:
            result = hits[0]
            card_title = title_format.format(result["_source"]["name"])
            speech_output = result["_source"]["summary"]
            reprompt_text = speech_output
        else:
            base_error = "I don't know about {}.".format(character)
            card_title = title_format.format(character)
            speech_output = base_error + " " + TRY_AGAIN
            reprompt_text = base_error + " " + example
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
def get_house_words(intent, session):
    """Speak the motto ("words") of the requested house."""
    session_attributes = {}
    should_end_session = True
    card_title = intent['name']
    title_format = "The words of House {}"

    house = get_slot_value(intent, "house")
    example = "You can ask by saying, what are the words of house {}.".format("Stark")
    if not house:
        prefix = "I'm not sure which house you mean."
        speech_output = prefix + " " + TRY_AGAIN
        reprompt_text = prefix + " " + example
    else:
        matches = search("house", {"name": house})
        if matches:
            top_hit = matches[0]
            card_title = title_format.format(top_hit["_source"]["name"])
            speech_output = top_hit["_source"]["words"]
            reprompt_text = speech_output
        else:
            prefix = "I don't know the words of house {}.".format(house)
            card_title = title_format.format(house)
            speech_output = prefix + " " + TRY_AGAIN
            reprompt_text = prefix + " " + example
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
def get_actor(intent, session):
    """Answer "who plays <character>" from the character search index."""
    session_attributes = {}
    should_end_session = True
    card_title = intent['name']

    character = get_slot_value(intent, "character")
    example = "You can ask by saying, who plays {}.".format("Arya Stark")
    if not character:
        prefix = "I'm not sure who you mean."
        speech_output = prefix + " " + TRY_AGAIN
        reprompt_text = prefix + " " + example
    else:
        matches = search("character", {"name": character})
        if matches:
            card_title = character
            source = matches[0]["_source"]
            # Use the canonical (long) name from the index in the answer.
            speech_output = "{} is played by {}".format(
                source["name"], generate_and(source["actors"]))
            reprompt_text = speech_output
        else:
            prefix = "I don't know who plays {}.".format(character)
            speech_output = prefix + " " + TRY_AGAIN
            reprompt_text = prefix + " " + example
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
def generate_and(strings):
    """Join *strings* into a natural-language list using "and".

    Examples: [] -> "", ["a"] -> "a", ["a", "b"] -> "a and b",
    ["a", "b", "c"] -> "a, b, and c" (Oxford comma).
    """
    if not strings:
        # Previously an empty list fell into the general case and raised
        # IndexError on strings[-1]; return an empty phrase instead.
        return ""
    if len(strings) == 1:
        return strings[0]
    if len(strings) == 2:
        return " and ".join(strings)
    return ", ".join(strings[:-1]) + ", and " + strings[-1]
def get_other_roles(intent, session):
    """Answer "what else has <actor> starred in" from the actor index."""
    session_attributes = {}
    should_end_session = True
    card_title = intent['name']

    actor = get_slot_value(intent, "actor")
    example = "You can ask by saying, what else has {} starred in.".format("Emilia Clarke")
    if not actor:
        prefix = "I'm not sure who you mean."
        speech_output = prefix + " " + TRY_AGAIN
        reprompt_text = prefix + " " + example
    else:
        matches = search("actor", {"name": actor})
        # An actor with an empty other_roles list is treated as unknown.
        if matches and matches[0]["_source"]["other_roles"]:
            card_title = actor
            roles = matches[0]["_source"]["other_roles"]
            speech_output = "{} has also starred in {}".format(
                actor, generate_and(roles))
            reprompt_text = speech_output
        else:
            prefix = "I don't know about {}'s other roles.".format(actor)
            speech_output = prefix + " " + TRY_AGAIN
            reprompt_text = prefix + " " + example
    return build_response(session_attributes, build_speechlet_response(
        card_title, speech_output, reprompt_text, should_end_session))
# --------------- Helpers that build all of the responses ----------------------
def search(doc_type, query_doc, min_score=0):
    """Query Elasticsearch for documents matching *query_doc*.

    Note: behavior when ES is down or firewalled is untested.

    :param doc_type: The database table (Elasticsearch calls them types)
    :param query_doc: dict of field -> value to match
    :param min_score: filter out results scoring below this value
    :return: list of ES hit documents with score (payload under ``_source``)
    """
    assert isinstance(query_doc, dict)
    es = networking.get_elasticsearch()
    # "and" operator: every term in the field value must match, not just one.
    and_query = {field: {"query": value, "operator": "and"}
                 for field, value in query_doc.items()}
    response = es.search(index=private.ES_INDEX, doc_type=doc_type,
                         body={"query": {"match": and_query}})
    return [hit for hit in response["hits"]["hits"]
            if hit["_score"] >= min_score]
def munge_speech_response(text):
    """Respell words Alexa mispronounces with phonetic-friendly forms.

    Returns *text* unchanged when it is falsy (e.g. ``None`` reprompts).
    Replacements apply on word boundaries only, so "Dany" is respelled
    while a longer word containing it is left alone.
    """
    if not text:
        return text
    mapping = {"Dany": "Danny", "POV": "P.O.V.", "Edmure": "Edmiure", "Sandor": "Sandore"}
    # items() (not the Python-2-only iteritems()) keeps this working on
    # Python 3; on Python 2 the behavior is identical.
    for source, target in mapping.items():
        text = re.sub(r"\b{}\b".format(re.escape(source)), target, text)
    return text
def get_slot_value(intent, slot):
    """Return the value of *slot* from the intent, or None when the slot
    is absent or carries no value."""
    slots = intent["slots"]
    if slot in slots:
        return slots[slot].get("value")
    return None
def build_speechlet_response(title, output, reprompt_text, should_end_session):
    """Assemble an Alexa speechlet: speech, card, reprompt, session flag."""
    speech = {'type': 'PlainText', 'text': munge_speech_response(output)}
    reprompt_speech = {'type': 'PlainText',
                       'text': munge_speech_response(reprompt_text)}
    # The card shows the original text since it is read, not spoken.
    card = {'type': 'Simple', 'title': title, 'content': output}
    return {
        'outputSpeech': speech,
        'card': card,
        'reprompt': {'outputSpeech': reprompt_speech},
        'shouldEndSession': should_end_session,
    }
def build_response(session_attributes, speechlet_response):
    """Wrap a speechlet in the top-level Alexa response envelope."""
    return {'version': '1.1',
            'sessionAttributes': session_attributes,
            'response': speechlet_response}
| {
"content_hash": "86d3e292b8f3bd85fbc9b8b0cec37bae",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 545,
"avg_line_length": 41.93529411764706,
"alnum_prop": 0.6542993407209987,
"repo_name": "ktrnka/maester_alexa",
"id": "9342cc7198c8506953ecc9ae9cf0d8d89258c443",
"size": "14258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47831"
},
{
"name": "Shell",
"bytes": "234"
}
],
"symlink_target": ""
} |
import urllib.request
from bs4 import BeautifulSoup
# Spoof a browser user agent; some feed servers reject urllib's default.
headers = { 'User-Agent' : 'Mozilla/5.0' }
# NOTE(review): placeholder -- replace with a real feed URL before running;
# this fetch happens at import time and will fail as-is.
URL = "given url"
page = urllib.request.Request(URL, None, headers)
page = urllib.request.urlopen(page)
data = page.read()
page.close()
def parse(data):
    """Print title, link, and summary/description for each feed entry.

    Handles both Atom feeds (<entry> elements) and RSS feeds (<item>
    elements); Atom takes precedence if both are present.
    """
    soup = BeautifulSoup(data, features='xml')
    entries = soup.find_all('entry')
    items = soup.find_all('item')
    if entries:
        # Atom feed: the link target lives in the href attribute.
        for entry in entries:
            print(entry.title.text)
            print(entry.link.get('href'))
            print('\n')
            # Atom allows either a <summary> or full <content>.
            if entry.summary:
                print(entry.summary.text + '\n')
            elif entry.content:
                print(entry.content.text + '\n')
    elif items:
        # RSS feed: the link is the element's text content.
        for item in items:
            print(item.title.text)
            print(item.link.text)
            print('\n')
            if item.description:
                print(item.description.text + '\n')
# Parse and print the feed bytes downloaded at import time above.
parse(data)
| {
"content_hash": "eb4166fa01c69d807d8336c8e9c3306f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 51,
"avg_line_length": 28.4375,
"alnum_prop": 0.567032967032967,
"repo_name": "pragalakis/100-python-projects",
"id": "0bf5630f3171f87eeb5120eea76ac713b3632d29",
"size": "993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "text/rss-feed-creator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "65618"
}
],
"symlink_target": ""
} |
from direct.distributed.DistributedObject import DistributedObject
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
from otp.otpbase import OTPGlobals
from toontown.effects import DustCloud
class DistributedBankCollectable(DistributedObject):
    """Client-side floating jellybean jar that a toon can grab.

    The jar spins and bobs at a fixed spot; walking into its collision
    sphere requests the grab from the server, and on success the jar flies
    up to the grabbing avatar and detaches.
    """

    def __init__(self, cr):
        DistributedObject.__init__(self, cr)
        self.grabbed = False
        self.nodePath = None
        self.bankCollectable = None
        self.collNodePath = None
        self.grabSound = None
        # Bug fix: these were previously initialized under the misnamed
        # attributes rotateIval/floatIval, while every other method reads
        # rotateTrack/floatTrack -- so delete() raised AttributeError if it
        # ran before announceGenerate().
        self.rotateTrack = None
        self.floatTrack = None
        self.flyTrack = None

    def announceGenerate(self):
        """Build geometry, collision sphere, sound, and idle animations."""
        DistributedObject.announceGenerate(self)
        self.nodePath = NodePath(self.uniqueName('bankCollectable'))
        self.nodePath.setScale(0.9)
        self.bankCollectable = self.nodePath.attachNewNode('bankCollectable')
        # Intangible sphere: fires enter events without blocking movement.
        collSphere = CollisionSphere(0, 0, 0, 2)
        collSphere.setTangible(0)
        collNode = CollisionNode(self.uniqueName('bankCollectableSphere'))
        collNode.setIntoCollideMask(OTPGlobals.WallBitmask)
        collNode.addSolid(collSphere)
        self.collNodePath = self.nodePath.attachNewNode(collNode)
        model = loader.loadModel('phase_5.5/models/estate/jellybeanJar.bam')
        model.setTransparency(TransparencyAttrib.MDual, 1)
        model.find('**/jellybeansinbowl').setColorScale(1, 1, 1, 0.5)
        model.reparentTo(self.bankCollectable)
        self.grabSound = loader.loadSfx('phase_4/audio/sfx/SZ_DD_treasure.ogg')
        self.nodePath.wrtReparentTo(render)
        jellybeanjar = self.bankCollectable.find('**/jellybeanjar')
        # Idle spin of the jar itself...
        self.rotateTrack = LerpHprInterval(jellybeanjar, 5, Vec3(360, 0, 0))
        self.rotateTrack.loop()
        # ...plus a gentle up/down bob at a hard-coded position.
        self.floatTrack = Sequence()
        self.floatTrack.append(LerpPosInterval(self.nodePath, 2, Point3(-22, 27.5, 2), startPos=Point3(-22, 27.5, 1.5)))
        self.floatTrack.append(LerpPosInterval(self.nodePath, 2, Point3(-22, 27.5, 1.5), startPos=Point3(-22, 27.5, 2)))
        self.floatTrack.loop()
        # Slightly larger translucent copy pulsing yellow as a glow effect.
        glow = jellybeanjar.copyTo(self.bankCollectable)
        glow.setScale(1.1)
        glowTrack = Sequence()
        glowTrack.append(LerpColorScaleInterval(glow, 2.5, Vec4(0.6, 0.6, 0, 0.6), startColorScale=Vec4(0.4, 0.4, 0, 0.6)))
        glowTrack.append(LerpColorScaleInterval(glow, 2.5, Vec4(0.4, 0.4, 0, 0.6), startColorScale=Vec4(0.6, 0.6, 0, 0.6)))
        glowTrack.loop()
        self.accept(self.uniqueName('enterbankCollectableSphere'), self.__handleEnterSphere)

    def disable(self):
        self.ignoreAll()
        DistributedObject.disable(self)

    def delete(self):
        # When the bank collectable is deleted, and has not been grabbed, do a
        # poof effect:
        if not self.grabbed:
            dustCloud = DustCloud.DustCloud(fBillboard=0)
            dustCloud.setBillboardAxis(2.0)
            dustCloud.setZ(4)
            dustCloud.setScale(0.4)
            dustCloud.createTrack()
            dustCloud.reparentTo(render)
            dustCloud.setPos(self.nodePath.getPos())
            Sequence(dustCloud.track, Func(dustCloud.destroy)).start()
        # Stop and drop every running interval before tearing down the node.
        if self.flyTrack is not None:
            self.flyTrack.finish()
            self.flyTrack = None
        if self.floatTrack is not None:
            self.floatTrack.finish()
            self.floatTrack = None
        if self.rotateTrack is not None:
            self.rotateTrack.finish()
            self.rotateTrack = None
        if self.nodePath is not None:
            self.nodePath.removeNode()
            self.nodePath = None
        DistributedObject.delete(self)

    def grab(self, avId):
        """Server-driven entry point: avatar *avId* grabbed the jar."""
        self.__handleGrab(avId)

    def d_requestGrab(self):
        """Ask the server to let the local avatar grab the jar."""
        self.sendUpdate('requestGrab', [])

    def __handleUnexpectedExit(self):
        # The grabbing avatar vanished mid-flight; stop the animation.
        if self.flyTrack:
            self.flyTrack.finish()
            self.flyTrack = None

    def __handleEnterSphere(self, collEntry=None):
        self.d_requestGrab()

    def __handleGrab(self, avId):
        # Stash the collision sphere so the jar can't be grabbed twice.
        self.collNodePath.stash()
        self.grabbed = True
        av = self.cr.doId2do.get(avId)
        if not av:
            # Grabbing avatar is not on this client; just remove the jar.
            self.nodePath.removeNode()
            self.nodePath = None
            return
        base.playSfx(self.grabSound, node=self.nodePath)
        self.nodePath.wrtReparentTo(av)
        if self.flyTrack:
            self.flyTrack.finish()
            self.flyTrack = None
        # Clean up gracefully if the avatar disappears during the flight.
        unexpectedExitEvent = av.uniqueName('disable')
        self.accept(unexpectedExitEvent, self.__handleUnexpectedExit)
        track = Sequence(
            LerpPosInterval(
                self.nodePath, 1, pos=Point3(0, 0, 3),
                startPos=self.nodePath.getPos(), blendType='easeInOut'),
            Func(self.nodePath.detachNode),
            Func(self.ignore, unexpectedExitEvent))
        self.flyTrack = Sequence(track, name=self.uniqueName('flyTrack'))
        self.flyTrack.start()
| {
"content_hash": "d0db869c328f2b095fd59f13dd1fa5c8",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 123,
"avg_line_length": 34.52447552447553,
"alnum_prop": 0.6376341908041321,
"repo_name": "linktlh/Toontown-journey",
"id": "abd32e8b45602e853af31077cf5f92f11f1855aa",
"size": "4937",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "toontown/building/DistributedBankCollectable.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import sys, os, time, threading, errno
from .common import FSEvent, FSMonitorError
def get_dir_contents(path):
return [(filename, os.stat(os.path.join(path, filename)))
for filename in os.listdir(path)]
class FSMonitorDirWatch(object):
    """Watch state for a directory.

    Remembers the last-seen directory listing so the poller can diff it
    against a fresh snapshot and emit change events.
    """

    def __init__(self, path, flags, user):
        self.path = path
        self.flags = flags  # FSEvent mask of interest
        self.user = user    # opaque user data attached to events
        self.enabled = True
        self._timestamp = time.time()  # time of the last poll
        try:
            self._contents = get_dir_contents(path)
            self._deleted = False
        except OSError as e:
            self._contents = []
            # Only a missing path counts as deleted; other errors (e.g.
            # permissions) leave the watch in a retryable, non-deleted state.
            self._deleted = (e.errno == errno.ENOENT)

    def __repr__(self):
        return "<FSMonitorDirWatch %r>" % self.path

    @classmethod
    def new_state(cls, path):
        """Take a fresh snapshot of the directory listing.

        Delegates to the module-level helper instead of duplicating its
        body, as the previous copy-pasted implementation did.
        """
        return get_dir_contents(path)

    def getstate(self):
        return self._contents

    def delstate(self):
        self._contents = []
        self._deleted = True

    def setstate(self, state):
        self._contents = state
        self._deleted = False

    # `state` abstracts the remembered listing; deleting it marks the
    # watch target as gone.
    state = property(getstate, setstate, delstate)
class FSMonitorFileWatch(object):
    """Watch state for a single file, tracking its last-seen stat result."""

    def __init__(self, path, flags, user):
        self.path = path
        self.flags = flags  # FSEvent mask of interest
        self.user = user    # opaque user data attached to events
        self.enabled = True
        self._timestamp = time.time()  # time of the last poll
        try:
            stat_result = os.stat(path)
        except OSError as e:
            self._stat = None
            # Only a missing file counts as deleted.
            self._deleted = (e.errno == errno.ENOENT)
        else:
            self._stat = stat_result
            self._deleted = False

    def __repr__(self):
        return "<FSMonitorFileWatch %r>" % self.path

    @classmethod
    def new_state(cls, path):
        """Take a fresh stat snapshot of *path*."""
        return os.stat(path)

    def getstate(self):
        return self._stat

    def delstate(self):
        self._deleted = True
        self._stat = None

    def setstate(self, state):
        self._deleted = False
        self._stat = state

    # `state` abstracts the remembered stat; deleting it marks the file
    # as gone.
    state = property(getstate, setstate, delstate)
class FSMonitorWatch(object):
    # NOTE(review): near-duplicate of FSMonitorDirWatch without the `state`
    # property; presumably kept for backward compatibility -- confirm before
    # consolidating.
    def __init__(self, path, flags, user):
        # Snapshot the directory listing so later polls can diff against it.
        self.path = path
        self.flags = flags  # FSEvent mask of interest
        self.user = user  # opaque user data attached to events
        self.enabled = True
        self._timestamp = time.time()  # time of the last poll
        try:
            self._contents = get_dir_contents(path)
            self._deleted = False
        except OSError as e:
            self._contents = []
            # Only a missing path counts as deleted.
            self._deleted = (e.errno == errno.ENOENT)
    def __repr__(self):
        return "<FSMonitorWatch %r>" % self.path
def _compare_contents(watch, new_contents, events_out, before):
    """Diff a fresh directory listing against the watch's remembered one.

    Appends Access/Modify events for surviving entries, Delete events for
    vanished entries, and Create events for new ones to *events_out*.
    """
    stat_by_name = dict(new_contents)
    # Pass 1: entries that existed before -- changed or deleted?
    for name, old_stat in watch._contents:
        current_stat = stat_by_name.get(name)
        if current_stat is None:
            events_out.append(FSEvent(watch, FSEvent.Delete, name))
        else:
            _compare_stat(watch, current_stat, events_out, before, old_stat, name)
    # Pass 2: entries present now but unknown before -- created.
    known_names = frozenset(name for name, _ in watch._contents)
    for name, _ in new_contents:
        if name not in known_names:
            events_out.append(FSEvent(watch, FSEvent.Create, name))
def _compare_stat(watch, new_stat, events_out, before, old_stat, filename):
    """Emit Access/Modify events for one file's old vs. new stat result."""
    # Only report atime changes that happened before this poll began.
    atime_changed = new_stat.st_atime != old_stat.st_atime
    if atime_changed and new_stat.st_atime < before:
        events_out.append(FSEvent(watch, FSEvent.Access, filename))
    if new_stat.st_mtime != old_stat.st_mtime:
        events_out.append(FSEvent(watch, FSEvent.Modify, filename))
def round_fs_resolution(t):
    """Round timestamp *t* down to the assumed filesystem timestamp
    resolution: 2 seconds on win32, 1 second elsewhere."""
    granularity = 2 if sys.platform == "win32" else 1
    return (t // granularity) * granularity
class FSMonitor(object):
    """Polling-based filesystem monitor.

    Keeps separate sets of directory and file watches; read_events() polls
    each watch and returns FSEvent objects describing observed changes.
    """
    def __init__(self):
        self.__lock = threading.Lock()  # guards the two watch sets
        self.__dir_watches = set()
        self.__file_watches = set()
        self.polling_interval = 0.5  # minimum seconds between polls of a watch
    @property
    def watches(self):
        """Snapshot list of all registered watches (directories first)."""
        with self.__lock:
            return list(self.__dir_watches) + list(self.__file_watches)
    def add_dir_watch(self, path, flags=FSEvent.All, user=None):
        """Register and return a directory watch for *path*."""
        watch = FSMonitorDirWatch(path, flags, user)
        with self.__lock:
            self.__dir_watches.add(watch)
        return watch
    def add_file_watch(self, path, flags=FSEvent.All, user=None):
        """Register and return a single-file watch for *path*."""
        watch = FSMonitorFileWatch(path, flags, user)
        with self.__lock:
            self.__file_watches.add(watch)
        return watch
    def remove_watch(self, watch):
        """Remove *watch* from whichever set contains it (no-op otherwise)."""
        with self.__lock:
            if watch in self.__dir_watches:
                self.__dir_watches.discard(watch)
            elif watch in self.__file_watches:
                self.__file_watches.discard(watch)
    def remove_all_watches(self):
        """Drop every registered watch."""
        with self.__lock:
            self.__dir_watches.clear()
            self.__file_watches.clear()
    def enable_watch(self, watch, enable=True):
        """Enable (or, with enable=False, disable) polling of *watch*."""
        watch.enabled = enable
    def disable_watch(self, watch):
        """Stop polling *watch* without removing it."""
        watch.enabled = False
    def read_events(self, timeout=None):
        """Poll every watch once and return the list of FSEvents observed.

        NOTE(review): ``timeout`` and ``start_time`` are currently unused --
        confirm the intended semantics before relying on them.
        """
        now = start_time = time.time()
        watches = self.watches
        # Poll the watches that have waited longest first.
        watches.sort(key=lambda watch: abs(now - watch._timestamp), reverse=True)
        events = []
        for watch in watches:
            now = time.time()
            if watch._timestamp < now:
                tdiff = now - watch._timestamp
                # Throttle: never poll the same watch more often than
                # polling_interval; sleep to pace the scan.
                if tdiff < self.polling_interval:
                    time.sleep(self.polling_interval - tdiff)
                watch._timestamp = now
            if not watch.enabled:
                continue
            # Timestamp taken before the snapshot, rounded down to the
            # filesystem's timestamp resolution (used for atime filtering).
            before = round_fs_resolution(time.time())
            try:
                new_state = watch.new_state(watch.path)
            except OSError as e:
                if e.errno == errno.ENOENT:
                    # Report DeleteSelf only once per disappearance.
                    if not watch._deleted:
                        del watch.state
                        events.append(FSEvent(watch, FSEvent.DeleteSelf))
            else:
                if isinstance(watch, FSMonitorDirWatch):
                    _compare_contents(watch, new_state, events, before)
                elif isinstance(watch, FSMonitorFileWatch):
                    _compare_stat(watch, new_state, events, before,
                                  watch.state, watch.path)
                watch.state = new_state
        return events
| {
"content_hash": "0f5831edf718fe284e8432fd439b1e7c",
"timestamp": "",
"source": "github",
"line_count": 208,
"max_line_length": 81,
"avg_line_length": 29.78846153846154,
"alnum_prop": 0.5642349903163331,
"repo_name": "shaurz/fsmonitor",
"id": "0c873f69a18440e8e7c7b2463204d9290fbcbf4a",
"size": "6513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fsmonitor/polling.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26996"
}
],
"symlink_target": ""
} |
from st2common.models.api.action import ActionAliasAPI
from st2tests.fixturesloader import FixturesLoader
from tests import FunctionalTest
FIXTURES_PACK = 'aliases'
TEST_MODELS = {
'aliases': ['alias1.yaml', 'alias2.yaml']
}
TEST_LOAD_MODELS = {
'aliases': ['alias3.yaml']
}
class TestActionAlias(FunctionalTest):
    """Functional tests for the /exp/actionalias API endpoint."""

    models = None
    alias1 = None
    alias2 = None
    alias3 = None

    @classmethod
    def setUpClass(cls):
        super(TestActionAlias, cls).setUpClass()
        cls.models = FixturesLoader().save_fixtures_to_db(
            fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_MODELS)
        cls.alias1 = cls.models['aliases']['alias1.yaml']
        cls.alias2 = cls.models['aliases']['alias2.yaml']
        # alias3 is loaded but not persisted; test_post_delete creates it.
        loaded = FixturesLoader().load_models(
            fixtures_pack=FIXTURES_PACK, fixtures_dict=TEST_LOAD_MODELS)
        cls.alias3 = loaded['aliases']['alias3.yaml']

    def test_get_all(self):
        """GET of the collection returns every persisted alias."""
        resp = self.app.get('/exp/actionalias')
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(len(resp.json), 2, '/exp/actionalias did not return all aliases.')
        names = [alias['name'] for alias in resp.json]
        self.assertEqual(names, [self.alias1.name, self.alias2.name],
                         'Incorrect aliases retrieved.')

    def test_get_one(self):
        """GET by id returns the matching alias."""
        resp = self.app.get('/exp/actionalias/%s' % self.alias1.id)
        self.assertEqual(resp.status_int, 200)
        self.assertEqual(resp.json['name'], self.alias1.name,
                         'Incorrect aliases retrieved.')

    def test_post_delete(self):
        """An alias can be created, fetched, then deleted (404 afterwards)."""
        created = self._do_post(vars(ActionAliasAPI.from_model(self.alias3)))
        self.assertEqual(created.status_int, 201)
        fetched = self.app.get('/exp/actionalias/%s' % created.json['id'])
        self.assertEqual(fetched.status_int, 200)
        self.assertEqual(fetched.json['name'], self.alias3.name,
                         'Incorrect aliases retrieved.')
        deleted = self.__do_delete(created.json['id'])
        self.assertEqual(deleted.status_int, 204)
        missing = self.app.get('/exp/actionalias/%s' % created.json['id'],
                               expect_errors=True)
        self.assertEqual(missing.status_int, 404)

    def _do_post(self, actionalias, expect_errors=False):
        """POST an alias payload to the collection endpoint."""
        return self.app.post_json('/exp/actionalias', actionalias,
                                  expect_errors=expect_errors)

    def __do_delete(self, actionalias_id, expect_errors=False):
        """DELETE an alias by id."""
        return self.app.delete('/exp/actionalias/%s' % actionalias_id,
                               expect_errors=expect_errors)
| {
"content_hash": "4d76986d4a3df50a33a2c0f85f6ed265",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 99,
"avg_line_length": 37.901408450704224,
"alnum_prop": 0.6280193236714976,
"repo_name": "grengojbo/st2",
"id": "9efda9a384ef89059147ba18a31d0846ebb4f1c7",
"size": "3471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "st2api/tests/unit/controllers/exp/test_action_alias.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "198"
},
{
"name": "Makefile",
"bytes": "21186"
},
{
"name": "PowerShell",
"bytes": "299"
},
{
"name": "Python",
"bytes": "2091976"
},
{
"name": "Shell",
"bytes": "7518"
},
{
"name": "Slash",
"bytes": "677"
}
],
"symlink_target": ""
} |
from tempest_lib.tests import fake_auth_provider
from tempest.services.compute.json import aggregates_client
from tempest.tests.services.compute import base
class TestAggregatesClient(base.BaseComputeServiceTest):
    """Unit tests for AggregatesClient against canned fake API responses.

    Each test patches the underlying ServiceClient HTTP method and checks
    that the client returns the expected (str or bytes) response body.
    """

    # Canned response body for GET /os-aggregates/{id}.
    FAKE_SHOW_AGGREGATE = {
        "aggregate":
        {
            "name": "hoge",
            "availability_zone": None,
            "deleted": False,
            "created_at":
            "2015-07-16T03:07:32.000000",
            "updated_at": None,
            "hosts": [],
            "deleted_at": None,
            "id": 1,
            "metadata": {}
        }
    }

    # Canned response body for POST /os-aggregates (non-ASCII name on purpose).
    FAKE_CREATE_AGGREGATE = {
        "aggregate":
        {
            "name": u'\xf4',
            "availability_zone": None,
            "deleted": False,
            "created_at": "2015-07-21T04:11:18.000000",
            "updated_at": None,
            "deleted_at": None,
            "id": 1
        }
    }

    # Canned response body for PUT /os-aggregates/{id}.
    FAKE_UPDATE_AGGREGATE = {
        "aggregate":
        {
            "name": u'\xe9',
            "availability_zone": None,
            "deleted": False,
            "created_at": "2015-07-16T03:07:32.000000",
            "updated_at": "2015-07-23T05:16:29.000000",
            "hosts": [],
            "deleted_at": None,
            "id": 1,
            "metadata": {}
        }
    }

    def setUp(self):
        """Build an AggregatesClient backed by a fake auth provider."""
        super(TestAggregatesClient, self).setUp()
        fake_auth = fake_auth_provider.FakeAuthProvider()
        self.client = aggregates_client.AggregatesClient(
            fake_auth, 'compute', 'regionOne')

    def _test_list_aggregates(self, bytes_body=False):
        # list_aggregates issues a GET and should accept an empty collection.
        self.check_service_client_function(
            self.client.list_aggregates,
            'tempest.common.service_client.ServiceClient.get',
            {"aggregates": []},
            bytes_body)

    def test_list_aggregates_with_str_body(self):
        self._test_list_aggregates()

    def test_list_aggregates_with_bytes_body(self):
        self._test_list_aggregates(bytes_body=True)

    def _test_show_aggregate(self, bytes_body=False):
        # show_aggregate issues a GET for a single aggregate id.
        self.check_service_client_function(
            self.client.show_aggregate,
            'tempest.common.service_client.ServiceClient.get',
            self.FAKE_SHOW_AGGREGATE,
            bytes_body,
            aggregate_id=1)

    def test_show_aggregate_with_str_body(self):
        self._test_show_aggregate()

    def test_show_aggregate_with_bytes_body(self):
        self._test_show_aggregate(bytes_body=True)

    def _test_create_aggregate(self, bytes_body=False):
        # create_aggregate issues a POST with the new aggregate's name.
        self.check_service_client_function(
            self.client.create_aggregate,
            'tempest.common.service_client.ServiceClient.post',
            self.FAKE_CREATE_AGGREGATE,
            bytes_body,
            name='hoge')

    def test_create_aggregate_with_str_body(self):
        self._test_create_aggregate()

    def test_create_aggregate_with_bytes_body(self):
        self._test_create_aggregate(bytes_body=True)

    def test_delete_aggregate(self):
        # delete_aggregate issues a DELETE and expects an empty body.
        self.check_service_client_function(
            self.client.delete_aggregate,
            'tempest.common.service_client.ServiceClient.delete',
            {}, aggregate_id="1")

    def _test_update_aggregate(self, bytes_body=False):
        # update_aggregate issues a PUT for an existing aggregate id.
        self.check_service_client_function(
            self.client.update_aggregate,
            'tempest.common.service_client.ServiceClient.put',
            self.FAKE_UPDATE_AGGREGATE,
            bytes_body,
            aggregate_id=1)

    def test_update_aggregate_with_str_body(self):
        self._test_update_aggregate()

    def test_update_aggregate_with_bytes_body(self):
        self._test_update_aggregate(bytes_body=True)
| {
"content_hash": "70c75f24430733ce7a586439b695a77c",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 65,
"avg_line_length": 31.487179487179485,
"alnum_prop": 0.5735613463626493,
"repo_name": "xbezdick/tempest",
"id": "e92b76b12af658addc3520d4523a5047526bdcfc",
"size": "4315",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tempest/tests/services/compute/test_aggregates_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2880166"
},
{
"name": "Shell",
"bytes": "8578"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for ``carpet.aaxis.tickformatstop.dtickrange``.

    Validates a two-item info array; each slot accepts any value and a
    change triggers a "calc" edit.
    """

    def __init__(
        self,
        plotly_name="dtickrange",
        parent_name="carpet.aaxis.tickformatstop",
        **kwargs,
    ):
        # Default schema for the two array slots, used unless overridden.
        default_items = [
            {"editType": "calc", "valType": "any"},
            {"editType": "calc", "valType": "any"},
        ]
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            items=kwargs.pop("items", default_items),
            **kwargs,
        )
| {
"content_hash": "d8de7529191ed620d300920d014c0e65",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 75,
"avg_line_length": 30.217391304347824,
"alnum_prop": 0.4892086330935252,
"repo_name": "plotly/plotly.py",
"id": "7d0756cf928a7c7d6f9d27a07ff6b0e97b954cc6",
"size": "695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/carpet/aaxis/tickformatstop/_dtickrange.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
from django.template import Context
from django.template.loader import get_template
from django import template
register = template.Library()
@register.filter
def bootstrap(element, options=''):
    """Render a form or a single bound field with Bootstrap markup.

    :param element: a Django form or a BoundField instance
    :param options: optional flags; 'nolabel' suppresses <label> output
    :return: the rendered HTML for the element
    """
    element_type = element.__class__.__name__.lower()
    # 'nolabel' is the only supported option string.
    nolabel = options == 'nolabel'
    # NOTE: renamed the local from `template` to `tmpl` so it no longer
    # shadows the `django.template` module imported at file level.
    if element_type == 'boundfield':
        tmpl = get_template("bootstrapform/field.html")
        context = Context({'field': element, 'nolabel': nolabel})
    else:
        tmpl = get_template("bootstrapform/form.html")
        context = Context({'form': element, 'nolabel': nolabel})
    return tmpl.render(context)
@register.filter
def is_checkbox(field):
    """Template filter: True when the bound field's widget is a CheckboxInput."""
    widget_class = field.field.widget.__class__.__name__
    return widget_class.lower() == "checkboxinput"
@register.filter
def is_radio(field):
    """Template filter: True when the bound field's widget is a RadioSelect."""
    widget_class = field.field.widget.__class__.__name__
    return widget_class.lower() == "radioselect"
| {
"content_hash": "970223ca50e72724e9df4283934be087",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 75,
"avg_line_length": 26.676470588235293,
"alnum_prop": 0.6670341786108048,
"repo_name": "Alerion/django-bootstrap-form",
"id": "5d90d049e0be204cc6a951b9631b7b8b0a1f125e",
"size": "907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bootstrapform/templatetags/bootstrap.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from django import test
from select2_generic_m2m.test_forms import GenericSelect2TestMixin
from .forms import TForm
from .models import TModel
class GM2MFormTest(GenericSelect2TestMixin, test.TestCase):
    """Exercise the generic M2M autocomplete form against TModel."""

    model = TModel
    form = TForm
    url_name = 'select2_gm2m'

    def assert_relation_equals(self, expected, result):
        """Assert that ``result`` holds exactly the expected related objects.

        Items in ``result`` may be gm2m through-objects; in that case the
        actual target object is read from their ``gm2m_tgt`` attribute.
        """
        # assertEquals() is a deprecated alias; use assertEqual().
        self.assertEqual(len(expected), len(result))
        for obj in result:
            self.assertIn(getattr(obj, 'gm2m_tgt', obj), expected)
| {
"content_hash": "95e66290a6dcc2539eaef6ab234b04d8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 66,
"avg_line_length": 26.27777777777778,
"alnum_prop": 0.7103594080338267,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "a25726b3635a325d8a30e94e3d1626aabd1e0e6d",
"size": "473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_project/select2_gm2m/test_forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
} |
import accelerometer.accClassification
import accelerometer.accUtils
import accelerometer.device
import accelerometer.summariseEpoch
name = "accelerometer"
| {
"content_hash": "a66fec3045d2590fa3bb632d6cdfa2de",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 31.2,
"alnum_prop": 0.8846153846153846,
"repo_name": "computationalEpidemiology/biobankAccelerometerAnalysis",
"id": "4e8af9e4f398e87fcf75cadee2755c90593984d3",
"size": "156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "accelerometer/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Java",
"bytes": "55686"
},
{
"name": "Python",
"bytes": "72280"
}
],
"symlink_target": ""
} |
import os.path as op
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
from mne.datasets import testing
from mne.io import read_raw_fif
from mne.preprocessing import (regress_artifact, create_eog_epochs,
EOGRegression, read_eog_regression)
from mne.utils import requires_version
data_path = testing.data_path(download=False)
raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
@testing.requires_testing_data
def test_regress_artifact():
    """Test regressing artifact data with the functional API."""
    raw = read_raw_fif(raw_fname).pick_types(meg=False, eeg=True, eog=True)
    raw.load_data()
    epochs = create_eog_epochs(raw)
    epochs.apply_baseline((None, None))
    orig_data = epochs.get_data('eeg')
    orig_norm = np.linalg.norm(orig_data)
    epochs_clean, betas = regress_artifact(epochs)
    # Re-running with copy=False and precomputed betas must reproduce the
    # cleaned result in place.
    regress_artifact(epochs, betas=betas, copy=False)  # inplace, and w/betas
    assert_allclose(epochs_clean.get_data(), epochs.get_data())
    clean_data = epochs_clean.get_data('eeg')
    clean_norm = np.linalg.norm(clean_data)
    # Regression should remove a substantial (but not implausibly large)
    # part of the signal energy.
    assert orig_norm / 2 > clean_norm > orig_norm / 10
    # Mismatched betas shape must be rejected with a clear error.
    with pytest.raises(ValueError, match=r'Invalid value.*betas\.shape.*'):
        regress_artifact(epochs, betas=betas[:-1])
    # Regressing channels onto themselves should work and zero them out.
    epochs, betas = regress_artifact(epochs, picks='eog', picks_artifact='eog')
    assert np.ptp(epochs.get_data('eog')) < 1E-15  # constant value
    assert_allclose(betas, 1)
@testing.requires_testing_data
def test_eog_regression():
    """Test regressing artifact data using the EOGRegression class."""
    raw_meg_eeg = read_raw_fif(raw_fname)
    raw = raw_meg_eeg.copy().pick(['eeg', 'eog', 'stim'])
    # Test various errors raised before fitting is possible
    with pytest.raises(RuntimeError, match='Projections need to be applied'):
        model = EOGRegression(proj=False).fit(raw)
    with pytest.raises(RuntimeError, match='requires raw data to be loaded'):
        model = EOGRegression().fit(raw)
    raw.load_data()
    # Test regression on raw data
    model = EOGRegression()
    assert str(model) == '<EOGRegression | not fitted>'
    model.fit(raw)
    assert str(model) == '<EOGRegression | fitted to 1 artifact channel>'
    assert model.coef_.shape == (59, 1)  # 59 EEG channels, 1 EOG channel
    raw_clean = model.apply(raw)
    # Some signal must have been removed
    assert np.ptp(raw_clean.get_data('eeg')) < np.ptp(raw.get_data('eeg'))
    # Test regression on epochs
    epochs = create_eog_epochs(raw)
    model = EOGRegression().fit(epochs)
    epochs = model.apply(epochs)
    # Since these were blinks, they should be mostly gone
    assert np.ptp(epochs.get_data('eeg')) < 1E-4
    # Test regression on evoked
    evoked = epochs.average('all')
    model = EOGRegression().fit(evoked)
    evoked = model.apply(evoked)
    assert model.coef_.shape == (59, 1)
    # Since this was a blink evoked, signal should be mostly gone
    assert np.ptp(evoked.get_data('eeg')) < 1E-4
    # Test regression on evoked and applying to raw, with different ordering of
    # channels. This should not work.
    raw_ = raw.copy().drop_channels(['EEG 001'])
    raw_ = raw_.add_channels([raw.copy().pick(['EEG 001'])])
    model = EOGRegression().fit(evoked)
    with pytest.raises(ValueError, match='data channels are not compatible'):
        model.apply(raw_)
    # Test in-place operation: copy=False must return the same objects,
    # copy=True must return fresh ones.
    raw_ = model.apply(raw, copy=False)
    assert raw_ is raw
    assert raw_._data is raw._data
    raw_ = model.apply(raw, copy=True)
    assert raw_ is not raw
    assert raw_._data is not raw._data
    # Test plotting with one channel type
    fig = model.plot()
    assert len(fig.axes) == 2  # (one topomap and one colorbar)
    assert fig.axes[0].title.get_text() == 'eeg/EOG 061'
    # Test plotting with multiple channel types
    raw_meg_eeg.load_data()
    fig = EOGRegression().fit(raw_meg_eeg).plot()
    assert len(fig.axes) == 6  # (3 topomaps and 3 colorbars)
    assert fig.axes[0].title.get_text() == 'grad/EOG 061'
    assert fig.axes[1].title.get_text() == 'mag/EOG 061'
    assert fig.axes[2].title.get_text() == 'eeg/EOG 061'
    # Test plotting with multiple channel types and multiple regressors
    m = EOGRegression(picks_artifact=['EEG 001', 'EOG 061']).fit(raw_meg_eeg)
    assert str(m) == '<EOGRegression | fitted to 2 artifact channels>'
    fig = m.plot()
    assert len(fig.axes) == 12  # (6 topomaps and 3 colorbars)
    assert fig.axes[0].title.get_text() == 'grad/EEG 001'
    assert fig.axes[1].title.get_text() == 'mag/EEG 001'
    assert fig.axes[4].title.get_text() == 'mag/EOG 061'
    assert fig.axes[5].title.get_text() == 'eeg/EOG 061'
@requires_version('h5io')
@testing.requires_testing_data
def test_read_eog_regression(tmp_path):
    """Test saving and loading an EOGRegression object."""
    raw = read_raw_fif(raw_fname).pick(['eeg', 'eog'])
    raw.load_data()
    fname = tmp_path / 'weights.h5'
    fitted = EOGRegression().fit(raw)
    fitted.save(fname, overwrite=True)
    reloaded = read_eog_regression(fname)
    # All array-valued attributes must round-trip exactly.
    for attr in ('picks', 'picks_artifact', 'exclude', 'coef_'):
        assert_array_equal(getattr(fitted, attr), getattr(reloaded, attr))
    assert fitted.proj == reloaded.proj
    assert fitted.info_.keys() == reloaded.info_.keys()
| {
"content_hash": "ae0c3a637df8902e1225b528c2610a18",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 40.78947368421053,
"alnum_prop": 0.672073732718894,
"repo_name": "wmvanvliet/mne-python",
"id": "d33f5da95fec312079103cf323ea25dcd852129b",
"size": "5501",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "mne/preprocessing/tests/test_regress.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "14962"
},
{
"name": "Makefile",
"bytes": "4612"
},
{
"name": "Python",
"bytes": "10372316"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "19970"
}
],
"symlink_target": ""
} |
from inspect import signature
from typing import Callable, Iterable
from typing import Union
from hotikeys.core import HotkeyCore
from hotikeys.enums import KeyState, Key, EventId
from hotikeys.lleventargs import LowLevelKeyboardArgs, LowLevelMouseArgs
EventArgs = Union[LowLevelKeyboardArgs, LowLevelMouseArgs]
_HandlerArg = Callable[[EventArgs], None]
_KeyArg = Union[int, Key]
_EventArg = Union[int, EventId, KeyState]
class Hotkey(HotkeyCore):
    """A key/modifier/event combination bound to a handler callable.

    Matching is permissive: any of key, modifiers or events left unset
    matches implicitly, so a Hotkey with only a handler fires on every
    event. All values are normalized to ints via the property setters.
    """

    def __init__(self,
                 handler: _HandlerArg,
                 key: _KeyArg = None,
                 modifiers: Union[_KeyArg, Iterable[_KeyArg], None] = None,
                 events: Union[_EventArg, Iterable[_EventArg], None] = KeyState.Down):
        self._handler = None  # type: _HandlerArg
        self._key = None  # type: Union[int, None]
        self._modifiers = ()  # type: Iterable[int]
        self._events = ()  # type: Iterable[int]
        self._handler_takes_args = None  # type: bool
        # Assign through the properties below so validation/coercion runs.
        self.handler = handler
        self.key = key
        self.modifiers = modifiers
        self.events = events

    def on_event(self, args):
        """Dispatch a low-level event to the handler if key, modifiers and events all match."""
        if not isinstance(args, (LowLevelKeyboardArgs, LowLevelMouseArgs)):
            raise TypeError('expected (LowLevelKeyboardArgs, LowLevelMouseArgs)'
                            ' for args, got {0}'.format(type(args)))
        if not self._match_key(args): return
        if not self._match_modifiers(): return
        if not self._match_events(args): return
        # Handlers declared without parameters are called bare.
        if self._handler_takes_args:
            self.handler(args)
        else:
            self.handler()

    def on_mouse(self, args):
        """Dispatch a mouse event; only the event filter is checked (no implicit match)."""
        if not self._match_events(args, False): return
        self.handler(args)

    def _match_key(self, args, implicit=True) -> bool:
        # No configured key counts as a match when implicit matching is on.
        if self.key is None and implicit: return True
        return self.key == args.vkey

    def _match_modifiers(self, implicit=True) -> bool:
        # No configured modifiers counts as a match when implicit matching
        # is on; otherwise every modifier must currently be held down.
        if not self.modifiers and implicit: return True
        return all(self.is_pressed(key) for key in self.modifiers)

    def _match_events(self, args, implicit=True) -> bool:
        # Matches either the raw event id or its key state.
        if not self.events and implicit: return True
        if args.event is None: return False
        if int(args.event) in self.events: return True
        if args.event.state is None: return False
        if int(args.event.state) in self.events: return True
        return False

    @property
    def handler(self) -> _HandlerArg:
        """The callable invoked when the hotkey fires."""
        return self._handler

    @handler.setter
    def handler(self, handler):
        if not callable(handler):
            raise TypeError('expected callable for handler, received: {0}'.format(type(handler)))
        # Remember whether the handler accepts the event args parameter.
        self._handler_takes_args = bool(len(signature(handler).parameters))
        self._handler = handler

    @property
    def key(self) -> int:
        """The virtual key code to match, or None to match any key."""
        return self._key

    @key.setter
    def key(self, key):
        if key is not None:
            self._key = int(key)
        else:
            self._key = None

    @property
    def modifiers(self) -> Iterable[int]:
        """Tuple of modifier key codes that must be held down."""
        return self._modifiers

    @modifiers.setter
    def modifiers(self, modifiers):
        if modifiers is not None:
            # Accept a single value or any list/tuple of values.
            if isinstance(modifiers, (int, Key)):
                modifiers = (modifiers,)
            if not isinstance(modifiers, (list, tuple)):
                raise TypeError('expected Key, int, list or tuple'
                                ' for modifiers, received: {0}'.format(type(modifiers)))
            self._modifiers = tuple(int(modifier) for modifier in modifiers)
        else:
            self._modifiers = ()

    @property
    def events(self) -> Iterable[int]:
        """Tuple of event ids / key states that trigger the hotkey."""
        return self._events

    @events.setter
    def events(self, events):
        if events is not None:
            # Accept a single value or any list/tuple of values.
            if isinstance(events, (int, EventId, KeyState)):
                events = (events,)
            if not isinstance(events, (list, tuple)):
                raise TypeError('expected EventId, KeyState, int, list or tuple'
                                ' for events, received: {0}'.format(type(events)))
            self._events = tuple(int(event) for event in events)
        else:
            self._events = ()
def newhotkey(key=None, modifiers=None, events=KeyState.Down):
    """Decorator factory: wrap the decorated handler in a Hotkey."""
    return lambda handler: Hotkey(handler, key, modifiers, events)
| {
"content_hash": "99df98c992312145ac7269e35e86bb16",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 97,
"avg_line_length": 34.78225806451613,
"alnum_prop": 0.6049153721307674,
"repo_name": "appul/HotiKeys",
"id": "f16023994e0951d3596882a206db7adaa612b2cc",
"size": "4313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hotikeys/hotkey.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27285"
}
],
"symlink_target": ""
} |
from dragonfly import (Grammar, AppContext, MappingRule, Dictation, Key, Text)
# Only activate this grammar while Firefox has keyboard focus.
firefox_context = AppContext(executable="firefox")
grammar = Grammar("firefox", context=firefox_context)

# Spoken command -> emulated keystroke mapping.
firefox_rules = MappingRule(
    name = "firefox",
    mapping = {
        "jump": Key("f12"),
        "edit": Key("cs-f4"),
    },
    extras = [
        Dictation("text")
    ]
)

grammar.add_rule(firefox_rules)
grammar.load()
def unload():
    """Unload and release the grammar (called by the loader on reload/exit)."""
    global grammar
    if grammar:
        grammar.unload()
    grammar = None
| {
"content_hash": "656c3d0191d48dd280133558588a14bc",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 21,
"alnum_prop": 0.6329365079365079,
"repo_name": "simianhacker/code-by-voice",
"id": "db41f4497814de1595d68808a0637a7fa4250241",
"size": "504",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "macros/firefox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "427"
},
{
"name": "Python",
"bytes": "89834"
}
],
"symlink_target": ""
} |
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import threading
from resource_management.core.exceptions import Fail
# concurrent kinit's can cause the following error:
# Internal credentials cache error while storing credentials while getting initial credentials
LOCK_TYPE_KERBEROS = "KERBEROS_LOCK"
# dictionary of all global lock instances
__GLOBAL_LOCKS = {
LOCK_TYPE_KERBEROS : threading.RLock()
}
def get_lock(lock_type):
  """
  Looks up the shared global lock registered under the specified type.

  The lock is only returned, never acquired; it is up to the caller to
  invoke RLock.acquire() and RLock.release() correctly.

  :param lock_type: one of the LOCK_TYPE_* constants
  :return: a global threading.RLock() instance
  :rtype: threading.RLock()
  """
  lock = __GLOBAL_LOCKS.get(lock_type)
  if lock is None:
    raise Fail("There is no global lock associated with {0}".format(str(lock_type)))
  return lock
| {
"content_hash": "0f25c0079dd645902fcb61af036cfe12",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 98,
"avg_line_length": 36.733333333333334,
"alnum_prop": 0.7701149425287356,
"repo_name": "radicalbit/ambari",
"id": "72904c89293a130c6c0e030634f76daf0a81139c",
"size": "1675",
"binary": false,
"copies": "5",
"ref": "refs/heads/trunk",
"path": "ambari-common/src/main/python/resource_management/core/global_lock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
} |
from keystoneauth1.exceptions import base
class AuthPluginException(base.ClientException):
    """Base exception for failures raised by auth plugins."""
class MissingAuthPlugin(AuthPluginException):
    """An authenticated request is required but no plugin available."""
class NoMatchingPlugin(AuthPluginException):
    """No auth plugin could be created from the parameters provided.

    :param str name: The name of the plugin that was attempted to load.

    .. py:attribute:: name

        The name of the plugin that was attempted to load.
    """

    def __init__(self, name):
        self.name = name
        super(NoMatchingPlugin, self).__init__(
            'The plugin %s could not be found' % name)
class UnsupportedParameters(AuthPluginException):
    """A parameter that was provided or returned is not supported.

    :param list(str) names: Names of the unsupported parameters.

    .. py:attribute:: names

        Names of the unsupported parameters.
    """

    def __init__(self, names):
        self.names = names
        joined = ', '.join(names)
        super(UnsupportedParameters, self).__init__(
            'The following parameters were given that are unsupported: %s' % joined)
| {
"content_hash": "5da8a948d0bbb71e0c6eb23457dbc548",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 78,
"avg_line_length": 28.25581395348837,
"alnum_prop": 0.6806584362139918,
"repo_name": "citrix-openstack-build/keystoneauth",
"id": "3747c50a03a1e9ad233db7dfdd1c37a52924782a",
"size": "1761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneauth1/exceptions/auth_plugins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "393336"
}
],
"symlink_target": ""
} |
import enum
def keywords():
    """
    Builds the list of all AML keywords, grouped in declaration order.

    :return: the list of the keywords.
    """
    result = []
    for group in (Type, Primitive, Statement, Accessor, WellKnown):
        result += group.keywords()
    return result
def tokens():
    """
    Builds the list of all AML tokens, grouped in declaration order.

    :return: the list of the tokens
    """
    result = []
    for group in (Type, Primitive, Statement, Accessor, WellKnown):
        result += group.tokens()
    return result
def view():
    """
    Builds the dictionary mapping every AML token to its keyword.

    :return: the dictionary of the AML keywords
    """
    merged = {}
    for group in (Type, Primitive, Statement, Accessor, WellKnown):
        merged.update(group.view())
    return merged
def rview():
    """
    Builds the reverse dictionary mapping every AML keyword to its token.

    :return: the reverse dictionary of the AML keywords
    """
    merged = {}
    for group in (Type, Primitive, Statement, Accessor, WellKnown):
        merged.update(group.rview())
    return merged
@enum.unique
class Type(enum.Enum):
    """
    Types supported by AML.

    Member names are the lexer tokens; member values are the keywords as
    written in AML source.
    """
    VARIABLE = 'variable'
    PACKET = 'packet'
    FILTER = 'filter'
    LIST = 'list'

    @classmethod
    def keywords(cls):
        """
        Builds the list of the keywords.

        :return: the list of the keywords
        """
        return _keywords(cls)

    @classmethod
    def tokens(cls):
        """
        Builds the list of the tokens.

        :return: the list of the tokens
        """
        return _tokens(cls)

    @classmethod
    def view(cls):
        """
        Builds the dictionary that represents the class (token -> keyword).

        :return: the dictionary that represents the class
        """
        return _view(cls)

    @classmethod
    def rview(cls):
        """
        Builds the reverse dictionary that represents the class (keyword -> token).

        :return: the reverse dictionary that represents the class
        """
        return _rview(cls)
@enum.unique
class Primitive(enum.Enum):
    """
    AML attack actions (primitives).

    Member names are the lexer tokens; member values are the keywords as
    written in AML source.
    """
    # Physical primitives on nodes' components
    DISABLECOMPONENT = 'disableComponent'
    DECEIVECOMPONENT = 'deceiveComponent'
    DESTROYCOMPONENT = 'destroyComponent'
    # Physical primitives on nodes
    MISPLACENODE = 'misplaceNode'
    DESTROYNODE = 'destroyNode'
    # Logical primitives on packets' fields
    WRITEFIELD = 'writeField'
    READFIELD = 'readField'
    # Logical primitives on packets
    FORWARDPACKET = 'forwardPacket'
    CREATEPACKET = 'createPacket'
    INJECTPACKET = 'injectPacket'
    CLONEPACKET = 'clonePacket'
    DROPPACKET = 'dropPacket'

    @classmethod
    def keywords(cls):
        """
        Builds the list of the keywords.

        :return: the list of the keywords
        """
        return _keywords(cls)

    @classmethod
    def tokens(cls):
        """
        Builds the list of the tokens.

        :return: the list of the tokens
        """
        return _tokens(cls)

    @classmethod
    def view(cls):
        """
        Builds the dictionary that represents the class (token -> keyword).

        :return: the dictionary that represents the class
        """
        return _view(cls)

    @classmethod
    def rview(cls):
        """
        Builds the reverse dictionary that represents the class (keyword -> token).

        :return: the reverse dictionary that represents the class
        """
        return _rview(cls)
@enum.unique
class Statement(enum.Enum):
    """
    AML statements.

    Member names are the lexer tokens; member values are the keywords as
    written in AML source.
    """
    SCENARIO = 'scenario'
    PACKETS = 'packets'
    EVERY = 'every'
    NODES = 'nodes'
    FROM = 'from'
    ONCE = 'once'

    @classmethod
    def keywords(cls):
        """
        Builds the list of the keywords.

        :return: the list of the keywords
        """
        return _keywords(cls)

    @classmethod
    def tokens(cls):
        """
        Builds the list of the tokens.

        :return: the list of the tokens
        """
        return _tokens(cls)

    @classmethod
    def view(cls):
        """
        Builds the dictionary that represents the class (token -> keyword).

        :return: the dictionary that represents the class
        """
        return _view(cls)

    @classmethod
    def rview(cls):
        """
        Builds the reverse dictionary that represents the class (keyword -> token).

        :return: the reverse dictionary that represents the class
        """
        return _rview(cls)
@enum.unique
class Accessor(enum.Enum):
    """
    AML accessors.

    Member names are the lexer tokens; member values are the keywords as
    written in AML source.
    """
    MATCHING = 'matching'
    FOR = 'for'
    IN = 'in'

    @classmethod
    def keywords(cls):
        """
        Builds the list of the keywords.

        :return: the list of the keywords
        """
        return _keywords(cls)

    @classmethod
    def tokens(cls):
        """
        Builds the list of the tokens.

        :return: the list of the tokens
        """
        return _tokens(cls)

    @classmethod
    def view(cls):
        """
        Builds the dictionary that represents the class (token -> keyword).

        :return: the dictionary that represents the class
        """
        return _view(cls)

    @classmethod
    def rview(cls):
        """
        Builds the reverse dictionary that represents the class (keyword -> token).

        :return: the reverse dictionary that represents the class
        """
        return _rview(cls)
@enum.unique
class WellKnown(enum.Enum):
    """
    AML well known values (built-in identifiers and time units).

    Member names are the lexer tokens; member values are the keywords as
    written in AML source.
    """
    CAPTURED = 'captured'
    SELF = 'self'
    TX = 'tx'
    RX = 'rx'
    US = 'us'
    MS = 'ms'
    S = 's'

    @classmethod
    def keywords(cls):
        """
        Builds the list of the keywords.

        :return: the list of the keywords
        """
        return _keywords(cls)

    @classmethod
    def tokens(cls):
        """
        Builds the list of the tokens.

        :return: the list of the tokens
        """
        return _tokens(cls)

    @classmethod
    def view(cls):
        """
        Builds the dictionary that represents the class (token -> keyword).

        :return: the dictionary that represents the class
        """
        return _view(cls)

    @classmethod
    def rview(cls):
        """
        Builds the reverse dictionary that represents the class (keyword -> token).

        :return: the reverse dictionary that represents the class
        """
        return _rview(cls)
def _keywords(cls):
    """
    Builds the list of the values of an enum.Enum class.

    :param cls: the enum.Enum class to read
    :return: the list of the values
    :raise TypeError: if cls is not an enum.Enum class
    """
    if not isinstance(cls, enum.EnumMeta):
        raise TypeError(str(cls.__class__.__name__) + " not supported")
    # Enum member iteration follows definition order, so the result is stable.
    return [e.value for e in cls]
def _tokens(cls):
    """
    Builds the list of the names of an enum.Enum class.

    :param cls: the enum.Enum class to read
    :return: the list of the names
    :raise TypeError: if cls is not an enum.Enum class
    """
    if not isinstance(cls, enum.EnumMeta):
        raise TypeError(str(cls.__class__.__name__) + " not supported")
    # Enum member iteration follows definition order, so the result is stable.
    return [e.name for e in cls]
def _view(cls):
    """
    Builds the dictionary mapping the names with the related values of an enum.Enum class.

    :param cls: the enum.Enum class to read
    :returns: the dictionary mapping the names with the related values
    :raise TypeError: if cls is not an enum.Enum class
    """
    if not isinstance(cls, enum.EnumMeta):
        raise TypeError(str(cls.__class__.__name__) + " not supported")
    return {e.name: e.value for e in cls}
def _rview(cls):
    """
    Builds the reverse dictionary mapping the values with the related names of an enum.Enum class.

    :param cls: the enum.Enum class to read
    :returns: the dictionary mapping the values with the related names
    :raise TypeError: if cls is not an enum.Enum class
    """
    if not isinstance(cls, enum.EnumMeta):
        raise TypeError(str(cls.__class__.__name__) + " not supported")
    # @enum.unique on the callers' classes guarantees values are distinct keys.
    return {e.value: e.name for e in cls}
| {
"content_hash": "ab872f3696050e053d31d7ea41e175e9",
"timestamp": "",
"source": "github",
"line_count": 370,
"max_line_length": 99,
"avg_line_length": 21.92972972972973,
"alnum_prop": 0.5612521567660833,
"repo_name": "francescoracciatti/aml",
"id": "3040c31f4a9c56a1604365c28d7b6fb166271547",
"size": "8396",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aml/lexer/keywords.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "148475"
},
{
"name": "Shell",
"bytes": "520"
}
],
"symlink_target": ""
} |
from __future__ import (unicode_literals, division, absolute_import, print_function)
from powerline.renderers.shell import ShellRenderer
from powerline.theme import Theme
class IPythonRenderer(ShellRenderer):
    '''Powerline ipython segment renderer.'''
    def get_segment_info(self, segment_info, mode):
        # Expose the ipython-specific info under an 'ipython' key alongside
        # the renderer's own segment info.
        r = self.segment_info.copy()
        r['ipython'] = segment_info
        return r

    def get_theme(self, matcher_info):
        # 'in' uses the main theme; other matchers lazily build and cache
        # their local theme on first use.
        if matcher_info == 'in':
            return self.theme
        else:
            match = self.local_themes[matcher_info]
            try:
                return match['theme']
            except KeyError:
                match['theme'] = Theme(
                    theme_config=match['config'],
                    main_theme_config=self.theme_config,
                    **self.theme_kwargs
                )
                return match['theme']

    def shutdown(self):
        # Shut down the main theme plus any local themes that were built.
        self.theme.shutdown()
        for match in self.local_themes.values():
            if 'theme' in match:
                match['theme'].shutdown()

    def render(self, *args, **kwargs):
        # XXX super(ShellRenderer), *not* super(IPythonRenderer):
        # deliberately skips ShellRenderer.render in the MRO.
        return super(ShellRenderer, self).render(*args, **kwargs)
class IPythonPromptRenderer(IPythonRenderer):
    '''Powerline ipython prompt (in and in2) renderer'''
    # Wrap highlighting escapes in \x01/\x02 so readline excludes them
    # from the prompt width calculation.
    escape_hl_start = '\x01'
    escape_hl_end = '\x02'
class IPythonNonPromptRenderer(IPythonRenderer):
    '''Powerline ipython non-prompt (out and rewrite) renderer'''
    pass
class RendererProxy(object):
    '''Powerline IPython renderer proxy which chooses appropriate renderer

    Instantiates two renderer objects: one will be used for prompts and the
    other for non-prompts, and dispatches each render call to the right one.
    '''
    def __init__(self, **kwargs):
        # The width bookkeeping dict is shared so both renderers stay in sync.
        old_widths = {}
        self.non_prompt_renderer = IPythonNonPromptRenderer(old_widths=old_widths, **kwargs)
        self.prompt_renderer = IPythonPromptRenderer(old_widths=old_widths, **kwargs)

    def render_above_lines(self, *args, **kwargs):
        return self.non_prompt_renderer.render_above_lines(*args, **kwargs)

    def render(self, is_prompt, *args, **kwargs):
        if is_prompt:
            target = self.prompt_renderer
        else:
            target = self.non_prompt_renderer
        return target.render(*args, **kwargs)

    def shutdown(self, *args, **kwargs):
        self.prompt_renderer.shutdown(*args, **kwargs)
        self.non_prompt_renderer.shutdown(*args, **kwargs)
renderer = RendererProxy
| {
"content_hash": "7871060d3959b0342a45576bdc6f42f1",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 86,
"avg_line_length": 29.18918918918919,
"alnum_prop": 0.7175925925925926,
"repo_name": "wfscheper/powerline",
"id": "985e6c34b62ef7cfd4ea044edb9c44d3348d6886",
"size": "2190",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "powerline/renderers/ipython/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3149"
},
{
"name": "Lua",
"bytes": "400"
},
{
"name": "Makefile",
"bytes": "1134"
},
{
"name": "Python",
"bytes": "693573"
},
{
"name": "Shell",
"bytes": "38472"
},
{
"name": "VimL",
"bytes": "12342"
}
],
"symlink_target": ""
} |
import datetime
import dateutil.parser
from tornado import gen, web
import base
import models
class AllRacesHandler(base.BaseHandler):
    """Lists a user's logged races and accepts new race submissions."""

    @gen.coroutine
    @web.asynchronous
    @base.authorized
    def get(self, url):
        """Render the race log page for the profile living at ``url``."""
        self.tf.send({'profile.racelog.views': 1}, lambda x: x)
        error = self.get_error()
        year = datetime.date.today().year
        user = yield self.get_current_user_async()
        profile = yield models.get_user_by_url(self.redis, url)
        races = models.Race.objects(user=profile).order_by('-date')
        self.render('races.html', page_title='Racelog', user=user, today=datetime.date.today().strftime("%x"), error=error, this_year=year, profile=profile, races=races)

    @base.authenticated_async
    @web.asynchronous
    @gen.coroutine
    def post(self, url):
        """Create a new race for the logged-in user's own profile, then
        redirect back to the race list."""
        self.tf.send({'profile.racelog.adds': 1}, lambda x: x)
        error = self.get_error()
        year = datetime.date.today().year
        user = yield self.get_current_user_async()
        profile = yield models.get_user_by_url(self.redis, url)
        # Only the profile owner may add races to it.
        if user.email != profile.email:
            # BUG FIX: message previously read "permission to do add a run".
            self.redirect_msg('/', {'error': 'You do not have permission to add a run for this user.'})
            return
        date = self.get_argument('date', '')
        date = dateutil.parser.parse(date, fuzzy=True)
        name = self.get_argument('name', '')
        distance = self.get_argument('distance', '')
        # NOTE(review): float() raises ValueError on bad input and is not
        # guarded like the time field below — confirm whether intended.
        distance = float(distance)
        distance_units = self.get_argument('distance_units', '')
        time = self.get_argument('time', '')
        pacetime = self.get_argument('pacetime', 'time')  # read but currently unused
        notes = self.get_argument('notes', '')
        try:
            time = models.time_to_seconds(time) if time != '' else 0
        except ValueError as e:  # BUG FIX: was Python-2-only "except ValueError, e"
            msg = "The value you entered for time was not valid. Please enter your time in format HH:MM:SS or MM:SS or MM."
            self.redirect_msg('/u/%s' % user.url, {'error': msg})
            return
        race = models.Race(
            user=user,
            name=name,
            date=date,
            distance=distance,
            distance_units=distance_units,
            notes=notes,
            time=time)
        race.save()
        self.redirect(user.uri + '/races')
class ShowRaceHandler(base.BaseHandler):
    """Renders the detail page for a single race."""

    @web.asynchronous
    @gen.coroutine
    @base.authorized
    def get(self, userurl, raceid):
        """Show race ``raceid`` belonging to the profile at ``userurl``;
        responds 404 when the race does not exist."""
        user = yield self.get_current_user_async()
        profile = yield models.get_user_by_url(self.redis, userurl)
        race = models.Race.objects(id=raceid).first()
        # BUG FIX: compare with `is None`, not `== None` (PEP 8; `==` can
        # trigger custom equality on the document class).
        if race is None:
            # 404
            self.send_error(404)
            return
        year = datetime.date.today().year
        yield gen.Task(self.tf.send, {'profile.races.views': 1})
        title = '{} raced {} - {} {}'.format(profile.display_name, race.name, race.distance, race.distance_units)
        self.render('race.html', page_title=title, user=user, profile=profile, race=race, error=None, this_year=year)
| {
"content_hash": "0dacbbf8bcde2617b9af22253e387a9c",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 169,
"avg_line_length": 34.144444444444446,
"alnum_prop": 0.5942076147087536,
"repo_name": "JsonChiu/openrunlog",
"id": "3e5e6baa60677918a89901041d766c8ac91b7c8f",
"size": "3074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openrunlog/racelog.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "749"
},
{
"name": "HTML",
"bytes": "38395"
},
{
"name": "JavaScript",
"bytes": "71398"
},
{
"name": "Nginx",
"bytes": "1743"
},
{
"name": "Python",
"bytes": "99245"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
from asyncio.tasks import sleep
from collections import deque
from typing import Tuple, Callable, Union
from hwt.synthesizer.rtlLevel.constants import NOT_SPECIFIED
from hwtLib.peripheral.usb.constants import USB_VER, USB_PID
from hwtLib.peripheral.usb.descriptors.bundle import UsbDescriptorBundle, \
UsbNoSuchDescriptor
from hwtLib.peripheral.usb.descriptors.std import usb_descriptor_configuration_t, \
usb_descriptor_interface_t, USB_ENDPOINT_ATTRIBUTES_TRANSFER_TYPE, \
USB_ENDPOINT_DIR
from hwtLib.peripheral.usb.device_request import \
USB_REQUEST_TYPE_RECIPIENT, USB_REQUEST_TYPE_DIRECTION, \
USB_REQUEST_TYPE_TYPE
from hwtSimApi.process_utils import CallbackLoop, ExitCallbackLoop
from hwtSimApi.simCalendar import DONE
class USBIPPending:
    """Record of an in-flight USBIP request: its protocol sequence number,
    the target device and the transfer object carrying the payload."""

    def __init__(self, seqnum, device, xfer):
        self.seqnum = seqnum
        self.device = device
        self.xfer = xfer
class USBIPOperationPromise():
    """
    Future-like bridge between asyncio code and the HDL simulator: a
    generator-based operation is stepped inside the simulator once per
    clock event, and asyncio code busy-polls until a result is fulfilled.
    """
    def __init__(self, operation_process, sim, clk):
        # Sentinel NOT_SPECIFIED means "result not available yet".
        self.data = NOT_SPECIFIED
        self.operation_process = operation_process(self)
        self.sim = sim
        # CallbackLoop re-runs _pool_process on each clk event until
        # _continue_with_clk_triggering raises ExitCallbackLoop.
        self.cb = CallbackLoop(
            sim, clk,
            self._pool_process,
            self._continue_with_clk_triggering)
    def _continue_with_clk_triggering(self):
        # Stop the callback loop once a result has been published.
        if self.data is not NOT_SPECIFIED:
            raise ExitCallbackLoop()
        return True
    def schedule(self):
        """Insert the callback process into the simulator's event calendar."""
        sim = self.sim
        # [todo] potentialy not thread safe
        # print("Try to schedule")
        # next_time = 0 # sim._events._list[1]
        cb = self.cb()
        with sim.scheduler_lock:
            # Busy-wait until the simulator has finished its current time
            # slot before touching its event list.
            while sim._current_time_slot is None or sim._current_time_slot.timeslot_end is not DONE:
                pass
            # sim._current_event_list.append(cb)
            next_time = sim._events._list[0] + 2
            sim._schedule_proc(next_time, cb)
        # print("schedule done")
    def _pool_process(self):
        # Advance the operation generator one step; by the time it finishes
        # it must already have called fulfill().
        try:
            next(self.operation_process)
        except StopIteration:
            assert self.data is not NOT_SPECIFIED
    def fulfill(self, data):
        # Publish the operation result; unblocks pool_sleep_until_fulfil().
        self.data = data
    async def pool_sleep_until_fulfil(self, t):
        """Poll every ``t`` seconds of asyncio time until a result exists,
        then return it."""
        while self.data is NOT_SPECIFIED:
            await sleep(t)
        return self.data
class USBIPSimDevice():
    """
    Simulated USB device handle backed by a descriptor bundle and a
    simulation usb_agent which actually drives the bus.

    :attention: API of this component should be same as the device from library usb1 if possible
    because we want to just swap the device object in sim and test the real device as well
    """
    def __init__(self, usb_agent, addr:int, descriptors:UsbDescriptorBundle):
        self.usb_agent = usb_agent
        self.addr = addr
        # Maps (endpoint, USB_ENDPOINT_DIR) -> next DATA0/DATA1 PID toggle
        # for bulk transfers (maintained by USBIPTransfer.submit).
        self.endp_next_pid = {}
        self.descriptors = descriptors
    def getBusNumber(self):
        # The simulation exposes a single bus, always number 0.
        return 0
    def getDeviceAddress(self):
        return self.addr
    def getDeviceSpeed(self):
        # NOTE(review): derived from bcdUSB (the USB spec revision), which
        # is only a proxy for the link speed — confirm against usb1 semantics.
        v = int(self.descriptors[0].body.bcdUSB)
        return USB_VER.from_uint16_t(v)
    def getVendorID(self):
        # descriptors[0] holds the device descriptor fields used below.
        return int(self.descriptors[0].body.idVendor)
    def getProductID(self):
        return int(self.descriptors[0].body.idProduct)
    def getbcdDevice(self):
        return int(self.descriptors[0].body.bcdDevice)
    def getDeviceClass(self):
        return int(self.descriptors[0].body.bDeviceClass)
    def getDeviceSubClass(self):
        return int(self.descriptors[0].body.bDeviceSubClass)
    def getDeviceProtocol(self):
        return int(self.descriptors[0].body.bDeviceProtocol)
    def getNumConfigurations(self):
        return int(self.descriptors[0].body.bNumConfigurations)
    def iterConfigurations(self):
        # Walk configuration descriptors by index until lookup fails.
        descr_i = 0
        while True:
            try:
                d = self.descriptors.get_descriptor(usb_descriptor_configuration_t, descr_i)
            except UsbNoSuchDescriptor:
                break
            yield USBIPSimDeviceConfiguration(self, d[1])
            descr_i += 1
    def open(self):
        # No OS handle to acquire in simulation; the device is its own handle.
        return self
    def close(self):
        pass
    def getDevice(self):
        return self
    async def controlRead(self, bmRequestType_recipient:USB_REQUEST_TYPE_RECIPIENT,
                    bmRequestType_type:USB_REQUEST_TYPE_TYPE,
                    bmRequestType_data_transfer_direction:USB_REQUEST_TYPE_DIRECTION,
                    bRequest, wValue, wIndex, wLength):
        """Run a control-IN request inside the simulator and return the
        bytes read (polls the simulator via USBIPOperationPromise)."""
        def execute_control_read(read_op):
            # Runs inside the simulator; fulfills read_op with the payload.
            # print("controlRead", self.addr, bmRequestType_recipient, bmRequestType_type, bmRequestType_data_transfer_direction, bRequest, wValue, wIndex, wLength)
            read_data = yield from self.usb_agent.usb_driver.control_read(
                self.addr, bmRequestType_type, bRequest, wValue, wIndex, wLength,
                bmRequestType_recipient=bmRequestType_recipient,
                bmRequestType_data_transfer_direction=bmRequestType_data_transfer_direction)
            # print("controlRead-done", read_data)
            read_op.fulfill(read_data)
        read_op = USBIPOperationPromise(execute_control_read, self.usb_agent.sim, self.usb_agent.clk)
        read_op.schedule()
        d = await read_op.pool_sleep_until_fulfil(0.01)
        return bytes(d)
    async def controlWrite(self, bmRequestType_recipient:USB_REQUEST_TYPE_RECIPIENT,
                    bmRequestType_type:USB_REQUEST_TYPE_TYPE,
                    bmRequestType_data_transfer_direction:USB_REQUEST_TYPE_DIRECTION,
                    bRequest: int, wValue: int, wIndex: int, data):
        """
        :return: The number of bytes actually sent.
        """
        def execute_control_write(write_op):
            # Runs inside the simulator; the literal 0 is presumably the
            # endpoint number — confirm against usb_driver.control_write.
            # print("controlWrite", self.addr, bRequestType, bRequest, wValue, wIndex, data)
            yield from self.usb_agent.usb_driver.control_write(
                self.addr, 0, bmRequestType_type, bRequest, wValue, wIndex, data,
                bmRequestType_recipient=bmRequestType_recipient,
                bmRequestType_data_transfer_direction=bmRequestType_data_transfer_direction)
            write_op.fulfill(True)
        write_op = USBIPOperationPromise(execute_control_write, self.usb_agent.sim, self.usb_agent.clk)
        write_op.schedule()
        await write_op.pool_sleep_until_fulfil(0.01)
        return len(data)
    def getTransfer(self):
        # New unconfigured transfer object bound to this device.
        return USBIPTransfer(self)
class LIBUSB_TRANSFER_STATUS:
    """Transfer completion codes mirroring libusb's libusb_transfer_status
    enum (plain ints so callers can compare directly)."""
    TRANSFER_COMPLETED = 0   # finished without error (may still be a short transfer)
    TRANSFER_ERROR = 1       # transfer failed
    TRANSFER_TIMED_OUT = 2   # transfer timed out
    TRANSFER_CANCELLED = 3   # transfer was cancelled
    TRANSFER_STALL = 4       # bulk/interrupt: endpoint halted; control: request unsupported
    TRANSFER_NO_DEVICE = 5   # device was disconnected
    TRANSFER_OVERFLOW = 6    # device sent more data than requested
class USBIPTransfer():
    """
    libusb-transfer-like object: configure with setBulk(), run with
    submit() (which executes inside the HDL simulator), then read the
    result via getBuffer()/getActualLength()/getStatus().
    """
    def __init__(self, dev: USBIPSimDevice):
        self.dev = dev
        self.is_in = None       # True for device-to-host (IN) transfers
        self.transfer_t = None  # USB_ENDPOINT_ATTRIBUTES_TRANSFER_TYPE value
        self.ep = None          # endpoint number with the direction bit stripped
        self.buffer = None      # data to send, or data received
        self.len = None         # requested read length for IN transfers
        self.callback = None    # invoked with self when submit() completes
    def setBulk(self, ep:int, buff_or_len:Union[int, bytes], callback:Callable[['USBIPTransfer'], None]):
        """Configure a bulk transfer; bit 0x80 of ``ep`` selects IN direction.
        IN transfers take a length, OUT transfers take the payload itself."""
        self.is_in = ep >= 0x80
        self.ep = ep & ~0x80
        self.transfer_t = USB_ENDPOINT_ATTRIBUTES_TRANSFER_TYPE.BULK
        if self.is_in:
            assert isinstance(buff_or_len, int), buff_or_len
            self.len = buff_or_len
        else:
            assert isinstance(buff_or_len, (list, tuple, bytes, deque))
            self.buffer = buff_or_len
        self.callback = callback
    def _next_bulk_pid(self, pid):
        # Bulk endpoints alternate DATA0/DATA1 after each successful transfer.
        if pid == USB_PID.DATA_0:
            return USB_PID.DATA_1
        elif pid == USB_PID.DATA_1:
            return USB_PID.DATA_0
        else:
            raise NotImplementedError(pid)
    async def submit(self):
        """Execute the configured transfer in the simulator, update the PID
        toggle bookkeeping on the device, then invoke the callback."""
        if self.transfer_t == USB_ENDPOINT_ATTRIBUTES_TRANSFER_TYPE.BULK:
            if self.is_in:
                def execute_bulk(read_op):
                    # Runs inside the simulator; reads one bulk IN packet.
                    # print("IN", self.dev.addr, self.ep)
                    epk = (self.ep, USB_ENDPOINT_DIR.IN)
                    pid = self.dev.endp_next_pid.setdefault(epk, USB_PID.DATA_0)
                    read_data = yield from self.dev.usb_agent.usb_driver.receive_bulk(
                        self.dev.addr, self.ep, pid, 512)
                    # print("IN-done", read_data)
                    if read_data is None:
                        read_data = []  # nack, do not update data pid
                    else:
                        self.dev.endp_next_pid[epk] = self._next_bulk_pid(pid)
                    read_op.fulfill(read_data)
            else:
                def execute_bulk(read_op):
                    # Runs inside the simulator; writes self.buffer as bulk OUT.
                    # print("OUT", self.dev.addr, self.ep, self.buffer)
                    epk = (self.ep, USB_ENDPOINT_DIR.OUT)
                    pid = self.dev.endp_next_pid.setdefault(epk, USB_PID.DATA_0)
                    yield from self.dev.usb_agent.usb_driver.transmit_bulk(
                        self.dev.addr, self.ep, pid, self.buffer)
                    # print("OUT-done")
                    self.dev.endp_next_pid[epk] = self._next_bulk_pid(pid)
                    read_op.fulfill(None)
            op = USBIPOperationPromise(execute_bulk, self.dev.usb_agent.sim, self.dev.usb_agent.clk)
            op.schedule()
            buffer = await op.pool_sleep_until_fulfil(0.01)
            # For IN transfers the received bytes replace self.buffer.
            if self.is_in and buffer is not None:
                self.buffer = list(buffer)
            self.callback(self)
        else:
            raise NotImplementedError()
    def getBuffer(self):
        return self.buffer
    def getActualLength(self):
        return len(self.buffer)
    def getStatus(self):
        # The simulation reports unconditional success.
        return LIBUSB_TRANSFER_STATUS.TRANSFER_COMPLETED
class USBIPDevice:
    """Pairs a (bus number, device address) id with an opened device handle."""

    def __init__(self, devid: Tuple[int, int], hnd: USBIPSimDevice):
        self.devid = devid
        self.hnd = hnd

    def packDevid(self):
        """Pack (bus, address) into the single 32-bit id used on the wire:
        bus number in the upper 16 bits, device address in the lower 16."""
        bus, address = self.devid
        return (bus << 16) | address
class USBIPSimDeviceConfiguration():
    """Wrapper over one configuration descriptor of a USBIPSimDevice."""
    def __init__(self, dev: USBIPSimDevice, config_descr:usb_descriptor_configuration_t):
        self.dev = dev
        self.config_descr = config_descr
    def getConfigurationValue(self):
        # NOTE(review): hard-coded 1 — presumably fine while the sim device
        # exposes a single configuration; confirm against bConfigurationValue.
        return 1
    def getNumInterfaces(self):
        return int(self.config_descr.body.bNumInterfaces)
    def iterInterfaces(self):
        # Walk interface descriptors by index until lookup fails.
        descr_i = 0
        while True:
            try:
                d = self.dev.descriptors.get_descriptor(usb_descriptor_interface_t, descr_i)
            except UsbNoSuchDescriptor:
                break
            yield USBIPSimDeviceInterface(self, d[1])
            descr_i += 1
class USBIPSimDeviceInterface():
    """A single interface of a simulated device configuration."""

    def __init__(self, conf: USBIPSimDeviceConfiguration, descr: usb_descriptor_interface_t):
        self.conf = conf
        self.descr = descr

    def iterSettings(self):
        """Yield the alternate settings of this interface (exactly one in sim)."""
        yield USBIPSimDeviceInterfaceSetting(self)
class USBIPSimDeviceInterfaceSetting():
    """One (alternate) setting of an interface; exposes its class triple."""

    def __init__(self, intf: USBIPSimDeviceInterface):
        self.intf = intf

    def _descriptor_body(self):
        # Shortcut to the interface descriptor body this setting describes.
        return self.intf.descr.body

    def getClass(self):
        return int(self._descriptor_body().bInterfaceClass)

    def getSubClass(self):
        return int(self._descriptor_body().bInterfaceSubClass)

    def getProtocol(self):
        return int(self._descriptor_body().bInterfaceProtocol)
| {
"content_hash": "ce6d1ff21af4608577e7f2db0b7d9e24",
"timestamp": "",
"source": "github",
"line_count": 338,
"max_line_length": 164,
"avg_line_length": 34.085798816568044,
"alnum_prop": 0.6155715649683188,
"repo_name": "Nic30/hwtLib",
"id": "04f4de6a0d1c3591d2eb4deca4c0aeff5e0f7240",
"size": "11521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hwtLib/peripheral/usb/sim/usbip/device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "41560"
},
{
"name": "Python",
"bytes": "2523349"
},
{
"name": "VHDL",
"bytes": "117346"
},
{
"name": "Verilog",
"bytes": "36444"
}
],
"symlink_target": ""
} |
import os.path
import re
from setuptools import setup, find_packages
README = os.path.join(os.path.dirname(__file__), "README.rst")
# BUG FIX: use a context manager so the README file handle is closed
# promptly instead of leaking until garbage collection.
with open(README) as f:
    long_description = f.read().strip() + "\n\n"
def find_version(*file_paths):
    """Extract the ``__version__ = "..."`` string from the file located by
    joining *file_paths onto this script's directory.

    Raises RuntimeError when no version assignment is found.
    """
    target = os.path.join(os.path.dirname(__file__), *file_paths)
    with open(target) as fh:
        contents = fh.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if not match:
        raise RuntimeError("Unable to find version string.")
    return match.group(1)
# Runtime dependencies: lxml/cssselect/cssutils for parsing HTML and CSS,
# requests for fetching external stylesheets, cachetools for caching.
install_requires = ["lxml", "cssselect", "cssutils", "requests", "cachetools"]
# Extra packages needed only when running the test suite.
tests_require = ["nose", "mock"]
setup(
    name="premailer",
    # Single-source the version from premailer/__init__.py.
    version=find_version("premailer", "__init__.py"),
    description="Turns CSS blocks into style attributes",
    long_description=long_description,
    keywords="html lxml email mail style",
    author="Peter Bengtsson",
    author_email="mail@peterbe.com",
    url="http://github.com/peterbe/premailer",
    license="Python",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Environment :: Other Environment",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Python Software Foundation License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Topic :: Communications",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Other/Nonlisted Topic",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    packages=find_packages(),
    include_package_data=True,
    test_suite="nose.collector",
    tests_require=tests_require,
    extras_require={
        # "dev" tools are for releasing and linting; "test" mirrors tests_require.
        "dev": ["tox", "twine", "therapist", "black", "flake8", "wheel"],
        "test": tests_require,
    },
    zip_safe=False,
    install_requires=install_requires,
)
| {
"content_hash": "f2420ff825f1531896cae79c874746a3",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 88,
"avg_line_length": 35.63076923076923,
"alnum_prop": 0.6234887737478411,
"repo_name": "peterbe/premailer",
"id": "86a6cf487025f28a23551202a58f812a91f5f563",
"size": "2316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "243"
},
{
"name": "HTML",
"bytes": "12615"
},
{
"name": "Python",
"bytes": "126395"
},
{
"name": "Shell",
"bytes": "145"
}
],
"symlink_target": ""
} |
def extractMahouShoujoIkuseiKeikakuWikiaCom(item):
    """
    Parser for 'mahou-shoujo-ikusei-keikaku.wikia.com'.

    DISABLED: this source is switched off, so every item is rejected by
    returning None.
    """
    return None
"content_hash": "6a2915b434fed8113d7ef44a578e1d17",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 51,
"avg_line_length": 19.571428571428573,
"alnum_prop": 0.7664233576642335,
"repo_name": "fake-name/ReadableWebProxy",
"id": "857685212194effa485854bdf6ace5cd4bb47d5c",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractMahouShoujoIkuseiKeikakuWikiaCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting SVC values"""
    # Round-trip a value with a sub-satoshi-significant fraction and check
    # it survives float encoding/decoding exactly.
    reference = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(reference)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSD, ...) uses the dotdir convention.
    return os.path.expanduser("~/.bitcoin")
def read_sovereigncoin_config(dbdir):
    """Read the sovereigncoin.conf file from dbdir, returns dictionary of settings

    BUG FIX: the function was defined as ``read_sovereigncoin.config`` —
    a dot is not legal in an identifier, so the module could not even be
    parsed (apparently a botched search-and-replace rename). Renamed to a
    valid identifier; main() is updated to match.
    """
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        # ConfigParser insists on section headers; wrap the file so it
        # appears to start with an [all] section, and strip '#' comments.
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "sovereigncoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server

    Builds the RPC URL from config (rpcuser/rpcpassword/rpcport; ports
    default to 127004 for testnet, 27004 for mainnet) and verifies the
    daemon's network matches. Exits the process on any failure.
    """
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 127004 if testnet else 27004
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the sovereigncoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        # NOTE(review): this bare except also catches the SystemExit raised
        # just above, turning a testnet mismatch into the generic connect
        # error — confirm whether that is intended.
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(sovereigncoind):
    """Ensure the wallet is unlocked, prompting for the passphrase when
    needed; returns True when the wallet is usable (unencrypted or unlocked)."""
    info = sovereigncoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            # Unlock only briefly (5 seconds) — just long enough to sign.
            sovereigncoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
        info = sovereigncoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(sovereigncoind):
    """Return {address: {"total", "outputs", "account"}} summarizing every
    unspent output the wallet can spend (standard pay-to-pubkey-hash and
    pay-to-script-hash outputs only)."""
    address_summary = dict()
    address_to_account = dict()
    for info in sovereigncoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = sovereigncoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = sovereigncoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent outputs, in order, until their total covers
    ``needed``. Returns (chosen_outputs, change), where change may be
    negative when the inputs cannot cover the amount."""
    # Feel free to improve this, this is good enough for my simple needs:
    chosen = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        chosen.append({ "txid":candidate["txid"], "vout":candidate["vout"]})
        gathered += candidate["amount"]
    return (chosen, gathered-needed)
def create_tx(sovereigncoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction sending ``amount`` (plus ``fee``)
    from the given addresses to ``toaddress``; any non-tiny change goes
    back to the last from-address. Returns the signed hex transaction, or
    exits the process when funds or signing are insufficient."""
    all_coins = list_available(sovereigncoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f SVC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to sovereigncoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = sovereigncoind.createrawtransaction(inputs, outputs)
    signed_rawtx = sovereigncoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(sovereigncoind, txinfo):
    """Sum the values of the previous outputs spent by txinfo's inputs,
    looking each one up over RPC."""
    total = Decimal("0.0")
    for vin in txinfo['vin']:
        prev_tx = sovereigncoind.getrawtransaction(vin['txid'], 1)
        total += prev_tx['vout'][vin['vout']]['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    return sum((vout['value'] for vout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(sovereigncoind, txdata_hex, max_fee):
    """Decode the signed transaction and exit(1) if its implied fee looks
    unreasonable: larger than ``max_fee``, or absent on a transaction that
    requires one (over 1000 bytes, or moving a tiny amount)."""
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = sovereigncoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(sovereigncoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: `fee` was referenced below but never assigned, so the
        # no-fee checks raised NameError instead of FeeError whenever they
        # were reached. The fee is inputs minus outputs.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: with no --amount, list spendable coins per
    address; otherwise build, sanity-check and (unless --dry_run) broadcast
    a transaction."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of sovereigncoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    # BUG FIX: was `read_sovereigncoin.config(...)` — an invalid identifier
    # (the def site is a SyntaxError); the reader is read_sovereigncoin_config.
    config = read_sovereigncoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    sovereigncoind = connect_JSON(config)
    if options.amount is None:
        address_summary = list_available(sovereigncoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(sovereigncoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(sovereigncoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse fees above 1% of the amount being sent.
        sanity_test_fee(sovereigncoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = sovereigncoind.sendrawtransaction(txdata)
            print(txid)


if __name__ == '__main__':
    main()
| {
"content_hash": "28d69a60d3c419f13ee38baa02757584",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 112,
"avg_line_length": 39.11507936507937,
"alnum_prop": 0.6226032261337121,
"repo_name": "cryptocurinfo/sovereigncoin",
"id": "e6c88aac6f768fdfead69e4ebba9f44e51d9a491",
"size": "10247",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "91288"
},
{
"name": "C++",
"bytes": "2441760"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Objective-C++",
"bytes": "2734"
},
{
"name": "Python",
"bytes": "37467"
},
{
"name": "Shell",
"bytes": "2575"
},
{
"name": "TypeScript",
"bytes": "5258627"
}
],
"symlink_target": ""
} |
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import unittest
from sample.utils.string.case_converter import toLowerCases, toUpperCases
class BasicTestSuite(unittest.TestCase):
    """Tests for sample.utils.string.case_converter."""

    def testCaseConverter(self):
        # Both converters should normalise mixed-case input.
        lowered = toLowerCases("a Simple Test")
        assert lowered == "a simple test"
        uppered = toUpperCases("a Simple Test")
        assert uppered == "A SIMPLE TEST"
# Allow running this file directly (python tests/test_utils.py) in addition
# to test-runner discovery.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "30568af6090eaa878162302fc2e8c51f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 82,
"avg_line_length": 27.25,
"alnum_prop": 0.653211009174312,
"repo_name": "dqi2018/python-structure",
"id": "6913bc629086bf8f1ce914daecd604ac0ba361cd",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2663"
}
],
"symlink_target": ""
} |
import pandas as pd
from pytrends.request import TrendReq
pytrends = TrendReq(tz=0) #tz=0 puts us on UTC
from dates import get_start_times, get_end_times, date_to_epoch, get_all_dates
from data_generator import get_missing_data_point
NUM_TRENDING = 10 # number of trending searches to return
def get_updated_daily_data(entity):
    """
    entity : the datastore entity for this investment
    Fetches hourly trend data for the entity's search term from its
    investment date through today, aggregates it into daily totals and
    backfills any gaps; returns the resulting daily-data dict.
    """
    term = entity['search_term']
    window_start = get_start_times(entity['initial_date'])
    window_end = get_end_times()
    hourly = fetch_hourly_data(term, *window_start, *window_end)
    daily = aggregate_hourly_to_daily(hourly, entity)
    return backfill_missing_data_as_necessary(daily, entity)
def fetch_hourly_data(search_term, year_start, month_start, day_start, year_end, month_end, day_end):
    """
    search_term: a search to retrieve data for
    Returns a pandas dataframe of hourly interest for the term over the
    inclusive [start-day 00h, end-day 23h] window.
    """
    window = dict(
        year_start=year_start, month_start=month_start, day_start=day_start, hour_start=0,
        year_end=year_end, month_end=month_end, day_end=day_end, hour_end=23,
    )
    # geo='' -> world-wide; gprop='' -> ordinary web searches (not e.g. YouTube)
    return pytrends.get_historical_interest([search_term], geo='', gprop='', **window)
def aggregate_hourly_to_daily(hourly_df, entity):
    """
    Collapse an hourly interest dataframe into per-day totals.

    Returns a dict containing the search term plus one entry per complete
    day, keyed by str(epoch of the day's final hourly row); a trailing
    partial day (fewer than 24 rows) is discarded.
    """
    term = entity['search_term']
    daily = {"search_term": term}
    hours_accumulated = 0
    running_total = 0
    for timestamp, row in hourly_df.iterrows():
        running_total += row[term]
        hours_accumulated += 1
        if hours_accumulated == 24:
            # A full day gathered; record it and reset the accumulators.
            daily[str(date_to_epoch(timestamp))] = running_total
            running_total = 0
            hours_accumulated = 0
    return daily
def backfill_missing_data_as_necessary(daily_data, entity):
    """
    Verify that we have a complete data set for the required dates. If not, fill in the blanks
    with old potentially outdated data or, worst case, generated data.
    Also stamps 'initial_date' and 'latest_date' onto the result.
    """
    required_dates = get_all_dates(entity['initial_date'])
    for date in required_dates:
        # convert date to string for datastore indexing purposes
        date_str = str(date)
        # NOTE : Pytrends fails in one of two ways: Mostly it returns blank data, in which case the date_str
        # will not be present in the dataframe. But sometimes it returns a stream of 0s. Since it is near impossible
        # to get 24 0 points in a row for legitimate reasons, we here treat any daily datapoint of 0 as missing data.
        if date_str in daily_data and daily_data[date_str] != 0:
            # we have successfully retrieved data for this date
            continue
        # BUG FIX: was `hasattr(entity, date_str)`, which checks for an
        # *attribute*; the entity is indexed as a mapping everywhere else in
        # this module (entity['initial_date'], entity[date_str]), so stored
        # values were never found and old data was never reused.
        elif date_str in entity:
            # we have old data in the database. default to this
            daily_data[date_str] = entity[date_str]
        else:
            # we have no data anywhere for this date. This *shouldn't* happen often.
            val = get_missing_data_point(required_dates, daily_data, date)
            daily_data[date_str] = val
    daily_data['initial_date'] = entity['initial_date']
    daily_data['latest_date'] = required_dates[-1]
    return daily_data
def get_trending_searches():
    """
    Returns a dictionary of the top NUM_TRENDING trending searches, keyed
    by rank as a string ("1" = most popular).
    """
    df = pytrends.trending_searches()
    data = {}
    for index, row in df.iterrows():
        # CONSISTENCY FIX: honor the module-level NUM_TRENDING constant
        # instead of a hard-coded 10, so the declared setting actually
        # controls the cutoff.
        if index >= NUM_TRENDING:
            break
        data[str(index + 1)] = row[0]
    return data
| {
"content_hash": "8ccfd50d7f50352639cf471464844084",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 117,
"avg_line_length": 33.53333333333333,
"alnum_prop": 0.6399105367793241,
"repo_name": "googleinterns/sgonks",
"id": "8109250fac2e3d25af1edb9ea443cd9cc2301be9",
"size": "4626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/services/data_updater/scripts/fetch_trends.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "20201"
},
{
"name": "HTML",
"bytes": "1721"
},
{
"name": "Java",
"bytes": "114216"
},
{
"name": "JavaScript",
"bytes": "71282"
},
{
"name": "Makefile",
"bytes": "711"
},
{
"name": "Python",
"bytes": "16524"
}
],
"symlink_target": ""
} |
import unittest
import CCSDS.FRAME, testData
#############
# test case #
#############
class TestFRAME(unittest.TestCase):
    """Verifies the CCSDS transfer frame data units: TM frames, TC frames
    and the CLCW, both default-constructed and built from reference bytes
    in testData."""

    # attribute names checked on TM frames, in the original assertion order
    TM_FIELDS = (
        "versionNumber", "spacecraftId", "virtualChannelId",
        "operationalControlField", "masterChannelFrameCount",
        "virtualChannelFCountLow", "secondaryHeaderFlag",
        "synchronisationFlag", "packetOrderFlag", "segmentLengthId",
        "firstHeaderPointer")
    # attribute names checked on TC frames, in the original assertion order
    TC_FIELDS = (
        "versionNumber", "reservedFieldB", "virtualChannelId",
        "controlCommandFlag", "reservedFieldA", "frameLength",
        "sequenceNumber", "spacecraftId", "bypassFlag")
    # attribute names checked on a default CLCW, in the original order
    CLCW_FIELDS = (
        "type", "version", "statusField", "copInEffect",
        "virtualChannelId", "spareField", "noRfAvailable", "noBitLock",
        "lockout", "wait", "retransmit", "farmBcounter", "reportType",
        "reportValue")

    def _checkFields(self, dataUnit, fieldNames, testDataPrefix=None):
        """Assert each named attribute of dataUnit equals the matching
        testData constant (testDataPrefix + name), or 0 when no prefix is
        given (default-constructed data units are fully zeroed)."""
        for fieldName in fieldNames:
            if testDataPrefix is None:
                expected = 0
            else:
                expected = getattr(testData, testDataPrefix + fieldName)
            self.assertEqual(getattr(dataUnit, fieldName), expected)

    def test(self):
        """test the transfer frame data units"""
        # a default-constructed TM frame has all header fields zeroed
        self._checkFields(CCSDS.FRAME.TMframe(), self.TM_FIELDS)
        # a TM frame built from reference bytes matches the reference values
        tmFrame1 = CCSDS.FRAME.TMframe(testData.TM_FRAME_01)
        self._checkFields(tmFrame1, self.TM_FIELDS, "TM_FRAME_01_")
        # extract packets and check it
        leadingFragment, packets, trailingFragment = tmFrame1.getPackets()
        self.assertEqual(leadingFragment,
                         testData.TM_FRAME_01_leadingFragment)
        self.assertEqual(len(packets), testData.TM_FRAME_01_nrPackets)
        self.assertEqual(trailingFragment,
                         testData.TM_FRAME_01_trailingFragment)
        # TC frames built from reference bytes
        self._checkFields(CCSDS.FRAME.TCframe(testData.TC_FRAME_01),
                          self.TC_FIELDS, "TC_FRAME_01_")
        self._checkFields(CCSDS.FRAME.TCframe(testData.TC_FRAME_02),
                          self.TC_FIELDS, "TC_FRAME_02_")
        # a default-constructed CLCW has all fields zeroed
        self._checkFields(CCSDS.FRAME.CLCW(), self.CLCW_FIELDS)
########
# main #
########
if __name__ == "__main__":
    # discover and run all test cases in this module
    unittest.main()
| {
"content_hash": "bcfb6995cf91b2eafed828e37a0764f2",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 70,
"avg_line_length": 46.372727272727275,
"alnum_prop": 0.6828072926877083,
"repo_name": "Stefan-Korner/SpacePyLibrary",
"id": "c2f051f4af4eff45fa5073729e61e760fa01a445",
"size": "6244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UnitTest/testFRAME.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11666"
},
{
"name": "Python",
"bytes": "1264766"
},
{
"name": "Shell",
"bytes": "23787"
}
],
"symlink_target": ""
} |
# Public API of this module: the parser itself and its error type.
__all__ = ['Parser', 'ParserError']
from error import MarkedYAMLError
from tokens import *
from events import *
from scanner import *
class ParserError(MarkedYAMLError):
    # Raised for grammar violations found while turning tokens into events;
    # context/problem marks are carried by the MarkedYAMLError base class.
    pass
class Parser:
    """Turns the scanner's token stream into a stream of parser events.

    The events are produced lazily by the parse_stream() generator; the
    check_event/peek_event/get_event methods add one-event lookahead on
    top of it.  The grammar production implemented by each parse_* method
    is quoted in a comment at the top of that method.

    NOTE(review): self.get_token / self.peek_token / self.check_token are
    not defined in this class; presumably the Scanner (star-imported
    above) provides them via multiple inheritance in the loader class --
    confirm before using Parser stand-alone.
    """
    # Since writing a recursive-descendant parser is a straightforward task, we
    # do not give many comments here.
    # Note that we use Python generators. If you rewrite the parser in another
    # language, you may replace all 'yield'-s with event handler calls.
    # Tag handles that are always defined, even without %TAG directives.
    DEFAULT_TAGS = {
        u'!': u'!',
        u'!!': u'tag:yaml.org,2002:',
    }
    def __init__(self):
        # current_event buffers one event for peek/check lookahead;
        # event_generator is the single generator that drives parsing.
        self.current_event = None
        self.yaml_version = None
        self.tag_handles = {}
        self.event_generator = self.parse_stream()
    def check_event(self, *choices):
        # Check the type of the next event.
        # With no arguments: is there any event left at all?
        if self.current_event is None:
            try:
                self.current_event = self.event_generator.next()
            except StopIteration:
                pass
        if self.current_event is not None:
            if not choices:
                return True
            for choice in choices:
                if isinstance(self.current_event, choice):
                    return True
        return False
    def peek_event(self):
        # Get the next event without consuming it (None at end of stream).
        if self.current_event is None:
            try:
                self.current_event = self.event_generator.next()
            except StopIteration:
                pass
        return self.current_event
    def get_event(self):
        # Get the next event and consume it (None at end of stream).
        if self.current_event is None:
            try:
                self.current_event = self.event_generator.next()
            except StopIteration:
                pass
        value = self.current_event
        self.current_event = None
        return value
    def __iter__(self):
        # Iterator protocol.
        # NOTE(review): iterating directly bypasses the current_event
        # buffer -- mixing iteration with get/peek may skip an event.
        return self.event_generator
    def parse_stream(self):
        # STREAM-START implicit_document? explicit_document* STREAM-END
        # Parse start of stream.
        token = self.get_token()
        yield StreamStartEvent(token.start_mark, token.end_mark,
                encoding=token.encoding)
        # Parse implicit document.
        if not self.check_token(DirectiveToken, DocumentStartToken,
                StreamEndToken):
            self.tag_handles = self.DEFAULT_TAGS
            token = self.peek_token()
            start_mark = end_mark = token.start_mark
            yield DocumentStartEvent(start_mark, end_mark,
                    explicit=False)
            for event in self.parse_block_node():
                yield event
            token = self.peek_token()
            start_mark = end_mark = token.start_mark
            explicit = False
            while self.check_token(DocumentEndToken):
                token = self.get_token()
                end_mark = token.end_mark
                explicit = True
            yield DocumentEndEvent(start_mark, end_mark,
                    explicit=explicit)
        # Parse explicit documents.
        while not self.check_token(StreamEndToken):
            token = self.peek_token()
            start_mark = token.start_mark
            version, tags = self.process_directives()
            if not self.check_token(DocumentStartToken):
                raise ParserError(None, None,
                        "expected '<document start>', but found %r"
                        % self.peek_token().id,
                        self.peek_token().start_mark)
            token = self.get_token()
            end_mark = token.end_mark
            yield DocumentStartEvent(start_mark, end_mark,
                    explicit=True, version=version, tags=tags)
            # An empty document body becomes an empty scalar event.
            if self.check_token(DirectiveToken,
                    DocumentStartToken, DocumentEndToken, StreamEndToken):
                yield self.process_empty_scalar(token.end_mark)
            else:
                for event in self.parse_block_node():
                    yield event
            token = self.peek_token()
            start_mark = end_mark = token.start_mark
            explicit = False
            while self.check_token(DocumentEndToken):
                token = self.get_token()
                end_mark = token.end_mark
                explicit=True
            yield DocumentEndEvent(start_mark, end_mark,
                    explicit=explicit)
        # Parse end of stream.
        token = self.get_token()
        yield StreamEndEvent(token.start_mark, token.end_mark)
    def process_directives(self):
        # DIRECTIVE*
        # Consumes %YAML/%TAG directives and returns (version, tags) for
        # the DocumentStartEvent; also refreshes self.tag_handles.
        self.yaml_version = None
        self.tag_handles = {}
        while self.check_token(DirectiveToken):
            token = self.get_token()
            if token.name == u'YAML':
                if self.yaml_version is not None:
                    raise ParserError(None, None,
                            "found duplicate YAML directive", token.start_mark)
                major, minor = token.value
                if major != 1:
                    raise ParserError(None, None,
                            "found incompatible YAML document (version 1.* is required)",
                            token.start_mark)
                self.yaml_version = token.value
            elif token.name == u'TAG':
                handle, prefix = token.value
                if handle in self.tag_handles:
                    raise ParserError(None, None,
                            "duplicate tag handle %r" % handle.encode('utf-8'),
                            token.start_mark)
                self.tag_handles[handle] = prefix
        # Report only the explicitly declared handles, but keep the
        # defaults available for resolution below.
        if self.tag_handles:
            value = self.yaml_version, self.tag_handles.copy()
        else:
            value = self.yaml_version, None
        for key in self.DEFAULT_TAGS:
            if key not in self.tag_handles:
                self.tag_handles[key] = self.DEFAULT_TAGS[key]
        return value
    def parse_block_node(self):
        # Parse a node in a block context.
        return self.parse_node(block=True)
    def parse_flow_node(self):
        # Parse a node in a flow context.
        return self.parse_node()
    def parse_block_node_or_indentless_sequence(self):
        # Parse a block node, allowing an indentless '-' sequence
        # (used for mapping values).
        return self.parse_node(block=True, indentless_sequence=True)
    def parse_node(self, block=False, indentless_sequence=False):
        # block_node ::= ALIAS | properties? block_content
        # flow_node ::= ALIAS | properties? flow_content
        # properties ::= TAG ANCHOR? | ANCHOR TAG?
        # block_content ::= block_collection | flow_collection | SCALAR
        # flow_content ::= flow_collection | SCALAR
        # block_collection ::= block_sequence | block_mapping
        # block_node_or_indentless_sequence ::= ALIAS | properties?
        # (block_content | indentless_block_sequence)
        if self.check_token(AliasToken):
            token = self.get_token()
            yield AliasEvent(token.value, token.start_mark, token.end_mark)
        else:
            # Collect optional anchor/tag properties, in either order.
            anchor = None
            tag = None
            start_mark = end_mark = tag_mark = None
            if self.check_token(AnchorToken):
                token = self.get_token()
                start_mark = token.start_mark
                end_mark = token.end_mark
                anchor = token.value
                if self.check_token(TagToken):
                    token = self.get_token()
                    tag_mark = token.start_mark
                    end_mark = token.end_mark
                    tag = token.value
            elif self.check_token(TagToken):
                token = self.get_token()
                start_mark = tag_mark = token.start_mark
                end_mark = token.end_mark
                tag = token.value
                if self.check_token(AnchorToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    anchor = token.value
            # Resolve a (handle, suffix) tag against the declared handles.
            if tag is not None and tag != u'!':
                handle, suffix = tag
                if handle is not None:
                    if handle not in self.tag_handles:
                        raise ParserError("while parsing a node", start_mark,
                                "found undefined tag handle %r" % handle.encode('utf-8'),
                                tag_mark)
                    tag = self.tag_handles[handle]+suffix
                else:
                    tag = suffix
            #if tag == u'!':
            #    raise ParserError("while parsing a node", start_mark,
            #            "found non-specific tag '!'", tag_mark,
            #            "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
            if start_mark is None:
                start_mark = end_mark = self.peek_token().start_mark
            event = None
            collection_events = None
            # implicit == the node carries no usable tag; refined below
            # for scalars into a (plain, quoted) pair.
            implicit = (tag is None or tag == u'!')
            if indentless_sequence and self.check_token(BlockEntryToken):
                end_mark = self.peek_token().end_mark
                event = SequenceStartEvent(anchor, tag, implicit,
                        start_mark, end_mark)
                collection_events = self.parse_indentless_sequence()
            else:
                if self.check_token(ScalarToken):
                    token = self.get_token()
                    end_mark = token.end_mark
                    if (token.plain and tag is None) or tag == u'!':
                        implicit = (True, False)
                    elif tag is None:
                        implicit = (False, True)
                    else:
                        implicit = (False, False)
                    event = ScalarEvent(anchor, tag, implicit, token.value,
                            start_mark, end_mark, style=token.style)
                elif self.check_token(FlowSequenceStartToken):
                    end_mark = self.peek_token().end_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    collection_events = self.parse_flow_sequence()
                elif self.check_token(FlowMappingStartToken):
                    end_mark = self.peek_token().end_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=True)
                    collection_events = self.parse_flow_mapping()
                elif block and self.check_token(BlockSequenceStartToken):
                    end_mark = self.peek_token().start_mark
                    event = SequenceStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    collection_events = self.parse_block_sequence()
                elif block and self.check_token(BlockMappingStartToken):
                    end_mark = self.peek_token().start_mark
                    event = MappingStartEvent(anchor, tag, implicit,
                            start_mark, end_mark, flow_style=False)
                    collection_events = self.parse_block_mapping()
                elif anchor is not None or tag is not None:
                    # Empty scalars are allowed even if a tag or an anchor is
                    # specified.
                    event = ScalarEvent(anchor, tag, (implicit, False), u'',
                            start_mark, end_mark)
                else:
                    if block:
                        node = 'block'
                    else:
                        node = 'flow'
                    token = self.peek_token()
                    raise ParserError("while scanning a %s node" % node, start_mark,
                            "expected the node content, but found %r" % token.id,
                            token.start_mark)
            # Emit the node's start event first, then (for collections)
            # delegate to the sub-generator for the contained events.
            yield event
            if collection_events is not None:
                for event in collection_events:
                    yield event
    def parse_block_sequence(self):
        # BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
        token = self.get_token()
        start_mark = token.start_mark
        while self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken, BlockEndToken):
                for event in self.parse_block_node():
                    yield event
            else:
                # '-' with no following node: an empty scalar entry
                yield self.process_empty_scalar(token.end_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while scanning a block collection", start_mark,
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        yield SequenceEndEvent(token.start_mark, token.end_mark)
    def parse_indentless_sequence(self):
        # (BLOCK-ENTRY block_node?)+
        # No BLOCK-END token here: the sequence ends at the first token
        # that is not a '-' entry.
        while self.check_token(BlockEntryToken):
            token = self.get_token()
            if not self.check_token(BlockEntryToken,
                    KeyToken, ValueToken, BlockEndToken):
                for event in self.parse_block_node():
                    yield event
            else:
                yield self.process_empty_scalar(token.end_mark)
        token = self.peek_token()
        yield SequenceEndEvent(token.start_mark, token.start_mark)
    def parse_block_mapping(self):
        # BLOCK-MAPPING_START
        # ((KEY block_node_or_indentless_sequence?)?
        # (VALUE block_node_or_indentless_sequence?)?)*
        # BLOCK-END
        token = self.get_token()
        start_mark = token.start_mark
        while self.check_token(KeyToken, ValueToken):
            if self.check_token(KeyToken):
                token = self.get_token()
                if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                    for event in self.parse_block_node_or_indentless_sequence():
                        yield event
                else:
                    yield self.process_empty_scalar(token.end_mark)
            if self.check_token(ValueToken):
                token = self.get_token()
                if not self.check_token(KeyToken, ValueToken, BlockEndToken):
                    for event in self.parse_block_node_or_indentless_sequence():
                        yield event
                else:
                    yield self.process_empty_scalar(token.end_mark)
            else:
                # key with no ':' value part: the value is an empty scalar
                token = self.peek_token()
                yield self.process_empty_scalar(token.start_mark)
        if not self.check_token(BlockEndToken):
            token = self.peek_token()
            raise ParserError("while scanning a block mapping", start_mark,
                    "expected <block end>, but found %r" % token.id, token.start_mark)
        token = self.get_token()
        yield MappingEndEvent(token.start_mark, token.end_mark)
    def parse_flow_sequence(self):
        # flow_sequence ::= FLOW-SEQUENCE-START
        # (flow_sequence_entry FLOW-ENTRY)*
        # flow_sequence_entry?
        # FLOW-SEQUENCE-END
        # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
        #
        # Note that while production rules for both flow_sequence_entry and
        # flow_mapping_entry are equal, their interpretations are different.
        # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
        # generate an inline mapping (set syntax).
        token = self.get_token()
        start_mark = token.start_mark
        while not self.check_token(FlowSequenceEndToken):
            if self.check_token(KeyToken):
                # 'key: value' inside a flow sequence becomes a one-pair
                # inline mapping.
                token = self.get_token()
                yield MappingStartEvent(None, None, True,
                        token.start_mark, token.end_mark,
                        flow_style=True)
                if not self.check_token(ValueToken,
                        FlowEntryToken, FlowSequenceEndToken):
                    for event in self.parse_flow_node():
                        yield event
                else:
                    yield self.process_empty_scalar(token.end_mark)
                if self.check_token(ValueToken):
                    token = self.get_token()
                    if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
                        for event in self.parse_flow_node():
                            yield event
                    else:
                        yield self.process_empty_scalar(token.end_mark)
                else:
                    token = self.peek_token()
                    yield self.process_empty_scalar(token.start_mark)
                token = self.peek_token()
                yield MappingEndEvent(token.start_mark, token.start_mark)
            else:
                for event in self.parse_flow_node():
                    yield event
                if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
                    token = self.peek_token()
                    raise ParserError("while scanning a flow sequence", start_mark,
                            "expected ',' or ']', but got %r" % token.id, token.start_mark)
            if self.check_token(FlowEntryToken):
                self.get_token()
        token = self.get_token()
        yield SequenceEndEvent(token.start_mark, token.end_mark)
    def parse_flow_mapping(self):
        # flow_mapping ::= FLOW-MAPPING-START
        # (flow_mapping_entry FLOW-ENTRY)*
        # flow_mapping_entry?
        # FLOW-MAPPING-END
        # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
        token = self.get_token()
        start_mark = token.start_mark
        while not self.check_token(FlowMappingEndToken):
            if self.check_token(KeyToken):
                token = self.get_token()
                if not self.check_token(ValueToken,
                        FlowEntryToken, FlowMappingEndToken):
                    for event in self.parse_flow_node():
                        yield event
                else:
                    yield self.process_empty_scalar(token.end_mark)
                if self.check_token(ValueToken):
                    token = self.get_token()
                    if not self.check_token(FlowEntryToken, FlowMappingEndToken):
                        for event in self.parse_flow_node():
                            yield event
                    else:
                        yield self.process_empty_scalar(token.end_mark)
                else:
                    token = self.peek_token()
                    yield self.process_empty_scalar(token.start_mark)
            else:
                # a bare node used as a key: value is an empty scalar
                for event in self.parse_flow_node():
                    yield event
                yield self.process_empty_scalar(self.peek_token().start_mark)
            if not self.check_token(FlowEntryToken, FlowMappingEndToken):
                token = self.peek_token()
                raise ParserError("while scanning a flow mapping", start_mark,
                        "expected ',' or '}', but got %r" % token.id, token.start_mark)
            if self.check_token(FlowEntryToken):
                self.get_token()
        if not self.check_token(FlowMappingEndToken):
            token = self.peek_token()
            raise ParserError("while scanning a flow mapping", start_mark,
                    "expected '}', but found %r" % token.id, token.start_mark)
        token = self.get_token()
        yield MappingEndEvent(token.start_mark, token.end_mark)
    def process_empty_scalar(self, mark):
        # Build the event for an omitted node: an implicit plain empty scalar.
        return ScalarEvent(None, None, (True, False), u'', mark, mark)
| {
"content_hash": "32520c2bfca25ee1664f1d3336d1b1ef",
"timestamp": "",
"source": "github",
"line_count": 438,
"max_line_length": 108,
"avg_line_length": 45.10730593607306,
"alnum_prop": 0.5266487827099255,
"repo_name": "dproc/trex_odp_porting_integration",
"id": "2aec0fe33f4cd60b1c0e76e0c73d26204045733a",
"size": "22563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9616073"
},
{
"name": "C++",
"bytes": "3147123"
},
{
"name": "CMake",
"bytes": "8882"
},
{
"name": "HTML",
"bytes": "4523"
},
{
"name": "JavaScript",
"bytes": "1234"
},
{
"name": "Makefile",
"bytes": "129776"
},
{
"name": "Python",
"bytes": "2740100"
},
{
"name": "Shell",
"bytes": "3026"
}
],
"symlink_target": ""
} |
from .api_key_authenticator import APIKeyAuthenticator
from .authenticator import Authenticator
from .invited_anonymous_user import InvitedAnonymousUser
from .jwt_authenticator import JWTAuthenticator
| {
"content_hash": "a80e710c5b9d0d554b55390067eb7445",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 56,
"avg_line_length": 50.25,
"alnum_prop": 0.8756218905472637,
"repo_name": "tetherless-world/graphene",
"id": "b6b03135b4d7973413ea7e4e8a2a92582b0a8d1a",
"size": "201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "whyis/authenticator/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "460"
},
{
"name": "HTML",
"bytes": "82771"
},
{
"name": "JavaScript",
"bytes": "65463"
},
{
"name": "Puppet",
"bytes": "14733"
},
{
"name": "Python",
"bytes": "80312"
},
{
"name": "Shell",
"bytes": "1982"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import six
from sentry.models import Activity
from sentry.testutils import APITestCase
class GroupNoteTest(APITestCase):
    """Exercises GET on the issue comments endpoint."""

    def test_simple(self):
        issue = self.group
        # seed the issue with a single note
        note = Activity.objects.create(
            group=issue,
            project=issue.project,
            type=Activity.NOTE,
            user=self.user,
            data={'text': 'hello world'},
        )
        self.login_as(user=self.user)
        endpoint = '/api/0/issues/{}/comments/'.format(issue.id)
        response = self.client.get(endpoint, format='json')
        # the seeded note is the only comment returned
        assert response.status_code == 200, response.content
        assert len(response.data) == 1
        assert response.data[0]['id'] == six.text_type(note.id)
class GroupNoteCreateTest(APITestCase):
    """Exercises POST on the issue comments endpoint, including mentions."""

    def test_simple(self):
        issue = self.group
        self.login_as(user=self.user)
        endpoint = '/api/0/issues/{}/comments/'.format(issue.id)
        # an empty payload is rejected
        response = self.client.post(endpoint, format='json')
        assert response.status_code == 400
        # a valid note is persisted as an Activity row
        response = self.client.post(endpoint, format='json', data={
            'text': 'hello world',
        })
        assert response.status_code == 201, response.content
        stored = Activity.objects.get(id=response.data['id'])
        assert stored.user == self.user
        assert stored.group == issue
        assert stored.data == {'text': 'hello world'}
        # posting the identical note again is rejected
        response = self.client.post(endpoint, format='json', data={
            'text': 'hello world',
        })
        assert response.status_code == 400, response.content

    def test_with_mentions(self):
        outsider = self.create_user(email='hello@meow.com')
        self.org = self.create_organization(
            name='Gnarly Org',
            owner=None,
        )
        self.team = self.create_team(
            organization=self.org,
            name='Ultra Rad Team'
        )
        # member that IS NOT part of the team
        self.create_member(
            user=outsider,
            organization=self.org,
            role='member',
            teams=[],
        )
        # member that IS part of the team
        self.create_member(
            user=self.user,
            organization=self.org,
            role='member',
            teams=[self.team],
        )
        issue = self.group
        self.login_as(user=self.user)
        endpoint = '/api/0/issues/{}/comments/'.format(issue.id)
        # mentioning a member that does not exist returns 400
        response = self.client.post(endpoint, format='json', data={
            'text': '**meredith@getsentry.com** is fun',
            'mentions': [u'8']
        })
        assert response.status_code == 400, response.content
        teammate_id = six.text_type(self.user.id)
        # mentioning a member in the correct team returns 201
        response = self.client.post(endpoint, format='json', data={
            'text': '**meredith@getsentry.com** is so fun',
            'mentions': [u'%s' % teammate_id]
        })
        assert response.status_code == 201, response.content
        outsider_id = six.text_type(outsider.id)
        # mentioning a member that exists but NOT in the team returns
        # validation error
        response = self.client.post(endpoint, format='json', data={
            'text': '**hello@meow.com** is not so fun',
            'mentions': [u'%s' % outsider_id]
        })
        assert response.content == '{"mentions": ["Cannot mention a non-team member"]}'
| {
"content_hash": "8c594982759192a2b19da330e6e63972",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 87,
"avg_line_length": 30.716814159292035,
"alnum_prop": 0.5669835782195333,
"repo_name": "JackDanger/sentry",
"id": "870fa771b9e235ad87635792a0c4a958aed5aeae",
"size": "3471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sentry/api/endpoints/test_group_notes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583430"
},
{
"name": "HTML",
"bytes": "319622"
},
{
"name": "JavaScript",
"bytes": "624672"
},
{
"name": "Makefile",
"bytes": "2660"
},
{
"name": "Python",
"bytes": "6279717"
}
],
"symlink_target": ""
} |
from libcloud.dns.types import Provider
from libcloud.dns.providers import get_driver

cls = get_driver(Provider.GODADDY)
driver = cls("customer_id", "api_key", "api_secret")

# Get the JSON schema for the domain
schema = driver.ex_get_purchase_schema("com")

# Use this schema to prepare a purchase request document

# Load a JSON document that has the completed purchase request.
# A context manager guarantees the handle is closed even on error; the
# original code left the file open and shadowed the `file` builtin.
with open("purchase_request.json", "r") as request_file:
    document = request_file.read()

order = driver.ex_purchase_domain(document)
print("Made request : order ID : %s" % order.order_id)
| {
"content_hash": "6deeff7b16414a865d78191f35d4403a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 62,
"avg_line_length": 34.0625,
"alnum_prop": 0.7486238532110092,
"repo_name": "mistio/libcloud",
"id": "de955281f54061796c1c565a54c9405c77380b78",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "docs/examples/dns/godaddy/purchasing_domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "9067225"
},
{
"name": "Shell",
"bytes": "12994"
}
],
"symlink_target": ""
} |
import aenum
import doctest
import sys
import unittest
from aenum import Enum, IntEnum, AutoNumberEnum, OrderedEnum, UniqueEnum, unique, skip, extend_enum
from aenum import EnumMeta, NamedTuple, TupleSize, NamedConstant, constant, NoAlias, AutoNumber, Unique, AutoNumber
from collections import OrderedDict
from datetime import timedelta
from pickle import dumps, loads, PicklingError, HIGHEST_PROTOCOL
# Running interpreter version as a float (e.g. 2.7, 3.6) for version gates.
pyver = float('%s.%s' % sys.version_info[:2])
# Python < 2.5 compatibility: provide a fallback any() if the builtin
# does not exist.
try:
    any
except NameError:
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False
# Python 3 compatibility: there is no unicode builtin; alias it to str.
try:
    unicode
except NameError:
    unicode = str
# Import the stdlib enum types for cross-implementation tests; if the
# 'enum' module is actually a backport shadowing the stdlib (it exposes a
# 'version' attribute), treat the stdlib enum as unavailable.
try:
    from enum import EnumMeta as StdlibEnumMeta, Enum as StdlibEnum
    import enum
    if hasattr(enum, 'version'):
        StdlibEnumMeta = StdlibEnum = None
    del enum
except ImportError:
    StdlibEnumMeta = StdlibEnum = None
def load_tests(loader, tests, ignore):
    """unittest load_tests protocol hook.

    Adds the doctests embedded in the aenum module and in its reST
    documentation file to the automatically discovered suite.  The
    `loader` and `ignore` parameters are required by the protocol but
    unused here.
    """
    tests.addTests(doctest.DocTestSuite(aenum))
    tests.addTests(doctest.DocFileSuite(
        'doc/aenum.rst',
        package=aenum,
        optionflags=doctest.ELLIPSIS|doctest.NORMALIZE_WHITESPACE,
        ))
    return tests
# for pickle tests
# NOTE: every fixture below is created inside try/except.  If creation
# fails, the *exception instance* is bound to the fixture's name instead
# of the class, so the failure surfaces in the specific test that uses
# the fixture rather than aborting the whole module import.  Module-level
# definition (and module=__name__ where applicable) is required for
# pickling by reference.
try:
    class Stooges(Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception:
    Stooges = sys.exc_info()[1]
try:
    class IntStooges(int, Enum):
        LARRY = 1
        CURLY = 2
        MOE = 3
except Exception:
    IntStooges = sys.exc_info()[1]
try:
    class FloatStooges(float, Enum):
        LARRY = 1.39
        CURLY = 2.72
        MOE = 3.142596
except Exception:
    FloatStooges = sys.exc_info()[1]
try:
    LifeForm = NamedTuple('LifeForm', 'branch genus species', module=__name__)
except Exception:
    LifeForm = sys.exc_info()[1]
try:
    class DeathForm(NamedTuple):
        color = 0
        rigidity = 1
        odor = 2
except Exception:
    DeathForm = sys.exc_info()[1]
# for pickle test and subclass tests
try:
    class StrEnum(str, Enum):
        'accepts only string values'
    class Name(StrEnum):
        BDFL = 'Guido van Rossum'
        FLUFL = 'Barry Warsaw'
except Exception:
    Name = sys.exc_info()[1]
try:
    Question = Enum('Question', 'who what when where why', module=__name__)
except Exception:
    Question = sys.exc_info()[1]
try:
    Answer = Enum('Answer', 'him this then there because')
except Exception:
    Answer = sys.exc_info()[1]
try:
    Theory = Enum('Theory', 'rule law supposition', qualname='spanish_inquisition')
except Exception:
    Theory = sys.exc_info()[1]
try:
    class WhatsIt(NamedTuple):
        def what(self):
            return self[0]
    class ThatsIt(WhatsIt):
        blah = 0
        bleh = 1
except Exception:
    ThatsIt = sys.exc_info()[1]
# for doctests
try:
    class Fruit(Enum):
        tomato = 1
        banana = 2
        cherry = 3
except Exception:
    pass
def test_pickle_dump_load(assertion, source, target=None,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
if target is None:
if isinstance(source, Enum):
assertion(loads(dumps(source, protocol=protocol)) is source)
else:
assertion(loads(dumps(source, protocol=protocol)), source)
else:
assertion(loads(dumps(source, protocol=protocol)), target)
except Exception:
exc, tb = sys.exc_info()[1:]
failures.append('%2d: %s' %(protocol, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
def test_pickle_exception(assertion, exception, obj,
protocol=(0, HIGHEST_PROTOCOL)):
start, stop = protocol
failures = []
for protocol in range(start, stop+1):
try:
assertion(exception, dumps, obj, protocol=protocol)
except Exception:
exc = sys.exc_info()[1]
failures.append('%d: %s %s' % (protocol, exc.__class__.__name__, exc))
if failures:
raise ValueError('Failed with protocols: %s' % ', '.join(failures))
class TestHelpers(unittest.TestCase):
    """Checks for the aenum name/descriptor helper predicates:
    _is_descriptor, _is_sunder, _is_dunder."""

    def test_is_descriptor(self):
        # An object qualifies as a descriptor as soon as it grows any one
        # of the descriptor protocol methods.
        class Plain:
            pass
        for protocol_attr in ('__get__', '__set__', '__delete__'):
            candidate = Plain()
            self.assertFalse(aenum._is_descriptor(candidate))
            setattr(candidate, protocol_attr, 1)
            self.assertTrue(aenum._is_descriptor(candidate))

    def test_is_sunder(self):
        sunder_names = ('_a_', '_aa_')
        non_sunder_names = ('a', 'a_', '_a', '__a', 'a__', '__a__', '_a__',
                            '__a_', '_', '__', '___', '____', '_____',)
        for name in sunder_names:
            self.assertTrue(aenum._is_sunder(name))
        for name in non_sunder_names:
            self.assertFalse(aenum._is_sunder(name))

    def test_is_dunder(self):
        dunder_names = ('__a__', '__aa__')
        non_dunder_names = ('a', 'a_', '_a', '__a', 'a__', '_a_', '_a__',
                            '__a_', '_', '__', '___', '____', '_____',)
        for name in dunder_names:
            self.assertTrue(aenum._is_dunder(name))
        for name in non_dunder_names:
            self.assertFalse(aenum._is_dunder(name))
if pyver >= 3.0:
from aenum.test_v3 import TestEnumV3, TestNamedTupleV3
class TestEnum(unittest.TestCase):
def setUp(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
self.Season = Season
class Konstants(float, Enum):
E = 2.7182818
PI = 3.1415926
TAU = 2 * PI
self.Konstants = Konstants
class Grades(IntEnum):
A = 5
B = 4
C = 3
D = 2
F = 0
self.Grades = Grades
class Directional(str, Enum):
EAST = 'east'
WEST = 'west'
NORTH = 'north'
SOUTH = 'south'
self.Directional = Directional
from datetime import date
class Holiday(date, Enum):
NEW_YEAR = 2013, 1, 1
IDES_OF_MARCH = 2013, 3, 15
self.Holiday = Holiday
def test_members_is_ordereddict_if_ordered(self):
class Ordered(Enum):
__order__ = 'first second third'
first = 'bippity'
second = 'boppity'
third = 'boo'
self.assertTrue(type(Ordered.__members__) is OrderedDict)
def test_members_is_ordereddict_if_not_ordered(self):
class Unordered(Enum):
this = 'that'
these = 'those'
self.assertTrue(type(Unordered.__members__) is OrderedDict)
def test_enum_in_enum_out(self):
Season = self.Season
self.assertTrue(Season(Season.WINTER) is Season.WINTER)
def test_enum_value(self):
Season = self.Season
self.assertEqual(Season.SPRING.value, 1)
def test_intenum_value(self):
self.assertEqual(IntStooges.CURLY.value, 2)
def test_enum(self):
Season = self.Season
lst = list(Season)
self.assertEqual(len(lst), len(Season))
self.assertEqual(len(Season), 4, Season)
self.assertEqual(
[Season.SPRING, Season.SUMMER, Season.AUTUMN, Season.WINTER], lst)
for i, season in enumerate('SPRING SUMMER AUTUMN WINTER'.split()):
i += 1
e = Season(i)
self.assertEqual(e, getattr(Season, season))
self.assertEqual(e.value, i)
self.assertNotEqual(e, i)
self.assertEqual(e.name, season)
self.assertTrue(e in Season)
self.assertTrue(type(e) is Season)
self.assertTrue(isinstance(e, Season))
self.assertEqual(str(e), 'Season.' + season)
self.assertEqual(
repr(e),
'<Season.%s: %s>' % (season, i),
)
def test_value_name(self):
Season = self.Season
self.assertEqual(Season.SPRING.name, 'SPRING')
self.assertEqual(Season.SPRING.value, 1)
def set_name(obj, new_value):
obj.name = new_value
def set_value(obj, new_value):
obj.value = new_value
self.assertRaises(AttributeError, set_name, Season.SPRING, 'invierno', )
self.assertRaises(AttributeError, set_value, Season.SPRING, 2)
def test_attribute_deletion(self):
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = 3
WINTER = 4
def spam(cls):
pass
self.assertTrue(hasattr(Season, 'spam'))
del Season.spam
self.assertFalse(hasattr(Season, 'spam'))
self.assertRaises(AttributeError, delattr, Season, 'SPRING')
self.assertRaises(AttributeError, delattr, Season, 'DRY')
self.assertRaises(AttributeError, delattr, Season.SPRING, 'name')
def test_bool_of_class(self):
class Empty(Enum):
pass
self.assertTrue(bool(Empty))
def test_bool_of_member(self):
class Count(Enum):
zero = 0
one = 1
two = 2
for member in Count:
self.assertTrue(bool(member))
    def test_invalid_names(self):
        """Reserved names ('mro', _sunder_-style names) are rejected as
        member names with ValueError."""
        def create_bad_class_1():
            class Wrong(Enum):
                mro = 9
        def create_bad_class_2():
            class Wrong(Enum):
                _reserved_ = 3
        self.assertRaises(ValueError, create_bad_class_1)
        self.assertRaises(ValueError, create_bad_class_2)
    def test_bool(self):
        """A user-defined __bool__ on the Enum controls member truthiness."""
        class Logic(Enum):
            true = True
            false = False
            def __bool__(self):
                return bool(self.value)
            # Python 2 spelling of the same hook
            __nonzero__ = __bool__
        self.assertTrue(Logic.true)
        self.assertFalse(Logic.false)
    def test_contains(self):
        """Membership tests match members only -- not raw values, and not
        members of a different Enum."""
        Season = self.Season
        self.assertTrue(Season.AUTUMN in Season)
        self.assertTrue(3 not in Season)
        val = Season(3)
        self.assertTrue(val in Season)
        class OtherEnum(Enum):
            one = 1; two = 2
        self.assertTrue(OtherEnum.two not in Season)
    # str.format support tests; guarded because `format` only exists on 2.6+
    if pyver >= 2.6:     # when `format` came into being
        def test_format_enum(self):
            """A plain Enum member formats exactly like its str() form."""
            Season = self.Season
            self.assertEqual('{0}'.format(Season.SPRING),
                             '{0}'.format(str(Season.SPRING)))
            self.assertEqual( '{0:}'.format(Season.SPRING),
                              '{0:}'.format(str(Season.SPRING)))
            self.assertEqual('{0:20}'.format(Season.SPRING),
                             '{0:20}'.format(str(Season.SPRING)))
            self.assertEqual('{0:^20}'.format(Season.SPRING),
                             '{0:^20}'.format(str(Season.SPRING)))
            self.assertEqual('{0:>20}'.format(Season.SPRING),
                             '{0:>20}'.format(str(Season.SPRING)))
            self.assertEqual('{0:<20}'.format(Season.SPRING),
                             '{0:<20}'.format(str(Season.SPRING)))
        def test_format_enum_custom(self):
            """A user-defined __format__ on a mixed-in Enum wins."""
            class TestFloat(float, Enum):
                one = 1.0
                two = 2.0
                def __format__(self, spec):
                    return 'TestFloat success!'
            self.assertEqual('{0}'.format(TestFloat.one), 'TestFloat success!')
        def assertFormatIsValue(self, spec, member):
            """Helper: formatting the member equals formatting its value."""
            self.assertEqual(spec.format(member), spec.format(member.value))
        def test_format_enum_date(self):
            """Mixed-in date members delegate format specs to the date value."""
            Holiday = self.Holiday
            self.assertFormatIsValue('{0}', Holiday.IDES_OF_MARCH)
            self.assertFormatIsValue('{0:}', Holiday.IDES_OF_MARCH)
            self.assertFormatIsValue('{0:20}', Holiday.IDES_OF_MARCH)
            self.assertFormatIsValue('{0:^20}', Holiday.IDES_OF_MARCH)
            self.assertFormatIsValue('{0:>20}', Holiday.IDES_OF_MARCH)
            self.assertFormatIsValue('{0:<20}', Holiday.IDES_OF_MARCH)
            self.assertFormatIsValue('{0:%Y %m}', Holiday.IDES_OF_MARCH)
            self.assertFormatIsValue('{0:%Y %m %M:00}', Holiday.IDES_OF_MARCH)
        def test_format_enum_float(self):
            """Mixed-in float members delegate format specs to the float value."""
            Konstants = self.Konstants
            self.assertFormatIsValue('{0}', Konstants.TAU)
            self.assertFormatIsValue('{0:}', Konstants.TAU)
            self.assertFormatIsValue('{0:20}', Konstants.TAU)
            self.assertFormatIsValue('{0:^20}', Konstants.TAU)
            self.assertFormatIsValue('{0:>20}', Konstants.TAU)
            self.assertFormatIsValue('{0:<20}', Konstants.TAU)
            self.assertFormatIsValue('{0:n}', Konstants.TAU)
            self.assertFormatIsValue('{0:5.2}', Konstants.TAU)
            self.assertFormatIsValue('{0:f}', Konstants.TAU)
        def test_format_enum_int(self):
            """Mixed-in int members delegate format specs to the int value."""
            Grades = self.Grades
            self.assertFormatIsValue('{0}', Grades.C)
            self.assertFormatIsValue('{0:}', Grades.C)
            self.assertFormatIsValue('{0:20}', Grades.C)
            self.assertFormatIsValue('{0:^20}', Grades.C)
            self.assertFormatIsValue('{0:>20}', Grades.C)
            self.assertFormatIsValue('{0:<20}', Grades.C)
            self.assertFormatIsValue('{0:+}', Grades.C)
            self.assertFormatIsValue('{0:08X}', Grades.C)
            self.assertFormatIsValue('{0:b}', Grades.C)
        def test_format_enum_str(self):
            """Mixed-in str members delegate format specs to the str value."""
            Directional = self.Directional
            self.assertFormatIsValue('{0}', Directional.WEST)
            self.assertFormatIsValue('{0:}', Directional.WEST)
            self.assertFormatIsValue('{0:20}', Directional.WEST)
            self.assertFormatIsValue('{0:^20}', Directional.WEST)
            self.assertFormatIsValue('{0:>20}', Directional.WEST)
            self.assertFormatIsValue('{0:<20}', Directional.WEST)
    def test_hash(self):
        """Members are hashable and usable as dict keys."""
        Season = self.Season
        dates = {}
        dates[Season.WINTER] = '1225'
        dates[Season.SPRING] = '0315'
        dates[Season.SUMMER] = '0704'
        dates[Season.AUTUMN] = '1031'
        self.assertEqual(dates[Season.AUTUMN], '1031')
def test_enum_duplicates(self):
__order__ = "SPRING SUMMER AUTUMN WINTER"
class Season(Enum):
SPRING = 1
SUMMER = 2
AUTUMN = FALL = 3
WINTER = 4
ANOTHER_SPRING = 1
lst = list(Season)
self.assertEqual(
lst,
[Season.SPRING, Season.SUMMER,
Season.AUTUMN, Season.WINTER,
])
self.assertTrue(Season.FALL is Season.AUTUMN)
self.assertEqual(Season.FALL.value, 3)
self.assertEqual(Season.AUTUMN.value, 3)
self.assertTrue(Season(3) is Season.AUTUMN)
self.assertTrue(Season(1) is Season.SPRING)
self.assertEqual(Season.FALL.name, 'AUTUMN')
self.assertEqual(
set([k for k,v in Season.__members__.items() if v.name != k]),
set(['FALL', 'ANOTHER_SPRING']),
)
    def test_enum_with_value_name(self):
        """'name' and 'value' are legal *member* names and do not shadow the
        member attributes of the same names."""
        class Huh(Enum):
            name = 1
            value = 2
        self.assertEqual(
            list(Huh),
            [Huh.name, Huh.value],
            )
        self.assertTrue(type(Huh.name) is Huh)
        self.assertEqual(Huh.name.name, 'name')
        self.assertEqual(Huh.name.value, 1)
    def test_intenum_from_scratch(self):
        """Mixing int directly into Enum gives ordered, int-like members."""
        class phy(int, Enum):
            pi = 3
            tau = 2 * pi
        self.assertTrue(phy.pi < phy.tau)
    def test_intenum_inherited(self):
        """A memberless int/Enum intermediate class can itself be subclassed."""
        class IntEnum(int, Enum):
            pass
        class phy(IntEnum):
            pi = 3
            tau = 2 * pi
        self.assertTrue(phy.pi < phy.tau)
    def test_floatenum_from_scratch(self):
        """Mixing float directly into Enum gives ordered, float-like members."""
        class phy(float, Enum):
            pi = 3.1415926
            tau = 2 * pi
        self.assertTrue(phy.pi < phy.tau)
    def test_floatenum_inherited(self):
        """A memberless float/Enum intermediate class can itself be subclassed."""
        class FloatEnum(float, Enum):
            pass
        class phy(FloatEnum):
            pi = 3.1415926
            tau = 2 * pi
        self.assertTrue(phy.pi < phy.tau)
    def test_strenum_from_scratch(self):
        """Mixing str directly into Enum gives ordered, str-like members."""
        class phy(str, Enum):
            pi = 'Pi'
            tau = 'Tau'
        self.assertTrue(phy.pi < phy.tau)
    def test_strenum_inherited(self):
        """A memberless str/Enum intermediate class can itself be subclassed."""
        class StrEnum(str, Enum):
            pass
        class phy(StrEnum):
            pi = 'Pi'
            tau = 'Tau'
        self.assertTrue(phy.pi < phy.tau)
    def test_intenum(self):
        """IntEnum members behave as ints (indexing, range, comparisons)
        while still being Enum members with names."""
        class WeekDay(IntEnum):
            SUNDAY = 1
            MONDAY = 2
            TUESDAY = 3
            WEDNESDAY = 4
            THURSDAY = 5
            FRIDAY = 6
            SATURDAY = 7
        # members are usable anywhere an int is
        self.assertEqual(['a', 'b', 'c'][WeekDay.MONDAY], 'c')
        self.assertEqual([i for i in range(WeekDay.TUESDAY)], [0, 1, 2])
        lst = list(WeekDay)
        self.assertEqual(len(lst), len(WeekDay))
        self.assertEqual(len(WeekDay), 7)
        target = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
        target = target.split()
        for i, weekday in enumerate(target):
            # values are 1-based while enumerate() is 0-based
            i += 1
            e = WeekDay(i)
            # unlike plain Enum, an IntEnum member IS equal to its value
            self.assertEqual(e, i)
            self.assertEqual(int(e), i)
            self.assertEqual(e.name, weekday)
            self.assertTrue(e in WeekDay)
            self.assertEqual(lst.index(e)+1, i)
            self.assertTrue(0 < e < 8)
            self.assertTrue(type(e) is WeekDay)
            self.assertTrue(isinstance(e, int))
            self.assertTrue(isinstance(e, Enum))
    def test_intenum_duplicates(self):
        """Duplicate IntEnum values are aliases of the first-defined member."""
        class WeekDay(IntEnum):
            __order__ = 'SUNDAY MONDAY TUESDAY WEDNESDAY THURSDAY FRIDAY SATURDAY'
            SUNDAY = 1
            MONDAY = 2
            TUESDAY = TEUSDAY = 3
            WEDNESDAY = 4
            THURSDAY = 5
            FRIDAY = 6
            SATURDAY = 7
        self.assertTrue(WeekDay.TEUSDAY is WeekDay.TUESDAY)
        self.assertEqual(WeekDay(3).name, 'TUESDAY')
        self.assertEqual([k for k,v in WeekDay.__members__.items()
                if v.name != k], ['TEUSDAY', ])
    def test_pickle_enum(self):
        """A module-level Enum and its members pickle/unpickle round-trip."""
        # the fixture stores the creation failure if Stooges couldn't be built
        if isinstance(Stooges, Exception):
            raise Stooges
        test_pickle_dump_load(self.assertTrue, Stooges.CURLY)
        test_pickle_dump_load(self.assertTrue, Stooges)
    def test_pickle_int(self):
        """A module-level IntEnum and its members pickle round-trip."""
        if isinstance(IntStooges, Exception):
            raise IntStooges
        test_pickle_dump_load(self.assertTrue, IntStooges.CURLY)
        test_pickle_dump_load(self.assertTrue, IntStooges)
    def test_pickle_float(self):
        """A module-level float-based Enum and its members pickle round-trip."""
        if isinstance(FloatStooges, Exception):
            raise FloatStooges
        test_pickle_dump_load(self.assertTrue, FloatStooges.CURLY)
        test_pickle_dump_load(self.assertTrue, FloatStooges)
    def test_pickle_enum_function(self):
        """An Enum created via the functional API pickles round-trip."""
        if isinstance(Answer, Exception):
            raise Answer
        test_pickle_dump_load(self.assertTrue, Answer.him)
        test_pickle_dump_load(self.assertTrue, Answer)
    def test_pickle_enum_function_with_module(self):
        """A functional-API Enum given an explicit module pickles round-trip."""
        if isinstance(Question, Exception):
            raise Question
        test_pickle_dump_load(self.assertTrue, Question.who)
        test_pickle_dump_load(self.assertTrue, Question)
    # Nested (class-attribute) enums need __qualname__ and pickle protocol 4;
    # on 3.4 earlier protocols must fail, on 3.5+ all protocols work.
    if pyver == 3.4:
        def test_class_nested_enum_and_pickle_protocol_four(self):
            # would normally just have this directly in the class namespace
            class NestedEnum(Enum):
                twigs = 'common'
                shiny = 'rare'
            self.__class__.NestedEnum = NestedEnum
            self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
            # protocols 0-3 cannot pickle a class-nested enum on 3.4
            test_pickle_exception(
                    self.assertRaises, PicklingError, self.NestedEnum.twigs,
                    protocol=(0, 3))
            test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
                    protocol=(4, HIGHEST_PROTOCOL))
    elif pyver == 3.5:
        def test_class_nested_enum_and_pickle_protocol_four(self):
            # would normally just have this directly in the class namespace
            class NestedEnum(Enum):
                twigs = 'common'
                shiny = 'rare'
            self.__class__.NestedEnum = NestedEnum
            self.NestedEnum.__qualname__ = '%s.NestedEnum' % self.__class__.__name__
            # 3.5 can pickle nested enums at every protocol
            test_pickle_dump_load(self.assertTrue, self.NestedEnum.twigs,
                    protocol=(0, HIGHEST_PROTOCOL))
    def test_exploding_pickle(self):
        """An Enum explicitly marked unpicklable raises on both member and
        class pickling."""
        BadPickle = Enum('BadPickle', 'dill sweet bread-n-butter')
        aenum._make_class_unpicklable(BadPickle)
        # must be reachable at module level for pickle to even attempt it
        globals()['BadPickle'] = BadPickle
        test_pickle_exception(self.assertRaises, TypeError, BadPickle.dill)
        test_pickle_exception(self.assertRaises, PicklingError, BadPickle)
    def test_string_enum(self):
        """A str-mixed Enum member compares equal to its string value."""
        class SkillLevel(str, Enum):
            master = 'what is the sound of one hand clapping?'
            journeyman = 'why did the chicken cross the road?'
            apprentice = 'knock, knock!'
        self.assertEqual(SkillLevel.apprentice, 'knock, knock!')
    def test_getattr_getitem(self):
        """Value lookup, getattr lookup, and name subscription all return
        the identical member object."""
        class Period(Enum):
            morning = 1
            noon = 2
            evening = 3
            night = 4
        self.assertTrue(Period(2) is Period.noon)
        self.assertTrue(getattr(Period, 'night') is Period.night)
        self.assertTrue(Period['morning'] is Period.morning)
    def test_getattr_dunder(self):
        """Dunder attributes are still reachable via getattr on the class."""
        Season = self.Season
        self.assertTrue(getattr(Season, '__hash__'))
    def test_iteration_order(self):
        """__order__ in the class body fixes iteration order, independent of
        member values (needed for Python 2's unordered class dicts)."""
        class Season(Enum):
            __order__ = 'SUMMER WINTER AUTUMN SPRING'
            SUMMER = 2
            WINTER = 4
            AUTUMN = 3
            SPRING = 1
        self.assertEqual(
                list(Season),
                [Season.SUMMER, Season.WINTER, Season.AUTUMN, Season.SPRING],
                )
    def test_iteration_order_reversed(self):
        """reversed() yields members in reverse definition order."""
        self.assertEqual(
                list(reversed(self.Season)),
                [self.Season.WINTER, self.Season.AUTUMN, self.Season.SUMMER,
                 self.Season.SPRING]
                )
    def test_iteration_order_with_unorderable_values(self):
        """Definition order is preserved even when values (complex numbers)
        cannot be sorted."""
        class Complex(Enum):
            a = complex(7, 9)
            b = complex(3.14, 2)
            c = complex(1, -1)
            d = complex(-77, 32)
        self.assertEqual(
                list(Complex),
                [Complex.a, Complex.b, Complex.c, Complex.d],
                )
    def test_programatic_function_string(self):
        """Functional API with a space-separated name string; values are
        auto-numbered starting at 1."""
        SummerMonth = Enum('SummerMonth', 'june july august')
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            # plain Enum members are not equal to their values
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_string_with_start(self):
        """Functional API with start= controls the first auto value."""
        SummerMonth = Enum('SummerMonth', 'june july august', start=10)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split(), 10):
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_string_list(self):
        """Functional API with a list of names; values auto-number from 1."""
        SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'])
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_string_list_with_start(self):
        """Functional API with a list of names and a custom start value."""
        SummerMonth = Enum('SummerMonth', ['june', 'july', 'august'], start=20)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split(), 20):
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_iterable(self):
        """Functional API with (name, value) pairs sets explicit values."""
        SummerMonth = Enum(
                'SummerMonth',
                (('june', 1), ('july', 2), ('august', 3))
                )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_from_dict(self):
        """Functional API with a name->value mapping."""
        SummerMonth = Enum(
                'SummerMonth',
                dict((('june', 1), ('july', 2), ('august', 3)))
                )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        # dict order is unreliable on py2, so the order check is py2-only
        # (where aenum sorts) -- NOTE(review): guard looks inverted at first
        # glance but matches the library's py2 sorting behavior
        if pyver < 3.0:
            self.assertEqual(
                    [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                    lst,
                    )
        for i, month in enumerate('june july august'.split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_type(self):
        """Functional API with type=int mixes int in: members equal values."""
        SummerMonth = Enum('SummerMonth', 'june july august', type=int)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_type_with_start(self):
        """Functional API with type=int and a custom start value."""
        SummerMonth = Enum('SummerMonth', 'june july august', type=int, start=30)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split(), 30):
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_type_from_subclass(self):
        """Calling IntEnum functionally behaves like Enum(..., type=int)."""
        SummerMonth = IntEnum('SummerMonth', 'june july august')
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_type_from_subclass_with_start(self):
        """Functional IntEnum with a custom start value."""
        SummerMonth = IntEnum('SummerMonth', 'june july august', start=40)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate('june july august'.split(), 40):
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode(self):
        """Functional API accepts a unicode name string (py2 compat check)."""
        SummerMonth = Enum('SummerMonth', unicode('june july august'))
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_list(self):
        """Functional API accepts a list of unicode names (py2 compat check)."""
        SummerMonth = Enum('SummerMonth', [unicode('june'), unicode('july'), unicode('august')])
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_iterable(self):
        """Functional API accepts (unicode name, value) pairs."""
        SummerMonth = Enum(
                'SummerMonth',
                ((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3))
                )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_from_unicode_dict(self):
        """Functional API accepts a unicode-keyed mapping."""
        SummerMonth = Enum(
                'SummerMonth',
                dict(((unicode('june'), 1), (unicode('july'), 2), (unicode('august'), 3)))
                )
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        # order from a dict is only checked on py2 (where aenum sorts)
        if pyver < 3.0:
            self.assertEqual(
                    [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                    lst,
                    )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(int(e.value), i)
            self.assertNotEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_type(self):
        """Functional API with unicode names and type=int mixin."""
        SummerMonth = Enum('SummerMonth', unicode('june july august'), type=int)
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programatic_function_unicode_type_from_subclass(self):
        """Functional IntEnum with a unicode name string."""
        SummerMonth = IntEnum('SummerMonth', unicode('june july august'))
        lst = list(SummerMonth)
        self.assertEqual(len(lst), len(SummerMonth))
        self.assertEqual(len(SummerMonth), 3, SummerMonth)
        self.assertEqual(
                [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                lst,
                )
        for i, month in enumerate(unicode('june july august').split()):
            i += 1
            e = SummerMonth(i)
            self.assertEqual(e, i)
            self.assertEqual(e.name, month)
            self.assertTrue(e in SummerMonth)
            self.assertTrue(type(e) is SummerMonth)
    def test_programmatic_function_unicode_class(self):
        """Non-ASCII *class* names work on py3 but raise TypeError on py2
        (where class names must be bytes-compatible)."""
        if pyver < 3.0:
            class_names = unicode('SummerMonth'), 'S\xfcmm\xe9rM\xf6nth'.decode('latin1')
        else:
            class_names = 'SummerMonth', 'S\xfcmm\xe9rM\xf6nth'
        for i, class_name in enumerate(class_names):
            if pyver < 3.0 and i == 1:
                self.assertRaises(TypeError, Enum, class_name, unicode('june july august'))
            else:
                SummerMonth = Enum(class_name, unicode('june july august'))
                lst = list(SummerMonth)
                self.assertEqual(len(lst), len(SummerMonth))
                self.assertEqual(len(SummerMonth), 3, SummerMonth)
                self.assertEqual(
                        [SummerMonth.june, SummerMonth.july, SummerMonth.august],
                        lst,
                        )
                for i, month in enumerate(unicode('june july august').split()):
                    i += 1
                    e = SummerMonth(i)
                    self.assertEqual(e.value, i)
                    self.assertEqual(e.name, month)
                    self.assertTrue(e in SummerMonth)
                    self.assertTrue(type(e) is SummerMonth)
def test_subclassing(self):
if isinstance(Name, Exception):
raise Name
self.assertEqual(Name.BDFL, 'Guido van Rossum')
self.assertTrue(Name.BDFL, Name('Guido van Rossum'))
self.assertTrue(Name.BDFL is getattr(Name, 'BDFL'))
test_pickle_dump_load(self.assertTrue, Name.BDFL)
    def test_extending(self):
        """Subclassing an Enum that already has members raises TypeError."""
        def bad_extension():
            class Color(Enum):
                red = 1
                green = 2
                blue = 3
            class MoreColor(Color):
                cyan = 4
                magenta = 5
                yellow = 6
        self.assertRaises(TypeError, bad_extension)
    def test_exclude_methods(self):
        """Callables in the class body become methods, not members."""
        class whatever(Enum):
            this = 'that'
            these = 'those'
            def really(self):
                return 'no, not %s' % self.value
        self.assertFalse(type(whatever.really) is whatever)
        self.assertEqual(whatever.this.really(), 'no, not that')
    def test_wrong_inheritance_order(self):
        """Enum must come last in the bases list; (Enum, str) is an error."""
        def wrong_inherit():
            class Wrong(Enum, str):
                NotHere = 'error before this point'
        self.assertRaises(TypeError, wrong_inherit)
    def test_intenum_transitivity(self):
        """Members of two different IntEnums compare equal through their
        shared int values."""
        class number(IntEnum):
            one = 1
            two = 2
            three = 3
        class numero(IntEnum):
            uno = 1
            dos = 2
            tres = 3
        self.assertEqual(number.one, numero.uno)
        self.assertEqual(number.two, numero.dos)
        self.assertEqual(number.three, numero.tres)
    def test_introspection(self):
        """_member_type_ records the mixed-in base (int/str), defaulting to
        object for a plain Enum, on both the class and its members."""
        class Number(IntEnum):
            one = 100
            two = 200
        self.assertTrue(Number.one._member_type_ is int)
        self.assertTrue(Number._member_type_ is int)
        class String(str, Enum):
            yarn = 'soft'
            rope = 'rough'
            wire = 'hard'
        self.assertTrue(String.yarn._member_type_ is str)
        self.assertTrue(String._member_type_ is str)
        class Plain(Enum):
            vanilla = 'white'
            one = 1
        self.assertTrue(Plain.vanilla._member_type_ is object)
        self.assertTrue(Plain._member_type_ is object)
    def test_wrong_enum_in_call(self):
        """Looking up one Enum with another Enum's member raises ValueError."""
        class Monochrome(Enum):
            black = 0
            white = 1
        class Gender(Enum):
            male = 0
            female = 1
        self.assertRaises(ValueError, Monochrome, Gender.male)
    def test_wrong_enum_in_mixed_call(self):
        """A plain-Enum member is not a valid value for an IntEnum lookup."""
        class Monochrome(IntEnum):
            black = 0
            white = 1
        class Gender(Enum):
            male = 0
            female = 1
        self.assertRaises(ValueError, Monochrome, Gender.male)
    def test_mixed_enum_in_call_1(self):
        """An IntEnum member IS a valid value for another IntEnum lookup,
        since it is a real int."""
        class Monochrome(IntEnum):
            black = 0
            white = 1
        class Gender(IntEnum):
            male = 0
            female = 1
        self.assertTrue(Monochrome(Gender.female) is Monochrome.white)
    def test_mixed_enum_in_call_2(self):
        """An IntEnum member also works as a lookup value for a plain Enum
        whose values are the matching ints."""
        class Monochrome(Enum):
            black = 0
            white = 1
        class Gender(IntEnum):
            male = 0
            female = 1
        self.assertTrue(Monochrome(Gender.male) is Monochrome.black)
    def test_flufl_enum(self):
        """Methods defined on a memberless base Enum (flufl.enum style) are
        inherited by member-bearing subclasses."""
        class Fluflnum(Enum):
            def __int__(self):
                return int(self.value)
        class MailManOptions(Fluflnum):
            option1 = 1
            option2 = 2
            option3 = 3
        self.assertEqual(int(MailManOptions.option1), 1)
    def test_no_such_enum_member(self):
        """Unknown value lookup raises ValueError; unknown name subscription
        raises KeyError."""
        class Color(Enum):
            red = 1
            green = 2
            blue = 3
        self.assertRaises(ValueError, Color, 4)
        self.assertRaises(KeyError, Color.__getitem__, 'chartreuse')
    def test_new_repr(self):
        """A user-defined __repr__ overrides the default member repr."""
        class Color(Enum):
            red = 1
            green = 2
            blue = 3
            def __repr__(self):
                return "don't you just love shades of %s?" % self.name
        self.assertEqual(
                repr(Color.blue),
                "don't you just love shades of blue?",
                )
    def test_inherited_repr(self):
        """__repr__ defined on a memberless base Enum is inherited, even with
        an int mixin added in the subclass."""
        class MyEnum(Enum):
            def __repr__(self):
                return "My name is %s." % self.name
        class MyIntEnum(int, MyEnum):
            this = 1
            that = 2
            theother = 3
        self.assertEqual(repr(MyIntEnum.that), "My name is that.")
    def test_multiple_mixin_mro(self):
        """A custom EnumMeta subclass can rewrite the class dict (here,
        auto-numbering members defined as ``()``) and still cooperate with
        both Enum and IntEnum in the MRO."""
        class auto_enum(EnumMeta):
            def __new__(metacls, cls, bases, classdict):
                original_dict = classdict
                # rebuild into a fresh _EnumDict so member bookkeeping works
                classdict = aenum._EnumDict()
                for k in getattr(original_dict, '_member_names', original_dict.keys()):
                    classdict[k] = original_dict[k]
                temp = type(classdict)()
                names = set(classdict._member_names)
                i = 0
                for k in classdict._member_names:
                    v = classdict[k]
                    if v == ():
                        # auto-number: next value after the last explicit one
                        v = i
                    else:
                        i = v
                    i += 1
                    temp[k] = v
                # carry over the non-member entries unchanged
                for k, v in classdict.items():
                    if k not in names:
                        temp[k] = v
                return super(auto_enum, metacls).__new__(
                        metacls, cls, bases, temp)
        AutoNumberedEnum = auto_enum('AutoNumberedEnum', (Enum,), {})
        AutoIntEnum = auto_enum('AutoIntEnum', (IntEnum,), {})
        class TestAutoNumber(AutoNumberedEnum):
            a = ()
            b = 3
            c = ()
        self.assertEqual(TestAutoNumber.b.value, 3)
        # definition order (and thus auto values) is only reliable on py3
        if pyver >= 3.0:
            self.assertEqual(
                    [TestAutoNumber.a.value, TestAutoNumber.b.value, TestAutoNumber.c.value],
                    [0, 3, 4],
                    )
        class TestAutoInt(AutoIntEnum):
            a = ()
            b = 3
            c = ()
        self.assertEqual(TestAutoInt.b, 3)
        if pyver >= 3.0:
            self.assertEqual(
                    [TestAutoInt.a.value, TestAutoInt.b.value, TestAutoInt.c.value],
                    [0, 3, 4],
                    )
    def test_subclasses_with_getnewargs(self):
        """An Enum mixing in an int subclass that pickles via __getnewargs__
        round-trips both raw instances and members."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'       # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __getnewargs__(self):
                # pickle hook: reconstruct by re-calling __new__ with _args
                return self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'      # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle needs both classes importable from the module namespace
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertTrue, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
    # __getnewargs_ex__ was added to pickle in 3.4, hence the version guard
    if pyver >= 3.4:
        def test_subclasses_with_getnewargs_ex(self):
            """Same as test_subclasses_with_getnewargs, but the mixin pickles
            via __getnewargs_ex__ (protocol 4+)."""
            class NamedInt(int):
                __qualname__ = 'NamedInt'       # needed for pickle protocol 4
                def __new__(cls, *args):
                    _args = args
                    if len(args) < 2:
                        raise TypeError("name and value must be specified")
                    name, args = args[0], args[1:]
                    self = int.__new__(cls, *args)
                    self._intname = name
                    self._args = _args
                    return self
                def __getnewargs_ex__(self):
                    # pickle hook: positional args plus (empty) keyword args
                    return self._args, {}
                @property
                def __name__(self):
                    return self._intname
                def __repr__(self):
                    # repr() is updated to include the name and type info
                    return "{}({!r}, {})".format(type(self).__name__,
                                                 self.__name__,
                                                 int.__repr__(self))
                def __str__(self):
                    # str() is unchanged, even if it relies on the repr() fallback
                    base = int
                    base_str = base.__str__
                    if base_str.__objclass__ is object:
                        return base.__repr__(self)
                    return base_str(self)
                # for simplicity, we only define one operator that
                # propagates expressions
                def __add__(self, other):
                    temp = int(self) + int( other)
                    if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                        return NamedInt(
                            '({0} + {1})'.format(self.__name__, other.__name__),
                            temp )
                    else:
                        return temp
            class NEI(NamedInt, Enum):
                __qualname__ = 'NEI'      # needed for pickle protocol 4
                x = ('the-x', 1)
                y = ('the-y', 2)
            self.assertIs(NEI.__new__, Enum.__new__)
            self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
            # pickle needs both classes importable from the module namespace
            globals()['NamedInt'] = NamedInt
            globals()['NEI'] = NEI
            NI5 = NamedInt('test', 5)
            self.assertEqual(NI5, 5)
            test_pickle_dump_load(self.assertEqual, NI5, 5, protocol=(4, HIGHEST_PROTOCOL))
            self.assertEqual(NEI.y.value, 2)
            test_pickle_dump_load(self.assertTrue, NEI.y, protocol=(4, HIGHEST_PROTOCOL))
    def test_subclasses_with_reduce(self):
        """Same fixture as test_subclasses_with_getnewargs, but the mixin
        pickles via __reduce__."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'       # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce__(self):
                # pickle hook: reconstruct by calling the class with _args
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'      # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle needs both classes importable from the module namespace
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
    def test_subclasses_with_reduce_ex(self):
        """Same fixture as test_subclasses_with_reduce, but the mixin pickles
        via __reduce_ex__."""
        class NamedInt(int):
            __qualname__ = 'NamedInt'       # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                if len(args) < 1:
                    raise TypeError("name and value must be specified")
                name, args = args[0], args[1:]
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            def __reduce_ex__(self, proto):
                # pickle hook: reconstruct by calling the class with _args
                return self.__class__, self._args
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'      # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # pickle needs both classes importable from the module namespace
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        test_pickle_dump_load(self.assertEqual, NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
def test_subclasses_without_direct_pickle_support(self):
class NamedInt(int):
__qualname__ = 'NamedInt'
def __new__(cls, *args):
_args = args
name, args = args[0], args[1:]
if len(args) == 0:
raise TypeError("name and value must be specified")
self = int.__new__(cls, *args)
self._intname = name
self._args = _args
return self
@property
def __name__(self):
return self._intname
def __repr__(self):
# repr() is updated to include the name and type info
return "%s(%r, %s)" % (type(self).__name__,
self.__name__,
int.__repr__(self))
def __str__(self):
# str() is unchanged, even if it relies on the repr() fallback
base = int
base_str = base.__str__
if base_str.__objclass__ is object:
return base.__repr__(self)
return base_str(self)
# for simplicity, we only define one operator that
# propagates expressions
def __add__(self, other):
temp = int(self) + int( other)
if isinstance(self, NamedInt) and isinstance(other, NamedInt):
return NamedInt(
'(%s + %s)' % (self.__name__, other.__name__),
temp )
else:
return temp
class NEI(NamedInt, Enum):
__qualname__ = 'NEI'
x = ('the-x', 1)
y = ('the-y', 2)
self.assertTrue(NEI.__new__ is Enum.__new__)
self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
globals()['NamedInt'] = NamedInt
globals()['NEI'] = NEI
NI5 = NamedInt('test', 5)
self.assertEqual(NI5, 5)
self.assertEqual(NEI.y.value, 2)
test_pickle_exception(self.assertRaises, TypeError, NEI.x)
test_pickle_exception(self.assertRaises, PicklingError, NEI)
    def test_subclasses_without_direct_pickle_support_using_name(self):
        # Same mixed-in type as above, but the enum supplies a
        # __reduce_ex__ that pickles members by name (via getattr), so
        # round-tripping through pickle succeeds.
        class NamedInt(int):
            __qualname__ = 'NamedInt'   # needed for pickle protocol 4
            def __new__(cls, *args):
                _args = args
                name, args = args[0], args[1:]
                if len(args) == 0:
                    raise TypeError("name and value must be specified")
                self = int.__new__(cls, *args)
                self._intname = name
                self._args = _args
                return self
            @property
            def __name__(self):
                return self._intname
            def __repr__(self):
                # repr() is updated to include the name and type info
                return "%s(%r, %s)" % (type(self).__name__,
                                       self.__name__,
                                       int.__repr__(self))
            def __str__(self):
                # str() is unchanged, even if it relies on the repr() fallback
                base = int
                base_str = base.__str__
                if base_str.__objclass__ is object:
                    return base.__repr__(self)
                return base_str(self)
            # for simplicity, we only define one operator that
            # propagates expressions
            def __add__(self, other):
                temp = int(self) + int( other)
                if isinstance(self, NamedInt) and isinstance(other, NamedInt):
                    return NamedInt(
                        '(%s + %s)' % (self.__name__, other.__name__),
                        temp )
                else:
                    return temp
        class NEI(NamedInt, Enum):
            __qualname__ = 'NEI'   # needed for pickle protocol 4
            x = ('the-x', 1)
            y = ('the-y', 2)
            def __reduce_ex__(self, proto):
                # pickle by name: unpickling does getattr(NEI, 'x')
                return getattr, (self.__class__, self._name_)
        self.assertTrue(NEI.__new__ is Enum.__new__)
        self.assertEqual(repr(NEI.x + NEI.y), "NamedInt('(the-x + the-y)', 3)")
        # make the classes reachable by name so pickle can find them
        globals()['NamedInt'] = NamedInt
        globals()['NEI'] = NEI
        NI5 = NamedInt('test', 5)
        self.assertEqual(NI5, 5)
        self.assertEqual(NEI.y.value, 2)
        test_pickle_dump_load(self.assertTrue, NEI.y)
        test_pickle_dump_load(self.assertTrue, NEI)
    def test_tuple_subclass(self):
        # Enum mixed with tuple: each member is a real tuple and the
        # class pickles normally.
        class SomeTuple(tuple, Enum):
            __qualname__ = 'SomeTuple'   # needed for pickle protocol 4
            first = (1, 'for the money')
            second = (2, 'for the show')
            third = (3, 'for the music')
        self.assertTrue(type(SomeTuple.first) is SomeTuple)
        self.assertTrue(isinstance(SomeTuple.second, tuple))
        self.assertEqual(SomeTuple.third, (3, 'for the music'))
        globals()['SomeTuple'] = SomeTuple   # make class findable for pickle
        test_pickle_dump_load(self.assertTrue, SomeTuple.first)
    def test_duplicate_values_give_unique_enum_items(self):
        # AutoNumberEnum assigns sequential values, so members declared
        # with identical (empty) values remain distinct.
        class NumericEnum(AutoNumberEnum):
            __order__ = 'enum_m enum_d enum_y'   # definition order for py2
            enum_m = ()
            enum_d = ()
            enum_y = ()
            def __int__(self):
                return int(self._value_)
        self.assertEqual(int(NumericEnum.enum_d), 2)
        self.assertEqual(NumericEnum.enum_y.value, 3)
        self.assertTrue(NumericEnum(1) is NumericEnum.enum_m)
        self.assertEqual(
                list(NumericEnum),
                [NumericEnum.enum_m, NumericEnum.enum_d, NumericEnum.enum_y],
                )
    def test_inherited_new_from_enhanced_enum(self):
        # A base Enum's custom __new__ is inherited and used to build the
        # subclass's members.
        class AutoNumber2(Enum):
            def __new__(cls):
                # next value is one past the members created so far
                value = len(cls.__members__) + 1
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
            def __int__(self):
                return int(self._value_)
        class Color(AutoNumber2):
            __order__ = 'red green blue'   # definition order for py2
            red = ()
            green = ()
            blue = ()
        self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
        self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
        if pyver >= 3.0:
            self.assertEqual(list(map(int, Color)), [1, 2, 3])
    def test_inherited_new_from_mixed_enum(self):
        # Custom __new__ on an IntEnum base; the bare attribute accesses
        # at the end just verify the members were created without error.
        class AutoNumber3(IntEnum):
            def __new__(cls):
                value = len(cls.__members__) + 1
                obj = int.__new__(cls, value)
                obj._value_ = value
                return obj
        class Color(AutoNumber3):
            red = ()
            green = ()
            blue = ()
        self.assertEqual(len(Color), 3, "wrong number of elements: %d (should be %d)" % (len(Color), 3))
        Color.red     # accessing the members ...
        Color.green   # ... verifies they ...
        Color.blue    # ... exist
def test_equality(self):
class AlwaysEqual:
def __eq__(self, other):
return True
class OrdinaryEnum(Enum):
a = 1
self.assertEqual(AlwaysEqual(), OrdinaryEnum.a)
self.assertEqual(OrdinaryEnum.a, AlwaysEqual())
def test_ordered_mixin(self):
class Grade(OrderedEnum):
__order__ = 'A B C D F'
A = 5
B = 4
C = 3
D = 2
F = 1
self.assertEqual(list(Grade), [Grade.A, Grade.B, Grade.C, Grade.D, Grade.F])
self.assertTrue(Grade.A > Grade.B)
self.assertTrue(Grade.F <= Grade.C)
self.assertTrue(Grade.D < Grade.A)
self.assertTrue(Grade.B >= Grade.B)
def test_extending2(self):
def bad_extension():
class Shade(Enum):
def shade(self):
print(self.name)
class Color(Shade):
red = 1
green = 2
blue = 3
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertRaises(TypeError, bad_extension)
def test_extending3(self):
class Shade(Enum):
def shade(self):
return self.name
class Color(Shade):
def hex(self):
return '%s hexlified!' % self.value
class MoreColor(Color):
cyan = 4
magenta = 5
yellow = 6
self.assertEqual(MoreColor.magenta.hex(), '5 hexlified!')
def test_extend_enum_plain(self):
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
extend_enum(Color, 'brown', 4)
self.assertEqual(Color.brown.name, 'brown')
self.assertEqual(Color.brown.value, 4)
self.assertTrue(Color.brown in Color)
self.assertEqual(len(Color), 4)
    def test_extend_enum_shadow(self):
        # A new member named 'value' must not break value access on
        # existing members.
        class Color(UniqueEnum):
            red = 1
            green = 2
            blue = 3
        extend_enum(Color, 'value', 4)
        self.assertEqual(Color.value.name, 'value')
        self.assertEqual(Color.value.value, 4)
        self.assertTrue(Color.value in Color)
        self.assertEqual(len(Color), 4)
        # existing members are unaffected
        self.assertEqual(Color.red.value, 1)
def test_no_duplicates(self):
def bad_duplicates():
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
class Color(UniqueEnum):
red = 1
green = 2
blue = 3
grene = 2
self.assertRaises(ValueError, bad_duplicates)
    def test_no_duplicates_kinda(self):
        # 'name' and 'value' used as member names are not value aliases;
        # successful class creation is the whole test.
        class Silly(UniqueEnum):
            one = 1
            two = 'dos'
            name = 3
        class Sillier(IntEnum, UniqueEnum):
            single = 1
            name = 2
            triple = 3
            value = 4
    def test_init(self):
        # The member tuple is passed to a user-defined __init__, which
        # attaches extra attributes; .value stays the whole tuple.
        class Planet(Enum):
            MERCURY = (3.303e+23, 2.4397e6)
            VENUS = (4.869e+24, 6.0518e6)
            EARTH = (5.976e+24, 6.37814e6)
            MARS = (6.421e+23, 3.3972e6)
            JUPITER = (1.9e+27, 7.1492e7)
            SATURN = (5.688e+26, 6.0268e7)
            URANUS = (8.686e+25, 2.5559e7)
            NEPTUNE = (1.024e+26, 2.4746e7)
            def __init__(self, mass, radius):
                self.mass = mass       # in kilograms
                self.radius = radius   # in meters
            @property
            def surface_gravity(self):
                # universal gravitational constant  (m3 kg-1 s-2)
                G = 6.67300E-11
                return G * self.mass / (self.radius * self.radius)
        self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
        self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
    def test_nonhash_value(self):
        # Member values need not be hashable (lists here); lookup by
        # value must still work.
        class AutoNumberInAList(Enum):
            def __new__(cls):
                value = [len(cls.__members__) + 1]
                obj = object.__new__(cls)
                obj._value_ = value
                return obj
        class ColorInAList(AutoNumberInAList):
            __order__ = 'red green blue'   # definition order for py2
            red = ()
            green = ()
            blue = ()
        self.assertEqual(list(ColorInAList), [ColorInAList.red, ColorInAList.green, ColorInAList.blue])
        self.assertEqual(ColorInAList.red.value, [1])
        self.assertEqual(ColorInAList([1]), ColorInAList.red)
    def test_conflicting_types_resolved_in_new(self):
        # A custom __new__ decides how the (value, label) tuple maps onto
        # the int mix-in and the extra attribute.
        class LabelledIntEnum(int, Enum):
            def __new__(cls, *args):
                value, label = args
                obj = int.__new__(cls, value)
                obj.label = label
                obj._value_ = value
                return obj
        class LabelledList(LabelledIntEnum):
            unprocessed = (1, "Unprocessed")
            payment_complete = (2, "Payment Complete")
        self.assertEqual(LabelledList.unprocessed, 1)
        self.assertEqual(LabelledList(1), LabelledList.unprocessed)
        self.assertEqual(list(LabelledList), [LabelledList.unprocessed, LabelledList.payment_complete])
def test_empty_with_functional_api(self):
empty = aenum.IntEnum('Foo', {})
self.assertEqual(len(empty), 0)
    def test_auto_init(self):
        # _init_ names the fields each member tuple is unpacked into --
        # no explicit __init__ required.
        class Planet(Enum):
            _init_ = 'mass radius'
            MERCURY = (3.303e+23, 2.4397e6)
            VENUS = (4.869e+24, 6.0518e6)
            EARTH = (5.976e+24, 6.37814e6)
            MARS = (6.421e+23, 3.3972e6)
            JUPITER = (1.9e+27, 7.1492e7)
            SATURN = (5.688e+26, 6.0268e7)
            URANUS = (8.686e+25, 2.5559e7)
            NEPTUNE = (1.024e+26, 2.4746e7)
            @property
            def surface_gravity(self):
                # universal gravitational constant  (m3 kg-1 s-2)
                G = 6.67300E-11
                return G * self.mass / (self.radius * self.radius)
        self.assertEqual(round(Planet.EARTH.surface_gravity, 2), 9.80)
        self.assertEqual(Planet.EARTH.value, (5.976e+24, 6.37814e6))
def test_auto_init_with_value(self):
class Color(Enum):
_init_='value, rgb'
RED = 1, (1, 0, 0)
BLUE = 2, (0, 1, 0)
GREEN = 3, (0, 0, 1)
self.assertEqual(Color.RED.value, 1)
self.assertEqual(Color.BLUE.value, 2)
self.assertEqual(Color.GREEN.value, 3)
self.assertEqual(Color.RED.rgb, (1, 0, 0))
self.assertEqual(Color.BLUE.rgb, (0, 1, 0))
self.assertEqual(Color.GREEN.rgb, (0, 0, 1))
    def test_settings(self):
        # NoAlias: members with identical values stay distinct members
        # instead of the second becoming an alias of the first.
        class Settings(Enum):
            _settings_ = NoAlias
            red = 1
            rojo = 1
        self.assertFalse(Settings.red is Settings.rojo)
    def test_auto_and_init(self):
        # AutoNumber supplies the int value, while _init_ routes the
        # assigned string into each member's __doc__.
        class Field(IntEnum):
            _order_ = 'TYPE START'
            _init_ = '__doc__'
            _settings_ = AutoNumber
            TYPE = "Char, Date, Logical, etc."
            START = "Field offset in record"
        self.assertEqual(Field.TYPE, 1)
        self.assertEqual(Field.START, 2)
        self.assertEqual(Field.TYPE.__doc__, 'Char, Date, Logical, etc.')
        self.assertEqual(Field.START.__doc__, 'Field offset in record')
    def test_auto_and_start(self):
        # _start_ both enables auto-numbering and sets the first value.
        class Field(IntEnum):
            _order_ = 'TYPE START'
            _start_ = 0
            _init_ = '__doc__'
            TYPE = "Char, Date, Logical, etc."
            START = "Field offset in record"
        self.assertEqual(Field.TYPE, 0)
        self.assertEqual(Field.START, 1)
        self.assertEqual(Field.TYPE.__doc__, 'Char, Date, Logical, etc.')
        self.assertEqual(Field.START.__doc__, 'Field offset in record')
    def test_auto_and_init_and_some_values(self):
        # An explicit leading int (BLAH = 5, ...) overrides the counter,
        # and auto-numbering continues from there (BELCH -> 6).
        class Field(IntEnum):
            _order_ = 'TYPE START'
            _init_ = '__doc__'
            _settings_ = AutoNumber
            TYPE = "Char, Date, Logical, etc."
            START = "Field offset in record"
            BLAH = 5, "test blah"
            BELCH = 'test belch'
        self.assertEqual(Field.TYPE, 1)
        self.assertEqual(Field.START, 2)
        self.assertEqual(Field.BLAH, 5)
        self.assertEqual(Field.BELCH, 6)
        self.assertEqual(Field.TYPE.__doc__, 'Char, Date, Logical, etc.')
        self.assertEqual(Field.START.__doc__, 'Field offset in record')
        self.assertEqual(Field.BLAH.__doc__, 'test blah')
        self.assertEqual(Field.BELCH.__doc__, 'test belch')
    def test_auto_and_init_inherited(self):
        # _start_ and _init_ declared on a base enum apply to subclasses.
        class AutoEnum(IntEnum):
            _start_ = 0
            _init_ = '__doc__'
        class Field(AutoEnum):
            _order_ = 'TYPE START BLAH BELCH'
            TYPE = "Char, Date, Logical, etc."
            START = "Field offset in record"
            BLAH = 5, "test blah"
            BELCH = 'test belch'
        self.assertEqual(Field.TYPE, 0)
        self.assertEqual(Field.START, 1)
        self.assertEqual(Field.BLAH, 5)
        self.assertEqual(Field.BELCH, 6)
        self.assertEqual(Field.TYPE.__doc__, 'Char, Date, Logical, etc.')
        self.assertEqual(Field.START.__doc__, 'Field offset in record')
        self.assertEqual(Field.BLAH.__doc__, 'test blah')
        self.assertEqual(Field.BELCH.__doc__, 'test belch')
def test_AutoNumberEnum_and_property(self):
class Color(aenum.AutoNumberEnum):
red = ()
green = ()
blue = ()
@property
def cap_name(self):
return self.name.title()
self.assertEqual(Color.blue.cap_name, 'Blue')
    def test_AutoNumberEnum(self):
        # Without __order__, py2 has no definition order to recover, so
        # members are ordered (and numbered) differently than in py3.
        class Color(aenum.AutoNumberEnum):
            red = ()
            green = ()
            blue = ()
        # In py2 the order should blue, green, red
        # In py3 the order should be red, green, blue
        if pyver < 3.0:
            self.assertEqual(list(Color), [Color.blue, Color.green, Color.red])
            self.assertEqual(Color.blue.value, 1)
            self.assertEqual(Color.green.value, 2)
            self.assertEqual(Color.red.value, 3)
        else:
            self.assertEqual(list(Color), [Color.red, Color.green, Color.blue])
            self.assertEqual(Color.red.value, 1)
            self.assertEqual(Color.green.value, 2)
            self.assertEqual(Color.blue.value, 3)
    def test_combine_new_settings_with_old_settings(self):
        # Unique is inherited from the base; ICK = 1 duplicates the
        # auto-numbered BLAH, so the subclass must be rejected.
        class Auto(Enum):
            _settings_ = Unique
        with self.assertRaises(ValueError):
            class AutoUnique(Auto):
                _settings_ = AutoNumber
                BLAH = ()
                BLUH = ()
                ICK = 1
    def test_timedelta(self):
        # Members are generated programmatically by writing into the
        # class namespace obtained via vars(); _ignore_ removes the
        # temporary names ('Period', 'i') so they do not become members.
        class Period(timedelta, Enum):
            '''
            different lengths of time
            '''
            _init_ = 'value period'
            _settings_ = NoAlias
            _ignore_ = 'Period i'
            Period = vars()   # the class namespace itself
            for i in range(31):
                Period['day_%d' % i] = i, 'day'
            for i in range(15):
                Period['week_%d' % i] = i*7, 'week'
            for i in range(12):
                Period['month_%d' % i] = i*30, 'month'
            # names injected above are usable directly in the class body
            OneDay = day_1
            OneWeek = week_1
        self.assertFalse(hasattr(Period, '_ignore_'))
        self.assertFalse(hasattr(Period, 'Period'))
        self.assertFalse(hasattr(Period, 'i'))
        self.assertTrue(isinstance(Period.day_1, timedelta))
    def test_skip(self):
        # @skip leaves the nested classes as ordinary class attributes
        # instead of converting them into enum members.
        class enumA(Enum):
            @skip
            class enumB(Enum):
                elementA = 'a'
                elementB = 'b'
            @skip
            class enumC(Enum):
                elementC = 'c'
                elementD = 'd'
        self.assertIs(enumA.enumB, enumA.__dict__['enumB'])
    # only meaningful when the stdlib enum module is importable
    if StdlibEnumMeta is not None:
        def test_stdlib_inheritence(self):
            # aenum classes should also register as stdlib enums
            self.assertTrue(isinstance(self.Season, StdlibEnumMeta))
            self.assertTrue(issubclass(self.Season, StdlibEnum))
class TestUnique(unittest.TestCase):
"""2.4 doesn't allow class decorators, use function syntax."""
def test_unique_clean(self):
class Clean(Enum):
one = 1
two = 'dos'
tres = 4.0
unique(Clean)
class Cleaner(IntEnum):
single = 1
double = 2
triple = 3
unique(Cleaner)
def test_unique_dirty(self):
try:
class Dirty(Enum):
__order__ = 'one two tres'
one = 1
two = 'dos'
tres = 1
unique(Dirty)
except ValueError:
exc = sys.exc_info()[1]
message = exc.args[0]
self.assertTrue('tres -> one' in message)
try:
class Dirtier(IntEnum):
__order__ = 'single double triple turkey'
single = 1
double = 1
triple = 3
turkey = 3
unique(Dirtier)
except ValueError:
exc = sys.exc_info()[1]
message = exc.args[0]
self.assertTrue('double -> single' in message)
self.assertTrue('turkey -> triple' in message)
def test_unique_with_name(self):
@unique
class Silly(Enum):
one = 1
two = 'dos'
name = 3
@unique
class Sillier(IntEnum):
single = 1
name = 2
triple = 3
value = 4
class TestNamedTuple(unittest.TestCase):
    """Tests for aenum.NamedTuple: field declaration styles, the
    functional API, defaults, tuple sizes, and class combination.

    Fixes: test_subclassing used assertTrue(ti.what(), 'Henry') -- the
    second argument of assertTrue is the failure message, so nothing was
    compared; replaced with assertEqual.  Also corrected the
    'horizondal' typo to match the spelling used elsewhere.
    """
    def test_explicit_indexing(self):
        # each field names its tuple index explicitly
        class Person(NamedTuple):
            age = 0
            first = 1
            last = 2
        p1 = Person(17, 'John', 'Doe')
        p2 = Person(21, 'Jane', 'Doe')
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p2[0], 21)
        self.assertEqual(p2[1], 'Jane')
        self.assertEqual(p2[2], 'Doe')
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.first, 'John')
        self.assertEqual(p1.last, 'Doe')
        self.assertEqual(p2.age, 21)
        self.assertEqual(p2.first, 'Jane')
        self.assertEqual(p2.last, 'Doe')
    def test_implicit_indexing(self):
        # indexes come from __order__; the assigned strings are docs
        class Person(NamedTuple):
            __order__ = "age first last"
            age = "person's age"
            first = "person's first name"
            last = "person's last name"
        p1 = Person(17, 'John', 'Doe')
        p2 = Person(21, 'Jane', 'Doe')
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p2[0], 21)
        self.assertEqual(p2[1], 'Jane')
        self.assertEqual(p2[2], 'Doe')
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.first, 'John')
        self.assertEqual(p1.last, 'Doe')
        self.assertEqual(p2.age, 21)
        self.assertEqual(p2.first, 'Jane')
        self.assertEqual(p2.last, 'Doe')
    def test_mixed_indexing(self):
        # 'last = 2, ...' pins index 2, leaving index 1 positional-only
        class Person(NamedTuple):
            __order__ = "age last cars"
            age = "person's age"
            last = 2, "person's last name"
            cars = "person's cars"
        p1 = Person(17, 'John', 'Doe', 3)
        p2 = Person(21, 'Jane', 'Doe', 9)
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p1[3], 3)
        self.assertEqual(p2[0], 21)
        self.assertEqual(p2[1], 'Jane')
        self.assertEqual(p2[2], 'Doe')
        self.assertEqual(p2[3], 9)
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.last, 'Doe')
        self.assertEqual(p1.cars, 3)
        self.assertEqual(p2.age, 21)
        self.assertEqual(p2.last, 'Doe')
        self.assertEqual(p2.cars, 9)
    def test_issubclass(self):
        class Person(NamedTuple):
            age = 0
            first = 1
            last = 2
        self.assertTrue(issubclass(Person, NamedTuple))
        self.assertTrue(issubclass(Person, tuple))
    def test_isinstance(self):
        class Person(NamedTuple):
            age = 0
            first = 1
            last = 2
        p1 = Person(17, 'John', 'Doe')
        self.assertTrue(isinstance(p1, Person))
        self.assertTrue(isinstance(p1, NamedTuple))
        self.assertTrue(isinstance(p1, tuple))
    def test_explicit_indexing_after_functional_api(self):
        # functional API with (name, index) pairs
        Person = NamedTuple('Person', (('age', 0), ('first', 1), ('last', 2)))
        p1 = Person(17, 'John', 'Doe')
        p2 = Person(21, 'Jane', 'Doe')
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p2[0], 21)
        self.assertEqual(p2[1], 'Jane')
        self.assertEqual(p2[2], 'Doe')
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.first, 'John')
        self.assertEqual(p1.last, 'Doe')
        self.assertEqual(p2.age, 21)
        self.assertEqual(p2.first, 'Jane')
        self.assertEqual(p2.last, 'Doe')
    def test_implicit_indexing_after_functional_api(self):
        # functional API with a space-separated field-name string
        Person = NamedTuple('Person', 'age first last')
        p1 = Person(17, 'John', 'Doe')
        p2 = Person(21, 'Jane', 'Doe')
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p2[0], 21)
        self.assertEqual(p2[1], 'Jane')
        self.assertEqual(p2[2], 'Doe')
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.first, 'John')
        self.assertEqual(p1.last, 'Doe')
        self.assertEqual(p2.age, 21)
        self.assertEqual(p2.first, 'Jane')
        self.assertEqual(p2.last, 'Doe')
    def test_mixed_indexing_after_functional_api(self):
        # index 1 is skipped: positional-only, no attribute name
        Person = NamedTuple('Person', (('age', 0), ('last', 2), ('cars', 3)))
        p1 = Person(17, 'John', 'Doe', 3)
        p2 = Person(21, 'Jane', 'Doe', 9)
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p1[3], 3)
        self.assertEqual(p2[0], 21)
        self.assertEqual(p2[1], 'Jane')
        self.assertEqual(p2[2], 'Doe')
        self.assertEqual(p2[3], 9)
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.last, 'Doe')
        self.assertEqual(p1.cars, 3)
        self.assertEqual(p2.age, 21)
        self.assertEqual(p2.last, 'Doe')
        self.assertEqual(p2.cars, 9)
    def test_issubclass_after_functional_api(self):
        Person = NamedTuple('Person', 'age first last')
        self.assertTrue(issubclass(Person, NamedTuple))
        self.assertTrue(issubclass(Person, tuple))
    def test_isinstance_after_functional_api(self):
        Person = NamedTuple('Person', 'age first last')
        p1 = Person(17, 'John', 'Doe')
        self.assertTrue(isinstance(p1, Person))
        self.assertTrue(isinstance(p1, NamedTuple))
        self.assertTrue(isinstance(p1, tuple))
    def test_creation_with_all_keywords(self):
        Person = NamedTuple('Person', 'age first last')
        p1 = Person(age=17, first='John', last='Doe')
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.first, 'John')
        self.assertEqual(p1.last, 'Doe')
    def test_creation_with_some_keywords(self):
        # positional and keyword arguments may be mixed, in any keyword order
        Person = NamedTuple('Person', 'age first last')
        p1 = Person(17, first='John', last='Doe')
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.first, 'John')
        self.assertEqual(p1.last, 'Doe')
        p1 = Person(17, last='Doe', first='John')
        self.assertEqual(p1[0], 17)
        self.assertEqual(p1[1], 'John')
        self.assertEqual(p1[2], 'Doe')
        self.assertEqual(p1.age, 17)
        self.assertEqual(p1.first, 'John')
        self.assertEqual(p1.last, 'Doe')
    def test_custom_new(self):
        # a user __new__ may parse alternate constructor input
        class Book(NamedTuple):
            title = 0
            author = 1
            genre = 2
            def __new__(cls, string):
                args = [s.strip() for s in string.split(';')]
                return super(Book, cls).__new__(cls, *tuple(args))
        b1 = Book('The Last Mohican; John Doe; Historical')
        self.assertEqual(b1.title, 'The Last Mohican')
        self.assertEqual(b1.author, 'John Doe')
        self.assertEqual(b1.genre, 'Historical')
    def test_defaults_in_class(self):
        # third element of the field tuple is the default value
        class Character(NamedTuple):
            name = 0
            gender = 1, None, 'male'
            klass = 2, None, 'fighter'
        for char in (
                {'name':'John Doe'},
                {'name':'William Pickney', 'klass':'scholar'},
                {'name':'Sarah Doughtery', 'gender':'female'},
                {'name':'Sissy Moonbeam', 'gender':'female', 'klass':'sorceress'},
                ):
            c = Character(**char)
            for name, value in (('name', None), ('gender','male'), ('klass','fighter')):
                if name in char:
                    value = char[name]
                self.assertEqual(getattr(c, name), value)
    def test_defaults_in_class_that_are_falsey(self):
        # falsey defaults (0) must still be applied
        class Point(NamedTuple):
            x = 0, 'horizontal coordinate', 0
            y = 1, 'vertical coordinate', 0
        p = Point()
        self.assertEqual(p.x, 0)
        self.assertEqual(p.y, 0)
    def test_pickle_namedtuple_with_module(self):
        # module-level fixture; an Exception instance signals that its
        # creation failed at import time
        if isinstance(LifeForm, Exception):
            raise LifeForm
        lf = LifeForm('this', 'that', 'theother')
        test_pickle_dump_load(self.assertEqual, lf)
    def test_pickle_namedtuple_without_module(self):
        if isinstance(DeathForm, Exception):
            raise DeathForm
        df = DeathForm('sickly green', '2x4', 'foul')
        test_pickle_dump_load(self.assertEqual, df)
    def test_subclassing(self):
        if isinstance(ThatsIt, Exception):
            raise ThatsIt
        ti = ThatsIt('Henry', 'Weinhardt')
        self.assertEqual(ti.blah, 'Henry')
        # was assertTrue(ti.what(), 'Henry'): the 'Henry' was a message,
        # not a comparison -- use assertEqual so the value is checked
        self.assertEqual(ti.what(), 'Henry')
        test_pickle_dump_load(self.assertEqual, ti)
    def test_contains(self):
        Book = NamedTuple('Book', 'title author genre')
        b = Book('Teckla', 'Steven Brust', 'fantasy')
        self.assertTrue('Teckla' in b)
        self.assertTrue('Steven Brust' in b)
        self.assertTrue('fantasy' in b)
    def test_fixed_size(self):
        # TupleSize.fixed: every field is required
        class Book(NamedTuple):
            _size_ = TupleSize.fixed
            title = 0
            author = 1
            genre = 2
        b = Book('Teckla', 'Steven Brust', 'fantasy')
        self.assertTrue('Teckla' in b)
        self.assertTrue('Steven Brust' in b)
        self.assertTrue('fantasy' in b)
        self.assertEqual(b.title, 'Teckla')
        self.assertEqual(b.author, 'Steven Brust')
        self.assertRaises(TypeError, Book, 'Teckla', 'Steven Brust')
        self.assertRaises(TypeError, Book, 'Teckla')
    def test_minimum_size(self):
        # TupleSize.minimum: declared fields required, extras allowed
        class Book(NamedTuple):
            _size_ = TupleSize.minimum
            title = 0
            author = 1
        b = Book('Teckla', 'Steven Brust', 'fantasy')
        self.assertTrue('Teckla' in b)
        self.assertTrue('Steven Brust' in b)
        self.assertTrue('fantasy' in b)
        self.assertEqual(b.title, 'Teckla')
        self.assertEqual(b.author, 'Steven Brust')
        b = Book('Teckla', 'Steven Brust')
        self.assertTrue('Teckla' in b)
        self.assertTrue('Steven Brust' in b)
        self.assertEqual(b.title, 'Teckla')
        self.assertEqual(b.author, 'Steven Brust')
        self.assertRaises(TypeError, Book, 'Teckla')
    def test_variable_size(self):
        # TupleSize.variable: trailing fields may be omitted entirely
        class Book(NamedTuple):
            _size_ = TupleSize.variable
            title = 0
            author = 1
            genre = 2
        b = Book('Teckla', 'Steven Brust', 'fantasy')
        self.assertTrue('Teckla' in b)
        self.assertTrue('Steven Brust' in b)
        self.assertTrue('fantasy' in b)
        self.assertEqual(b.title, 'Teckla')
        self.assertEqual(b.author, 'Steven Brust')
        self.assertEqual(b.genre, 'fantasy')
        b = Book('Teckla', 'Steven Brust')
        self.assertTrue('Teckla' in b)
        self.assertTrue('Steven Brust' in b)
        self.assertEqual(b.title, 'Teckla')
        self.assertEqual(b.author, 'Steven Brust')
        self.assertRaises(AttributeError, getattr, b, 'genre')
        self.assertRaises(TypeError, Book, title='Teckla', genre='fantasy')
        self.assertRaises(TypeError, Book, author='Steven Brust')
    def test_combining_namedtuples(self):
        # three equivalent ways of combining: +, multiple inheritance,
        # and manual re-declaration with shifted indexes
        class Point(NamedTuple):
            x = 0, 'horizontal coordinate', 1
            y = 1, 'vertical coordinate', -1
        class Color(NamedTuple):
            r = 0, 'red component', 11
            g = 1, 'green component', 29
            b = 2, 'blue component', 37
        Pixel1 = NamedTuple('Pixel', Point+Color, module=__name__)
        class Pixel2(Point, Color):
            "a colored dot"
        class Pixel3(Point):
            r = 2, 'red component', 11
            g = 3, 'green component', 29
            b = 4, 'blue component', 37
        self.assertEqual(Pixel1._fields_, 'x y r g b'.split())
        self.assertEqual(Pixel1.x.__doc__, 'horizontal coordinate')
        self.assertEqual(Pixel1.x.default, 1)
        self.assertEqual(Pixel1.y.__doc__, 'vertical coordinate')
        self.assertEqual(Pixel1.y.default, -1)
        self.assertEqual(Pixel1.r.__doc__, 'red component')
        self.assertEqual(Pixel1.r.default, 11)
        self.assertEqual(Pixel1.g.__doc__, 'green component')
        self.assertEqual(Pixel1.g.default, 29)
        self.assertEqual(Pixel1.b.__doc__, 'blue component')
        self.assertEqual(Pixel1.b.default, 37)
        self.assertEqual(Pixel2._fields_, 'x y r g b'.split())
        self.assertEqual(Pixel2.x.__doc__, 'horizontal coordinate')
        self.assertEqual(Pixel2.x.default, 1)
        self.assertEqual(Pixel2.y.__doc__, 'vertical coordinate')
        self.assertEqual(Pixel2.y.default, -1)
        self.assertEqual(Pixel2.r.__doc__, 'red component')
        self.assertEqual(Pixel2.r.default, 11)
        self.assertEqual(Pixel2.g.__doc__, 'green component')
        self.assertEqual(Pixel2.g.default, 29)
        self.assertEqual(Pixel2.b.__doc__, 'blue component')
        self.assertEqual(Pixel2.b.default, 37)
        self.assertEqual(Pixel3._fields_, 'x y r g b'.split())
        self.assertEqual(Pixel3.x.__doc__, 'horizontal coordinate')
        self.assertEqual(Pixel3.x.default, 1)
        self.assertEqual(Pixel3.y.__doc__, 'vertical coordinate')
        self.assertEqual(Pixel3.y.default, -1)
        self.assertEqual(Pixel3.r.__doc__, 'red component')
        self.assertEqual(Pixel3.r.default, 11)
        self.assertEqual(Pixel3.g.__doc__, 'green component')
        self.assertEqual(Pixel3.g.default, 29)
        self.assertEqual(Pixel3.b.__doc__, 'blue component')
        self.assertEqual(Pixel3.b.default, 37)
    def test_function_api_type(self):
        # type= supplies a base class with extra behavior
        class Tester(NamedTuple):
            def howdy(self):
                return 'backwards', list(reversed(self))
        Testee = NamedTuple('Testee', 'a c e', type=Tester)
        t = Testee(1, 2, 3)
        self.assertEqual(t.howdy(), ('backwards', [3, 2, 1]))
    def test_asdict(self):
        class Point(NamedTuple):
            x = 0, 'horizontal coordinate', 1
            y = 1, 'vertical coordinate', -1
        class Color(NamedTuple):
            r = 0, 'red component', 11
            g = 1, 'green component', 29
            b = 2, 'blue component', 37
        Pixel = NamedTuple('Pixel', Point+Color, module=__name__)
        pixel = Pixel(99, -101, 255, 128, 0)
        self.assertEqual(pixel._asdict(), {'x':99, 'y':-101, 'r':255, 'g':128, 'b':0})
    def test_make(self):
        class Point(NamedTuple):
            x = 0, 'horizontal coordinate', 1
            y = 1, 'vertical coordinate', -1
        self.assertEqual(Point(4, 5), (4, 5))
        self.assertEqual(Point._make((4, 5)), (4, 5))
    def test_replace(self):
        class Color(NamedTuple):
            r = 0, 'red component', 11
            g = 1, 'green component', 29
            b = 2, 'blue component', 37
        purple = Color(127, 0, 127)
        mid_gray = purple._replace(g=127)
        self.assertEqual(mid_gray, (127, 127, 127))
class TestNamedConstant(unittest.TestCase):
    # Tests for aenum.NamedConstant: named, write-once values.
    def test_constantness(self):
        # constants may reference each other during class creation, but
        # rebinding afterwards must raise
        class K(NamedConstant):
            PI = 3.141596
            TAU = 2 * PI
        self.assertEqual(K.PI, 3.141596)
        self.assertEqual(K.TAU, 2 * K.PI)
        self.assertRaises(AttributeError, setattr, K, 'PI', 9)
    def test_duplicates(self):
        # equal values are allowed and stay distinct constant objects
        class CardNumber(NamedConstant):
            ACE = 11
            TWO = 2
            THREE = 3
            FOUR = 4
            FIVE = 5
            SIX = 6
            SEVEN = 7
            EIGHT = 8
            NINE = 9
            TEN = 10
            JACK = 10
            QUEEN = 10
            KING = 10
        self.assertFalse(CardNumber.TEN is CardNumber.JACK)
        self.assertEqual(CardNumber.TEN, CardNumber.JACK)
        self.assertEqual(CardNumber.TEN, 10)
    def test_extend_constants(self):
        # calling the class adds a new constant after creation
        class CardSuit(NamedConstant):
            HEARTS = 1
            SPADES = 2
            DIAMONTS = 3
            CLUBS = 4
        self.assertEqual(CardSuit.HEARTS, 1)
        stars = CardSuit('STARS', 5)
        self.assertIs(stars, CardSuit.STARS)
        self.assertEqual(CardSuit.STARS, 5)
    def test_constant_with_docstring(self):
        # constant() attaches a docstring; plain values have none
        class Stuff(NamedConstant):
            Artifact = constant(7, "lucky number!")
            Bowling = 11
            HillWomp = constant(29, 'blah blah')
        self.assertEqual(Stuff.Artifact, 7)
        self.assertEqual(Stuff.Artifact.__doc__, 'lucky number!')
        self.assertEqual(Stuff.Bowling, 11)
        self.assertEqual(Stuff.Bowling.__doc__, None)
        self.assertEqual(Stuff.HillWomp, 29)
        self.assertEqual(Stuff.HillWomp.__doc__, 'blah blah')
class TestMe(unittest.TestCase):
    # empty placeholder suite (no tests defined)
    pass
# run the full suite when executed directly
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ebb7ca86c31c2c40a481df690251d994",
"timestamp": "",
"source": "github",
"line_count": 2407,
"max_line_length": 115,
"avg_line_length": 37.03323639385127,
"alnum_prop": 0.528836984933643,
"repo_name": "harshita-gupta/Harvard-FRSEM-Catalog-2016-17",
"id": "3c8c47111457fb5bf5efbfd25ff5f7c3bb85f62f",
"size": "89139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask/lib/python2.7/site-packages/aenum/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6612"
},
{
"name": "CSS",
"bytes": "55614"
},
{
"name": "HTML",
"bytes": "18090"
},
{
"name": "JavaScript",
"bytes": "176982"
},
{
"name": "Python",
"bytes": "13727320"
},
{
"name": "Shell",
"bytes": "3264"
}
],
"symlink_target": ""
} |
# namedtuple is optional in MicroPython builds: print SKIP and bail out
# when it is unavailable so the test harness ignores this file.
try:
    from collections import namedtuple
except ImportError:
    print("SKIP")
    raise SystemExit
import skip_if
# helper from the test suite; presumably skips unless CPython-compatible
# behavior is enabled -- TODO confirm against skip_if.py
skip_if.no_cpython_compat()
# base tuple type holding the two payload fields
_DefragResultBase = namedtuple('DefragResult', [ 'foo', 'bar' ])
class _ResultMixinStr(object):
    """Mixin converting all str fields to bytes via the counterpart class."""
    def encode(self):
        encoded_fields = [field.encode() for field in self]
        return self._encoded_counterpart(*encoded_fields)
class _ResultMixinBytes(object):
    """Mixin converting all bytes fields to str via the counterpart class."""
    def decode(self):
        decoded_fields = [field.decode() for field in self]
        return self._decoded_counterpart(*decoded_fields)
class DefragResult(_DefragResultBase, _ResultMixinStr):
    # str-valued result; .encode() yields a DefragResultBytes
    pass
class DefragResultBytes(_DefragResultBase, _ResultMixinBytes):
    # bytes-valued result; .decode() yields a DefragResult
    pass
# cross-link the str and bytes variants so encode()/decode() can find
# their counterpart class (classes stored as class attributes)
DefragResult._encoded_counterpart = DefragResultBytes
DefragResultBytes._decoded_counterpart = DefragResult
# Due to differences in type and native subclass printing,
# the best thing we can do here is to just test that no exceptions
# happen
#print(DefragResult, DefragResult._encoded_counterpart)
#print(DefragResultBytes, DefragResultBytes._decoded_counterpart)
o1 = DefragResult("a", "b")
#print(o1, type(o1))
o2 = DefragResultBytes("a", "b")
#print(o2, type(o2))
#print(o1._encoded_counterpart)
# exercise encode(): each str field becomes bytes in the counterpart
_o1 = o1.encode()
print(_o1[0], _o1[1])
#print(_o1, type(_o1))
print("All's ok")
| {
"content_hash": "1013b20765c0d1db83b9ed78dc41f45b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 69,
"avg_line_length": 25.319148936170212,
"alnum_prop": 0.7260504201680672,
"repo_name": "adafruit/micropython",
"id": "504f460a7d2e4b81177d6d3ae13da470253f5458",
"size": "1447",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/basics/class_store_class.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "55179"
},
{
"name": "C",
"bytes": "36329657"
},
{
"name": "C++",
"bytes": "758796"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "85896"
},
{
"name": "Objective-C",
"bytes": "458875"
},
{
"name": "Python",
"bytes": "588680"
},
{
"name": "Shell",
"bytes": "4829"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the shared star-destroyer ship template object.

    kernel: engine kernel passed by the template loader (unused here,
    kept for the template API signature).
    """
    result = Ship()
    result.template = "object/ship/shared_star_destroyer.iff"
    result.attribute_template_id = -1
    # NOTE(review): the stf name "xwing swg-sw test" looks like a
    # placeholder copied from an x-wing template -- confirm against data.
    result.stfName("","xwing swg-sw test")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    # fixed: the return line had trailing garbage ("| {") fused onto it
    return result
"content_hash": "8084f8a954e3902a21f53076bef9ff62",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 58,
"avg_line_length": 21.76923076923077,
"alnum_prop": 0.6749116607773852,
"repo_name": "anhstudios/swganh",
"id": "98d08fe45bff38f7de10562ac288d59263e59c9c",
"size": "428",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/ship/shared_star_destroyer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import re
import six
from testtools import matchers
from heat.common import exception
from heat.common import template_format
from heat.engine.resources.openstack.heat import random_string as rs
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class TestRandomString(common.HeatTestCase):
template_random_string = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
secret1:
Type: OS::Heat::RandomString
secret2:
Type: OS::Heat::RandomString
Properties:
length: 10
secret3:
Type: OS::Heat::RandomString
Properties:
length: 100
sequence: octdigits
secret4:
Type: OS::Heat::RandomString
Properties:
length: 32
character_classes:
- class: digits
min: 1
- class: uppercase
min: 1
- class: lowercase
min: 20
character_sequences:
- sequence: (),[]{}
min: 1
- sequence: $_
min: 2
- sequence: '@'
min: 5
secret5:
Type: OS::Heat::RandomString
Properties:
length: 25
character_classes:
- class: digits
min: 1
- class: uppercase
min: 1
- class: lowercase
min: 20
secret6:
Type: OS::Heat::RandomString
Properties:
length: 10
character_sequences:
- sequence: (),[]{}
min: 1
- sequence: $_
min: 2
- sequence: '@'
min: 5
'''
    def setUp(self):
        # fresh dummy request context for each test
        super(TestRandomString, self).setUp()
        self.ctx = utils.dummy_context()
    def create_stack(self, templ):
        # parse, store, and create the stack; the in-process create()
        # call returns None on success
        self.stack = self.parse_stack(template_format.parse(templ))
        self.assertIsNone(self.stack.create())
        return self.stack
def parse_stack(self, t):
stack_name = 'test_stack'
tmpl = template.Template(t)
stack = parser.Stack(utils.dummy_context(), stack_name, tmpl)
stack.validate()
stack.store()
return stack
def test_random_string(self):
stack = self.create_stack(self.template_random_string)
secret1 = stack['secret1']
def assert_min(pattern, string, minimum):
self.assertTrue(len(re.findall(pattern, string)) >= minimum)
random_string = secret1.FnGetAtt('value')
assert_min('[a-zA-Z0-9]', random_string, 32)
self.assertRaises(exception.InvalidTemplateAttribute,
secret1.FnGetAtt, 'foo')
self.assertEqual(secret1.FnGetRefId(), random_string)
secret2 = stack['secret2']
random_string = secret2.FnGetAtt('value')
assert_min('[a-zA-Z0-9]', random_string, 10)
self.assertEqual(secret2.FnGetRefId(), random_string)
secret3 = stack['secret3']
random_string = secret3.FnGetAtt('value')
assert_min('[0-7]', random_string, 100)
self.assertEqual(secret3.FnGetRefId(), random_string)
secret4 = stack['secret4']
random_string = secret4.FnGetAtt('value')
self.assertEqual(32, len(random_string))
assert_min('[0-9]', random_string, 1)
assert_min('[A-Z]', random_string, 1)
assert_min('[a-z]', random_string, 20)
assert_min('[(),\[\]{}]', random_string, 1)
assert_min('[$_]', random_string, 2)
assert_min('@', random_string, 5)
self.assertEqual(secret4.FnGetRefId(), random_string)
secret5 = stack['secret5']
random_string = secret5.FnGetAtt('value')
self.assertEqual(25, len(random_string))
assert_min('[0-9]', random_string, 1)
assert_min('[A-Z]', random_string, 1)
assert_min('[a-z]', random_string, 20)
self.assertEqual(secret5.FnGetRefId(), random_string)
secret6 = stack['secret6']
random_string = secret6.FnGetAtt('value')
self.assertEqual(10, len(random_string))
assert_min('[(),\[\]{}]', random_string, 1)
assert_min('[$_]', random_string, 2)
assert_min('@', random_string, 5)
self.assertEqual(secret6.FnGetRefId(), random_string)
# Prove the name is returned before create sets the ID
secret6.resource_id = None
self.assertEqual('secret6', secret6.FnGetRefId())
def test_invalid_property_combination(self):
template_random_string = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
secret:
Type: OS::Heat::RandomString
Properties:
length: 32
sequence: octdigits
character_classes:
- class: digits
min: 1
character_sequences:
- sequence: (),[]{}
min: 1
'''
exc = self.assertRaises(exception.StackValidationFailed,
self.create_stack, template_random_string)
self.assertEqual("Cannot use deprecated 'sequence' property along "
"with 'character_sequences' or 'character_classes' "
"properties", six.text_type(exc))
def test_invalid_length(self):
template_random_string = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
secret:
Type: OS::Heat::RandomString
Properties:
length: 5
character_classes:
- class: digits
min: 5
character_sequences:
- sequence: (),[]{}
min: 1
'''
exc = self.assertRaises(exception.StackValidationFailed,
self.create_stack, template_random_string)
self.assertEqual("Length property cannot be smaller than combined "
"character class and character sequence minimums",
six.text_type(exc))
def test_max_length(self):
template_random_string = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
secret:
Type: OS::Heat::RandomString
Properties:
length: 512
'''
stack = self.create_stack(template_random_string)
secret = stack['secret']
random_string = secret.FnGetAtt('value')
self.assertEqual(512, len(random_string))
self.assertEqual(secret.FnGetRefId(), random_string)
def test_exceeds_max_length(self):
template_random_string = '''
HeatTemplateFormatVersion: '2012-12-12'
Resources:
secret:
Type: OS::Heat::RandomString
Properties:
length: 513
'''
exc = self.assertRaises(exception.StackValidationFailed,
self.create_stack, template_random_string)
self.assertIn('513 is out of range (min: 1, max: 512)',
six.text_type(exc))
class TestGenerateRandomString(common.HeatTestCase):
    """Scenario tests for the deprecated 'sequence'-based generator.

    Each scenario requests a string of ``length`` characters drawn from
    the named character ``seq`` and checks the result fully matches the
    corresponding regex ``pattern``.
    """

    scenarios = [
        ('lettersdigits', dict(
            length=1, seq='lettersdigits', pattern='[a-zA-Z0-9]')),
        ('letters', dict(
            length=10, seq='letters', pattern='[a-zA-Z]')),
        ('lowercase', dict(
            length=100, seq='lowercase', pattern='[a-z]')),
        ('uppercase', dict(
            length=50, seq='uppercase', pattern='[A-Z]')),
        ('digits', dict(
            length=512, seq='digits', pattern='[0-9]')),
        ('hexdigits', dict(
            length=16, seq='hexdigits', pattern='[A-F0-9]')),
        ('octdigits', dict(
            length=32, seq='octdigits', pattern='[0-7]'))
    ]

    def test_generate_random_string(self):
        # The sequence lookup and regex are loop-invariant; build them once.
        sequence = rs.RandomString._sequences[self.seq]
        regex = '%s{%s}' % (self.pattern, self.length)
        # Run the generator multiple times to confirm it doesn't produce a
        # matching pattern merely by chance.
        for _ in range(31):
            r = rs.RandomString._deprecated_random_string(sequence,
                                                          self.length)
            self.assertThat(r, matchers.HasLength(self.length))
            self.assertThat(r, matchers.MatchesRegex(regex))
| {
"content_hash": "39c14db314315a231d9d6d26d9c6ca43",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 77,
"avg_line_length": 32.113821138211385,
"alnum_prop": 0.5864556962025317,
"repo_name": "pshchelo/heat",
"id": "cec5b497ec9cf1bceaec16f030abfb0ec184aa21",
"size": "8475",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/tests/test_random_string.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5949441"
},
{
"name": "Shell",
"bytes": "25070"
}
],
"symlink_target": ""
} |
import mock
from oslo_config import cfg
from oslo_log import log as logging
from mistral.services import periodic
from mistral.services import security
from mistral.services import triggers
from mistral.services import workflows
from mistral.tests.unit.engine import base
LOG = logging.getLogger(__name__)
WORKFLOW_LIST = """
---
version: '2.0'
my_wf:
type: direct
tasks:
task1:
action: std.echo output='Hi!'
"""
class ProcessCronTriggerTest(base.EngineTestCase):
    """Tests for periodic processing of cron triggers."""

    # Replace trust creation with a stub object exposing a fixed id, so the
    # test can assert the trust id is stored without touching keystone.
    @mock.patch.object(security,
                       'create_trust',
                       type('trust', (object,), {'id': 'my_trust_id'}))
    def test_start_workflow(self):
        """Cron trigger with auth enabled: trust is created and stored.

        auth_enable must be True *before* the trigger is created so that
        trust creation happens; it is switched back off afterwards for the
        periodic-task run.
        """
        cfg.CONF.set_default('auth_enable', True, group='pecan')
        wf = workflows.create_workflows(WORKFLOW_LIST)[0]
        t = triggers.create_cron_trigger(
            'test',
            wf.name,
            {},
            {},
            '* * * * * */1',
            None,
            None,
            None
        )
        # The stubbed create_trust above supplies this id.
        self.assertEqual('my_trust_id', t.trust_id)
        cfg.CONF.set_default('auth_enable', False, group='pecan')
        next_trigger = triggers.get_next_cron_triggers()[0]
        next_execution_time_before = next_trigger.next_execution_time
        periodic.MistralPeriodicTasks(cfg.CONF).process_cron_triggers_v2(None)
        next_trigger = triggers.get_next_cron_triggers()[0]
        next_execution_time_after = next_trigger.next_execution_time
        # Checking the workflow was executed, by
        # verifying that the next execution time changed.
        self.assertNotEqual(
            next_execution_time_before,
            next_execution_time_after
        )

    def test_workflow_without_auth(self):
        """Cron trigger fires with authentication disabled."""
        cfg.CONF.set_default('auth_enable', False, group='pecan')
        wf = workflows.create_workflows(WORKFLOW_LIST)[0]
        triggers.create_cron_trigger(
            'test',
            wf.name,
            {},
            {},
            '* * * * * */1',
            None,
            None,
            None
        )
        next_triggers = triggers.get_next_cron_triggers()
        self.assertEqual(1, len(next_triggers))
        next_trigger = next_triggers[0]
        next_execution_time_before = next_trigger.next_execution_time
        periodic.MistralPeriodicTasks(cfg.CONF).process_cron_triggers_v2(None)
        next_triggers = triggers.get_next_cron_triggers()
        self.assertEqual(1, len(next_triggers))
        next_trigger = next_triggers[0]
        next_execution_time_after = next_trigger.next_execution_time
        # The next execution time advancing proves the trigger was processed.
        self.assertNotEqual(
            next_execution_time_before,
            next_execution_time_after
        )
| {
"content_hash": "c9feb10d1ae6100c8c3742c27195654b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 26.89,
"alnum_prop": 0.5939010784678319,
"repo_name": "dennybaa/mistral",
"id": "992dbd073d073c7acb7777f896c26a119aedbb86",
"size": "3298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/tests/unit/engine/test_cron_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "1037769"
},
{
"name": "Shell",
"bytes": "18657"
}
],
"symlink_target": ""
} |
from libcloud.common.types import LibcloudError
# Public API of this module.
__all__ = [
    'Provider',
    'RecordType',
    'ZoneError',
    'ZoneDoesNotExistError',
    'ZoneAlreadyExistsError',
    'RecordError',
    'RecordDoesNotExistError',
    'RecordAlreadyExistsError'
]
class Provider(object):
    """
    String constants identifying the supported DNS providers.

    These values are used as driver identifiers when selecting a DNS
    driver implementation.
    """
    DUMMY = 'dummy'
    LINODE = 'linode'
    RACKSPACE = 'rackspace'
    ZERIGO = 'zerigo'
    ROUTE53 = 'route53'
    HOSTVIRTUAL = 'hostvirtual'
    GANDI = 'gandi'
    GOOGLE = 'google'
    SOFTLAYER = 'softlayer'
    DIGITAL_OCEAN = 'digitalocean'
    AURORADNS = 'auroradns'
    WORLDWIDEDNS = 'worldwidedns'
    DNSIMPLE = 'dnsimple'
    POINTDNS = 'pointdns'
    VULTR = 'vultr'
    LIQUIDWEB = 'liquidweb'
    ZONOMI = 'zonomi'
    # Deprecated
    RACKSPACE_US = 'rackspace_us'
    RACKSPACE_UK = 'rackspace_uk'
class RecordType(object):
    """
    DNS record type.

    String constants naming the supported DNS record types.
    """
    A = 'A'
    AAAA = 'AAAA'
    ALIAS = 'ALIAS'
    MX = 'MX'
    NS = 'NS'
    CNAME = 'CNAME'
    DNAME = 'DNAME'
    HINFO = 'HINFO'
    TXT = 'TXT'
    # PTR was previously declared twice; the duplicate assignment is removed.
    PTR = 'PTR'
    SOA = 'SOA'
    SPF = 'SPF'
    SRV = 'SRV'
    SSHFP = 'SSHFP'
    NAPTR = 'NAPTR'
    REDIRECT = 'REDIRECT'
    GEO = 'GEO'
    URL = 'URL'
    WKS = 'WKS'
    LOC = 'LOC'
class ZoneError(LibcloudError):
    """Base error for zone operations; carries the id of the affected zone."""
    error_type = 'ZoneError'
    kwargs = ('zone_id', )

    def __init__(self, value, driver, zone_id):
        self.zone_id = zone_id
        super(ZoneError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return '<{0} in {1}, zone_id={2}, value={3}>'.format(
            self.error_type, repr(self.driver), self.zone_id, self.value)
class ZoneDoesNotExistError(ZoneError):
    # Raised when an operation references a zone id unknown to the provider.
    error_type = 'ZoneDoesNotExistError'
class ZoneAlreadyExistsError(ZoneError):
    # Raised when attempting to create a zone that already exists.
    error_type = 'ZoneAlreadyExistsError'
class RecordError(LibcloudError):
    """Base error for record operations; carries the id of the affected record."""
    error_type = 'RecordError'
    # Mirrors ZoneError's ``kwargs`` declaration so both error families
    # expose their identifying attribute consistently.
    kwargs = ('record_id', )

    def __init__(self, value, driver, record_id):
        self.record_id = record_id
        super(RecordError, self).__init__(value=value, driver=driver)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        return ('<%s in %s, record_id=%s, value=%s>' %
                (self.error_type, repr(self.driver),
                 self.record_id, self.value))
class RecordDoesNotExistError(RecordError):
    # Raised when an operation references a record id unknown to the provider.
    error_type = 'RecordDoesNotExistError'
class RecordAlreadyExistsError(RecordError):
    # Raised when attempting to create a record that already exists.
    error_type = 'RecordAlreadyExistsError'
| {
"content_hash": "3ab6dc80d1a267e5fe2e06848f67444e",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 69,
"avg_line_length": 22.366071428571427,
"alnum_prop": 0.5900199600798404,
"repo_name": "carletes/libcloud",
"id": "192ae28c1d34b4a156c4036b8db5646e96aa8594",
"size": "3287",
"binary": false,
"copies": "2",
"ref": "refs/heads/trunk",
"path": "libcloud/dns/types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "Python",
"bytes": "4087198"
},
{
"name": "Shell",
"bytes": "13868"
}
],
"symlink_target": ""
} |
from sys import exit, argv
from os import environ, system
# Must be set before any keras import so the correct backend is selected.
environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
from keras.models import Model, load_model
from subtlenet import config
import subtlenet.generators.gen as gen
from paths import basedir
from subtlenet.backend.layers import *
# CLI args: truncation and limit — presumably the per-particle feature
# truncation and the max particle count (used as slice bounds in
# predict_t below) — TODO confirm against subtlenet docs.
gen.truncate = int(argv[1])
config.limit = int(argv[2])
name = 'dense'
print 'inferring',name
# Load the best checkpoint trained for this (truncate, limit) point.
shallow = load_model('dense_models/classifier_v4_trunc%i_limit%i_best.h5'%(gen.truncate, config.limit),
                     custom_objects={'DenseBroadcast':DenseBroadcast})
coll = gen.make_coll(basedir + '/PARTITION/*_CATEGORY.npy')
# Scale factors mapping mass and pt into the model's normalized input range.
msd_norm_factor = 1. / config.max_mass
pt_norm_factor = 1. / (config.max_pt - config.min_pt)
# Column indices of mass and pt within the 'singletons' array.
msd_index = config.gen_singletons['msd']
pt_index = config.gen_singletons['pt']
def predict_t(data):
    """Score one batch with the dense model and return the last-class output.

    Reads module-level globals: the loaded ``shallow`` model, the
    normalization factors, and the singleton column indices.
    """
    singles = data['singletons']
    msd = singles[:, msd_index] * msd_norm_factor
    pt = (singles[:, pt_index] - config.min_pt) * pt_norm_factor
    # Empty batch: nothing to score.
    if msd.shape[0] == 0:
        return np.empty((0,))
    # Slicing with None is equivalent to an unbounded slice, so a zero
    # (falsy) config.limit keeps all particles.
    particle_cap = config.limit if config.limit else None
    particles = data['particles'][:, :particle_cap, :gen.truncate]
    return shallow.predict([particles, msd, pt])[:, config.n_truth - 1]
coll.infer(['singletons','particles'], f=predict_t, name=name, partition='test')
| {
"content_hash": "629cb94ea2bd89c1989acef668db557a",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 103,
"avg_line_length": 33.90243902439025,
"alnum_prop": 0.6712230215827338,
"repo_name": "sidnarayanan/BAdNet",
"id": "f0ab4e674ef6d037089c8fad1bba28a8ac138452",
"size": "1419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "train/gen/baseline/old/infer_dense.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "326584"
},
{
"name": "Shell",
"bytes": "900"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.