text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import numpy as np
# Example taken from : http://cs231n.github.io/python-numpy-tutorial/#numpy
# Demonstrates np.sum over a whole array and along each axis.
x = np.array([[1, 2], [3, 4]])
# Python 3 print() syntax (also valid on Python 2 for a single argument);
# the old `print expr` statement form is a SyntaxError on Python 3.
print(np.sum(x))          # Compute sum of all elements; prints "10"
print(np.sum(x, axis=0))  # Compute sum of each column; prints "[4 6]"
print(np.sum(x, axis=1))  # Compute sum of each row; prints "[3 7]"
|
{
"content_hash": "9e263cc09b526fc312ff2496a3ff53ac",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 75,
"avg_line_length": 35.888888888888886,
"alnum_prop": 0.6687306501547987,
"repo_name": "kmova/bootstrap",
"id": "2bae1585270c95fac78cda000b47724f9bc0f8c9",
"size": "323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/py2docker/numpy-sum.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "614"
},
{
"name": "Go",
"bytes": "17385"
},
{
"name": "JavaScript",
"bytes": "269"
},
{
"name": "Makefile",
"bytes": "569"
},
{
"name": "Python",
"bytes": "843"
},
{
"name": "Ruby",
"bytes": "15920"
},
{
"name": "Shell",
"bytes": "26286"
},
{
"name": "Smarty",
"bytes": "516"
}
],
"symlink_target": ""
}
|
"""
Module contains in-memory rebalance tests.
"""
from ducktape.mark import defaults
from ignitetest.services.ignite import IgniteService
from ignitetest.services.utils.ignite_configuration.discovery import from_ignite_cluster
from ignitetest.tests.rebalance.util import preload_data, start_ignite, get_result, TriggerEvent, NUM_NODES, \
await_rebalance_start, RebalanceParams
from ignitetest.utils import cluster, ignite_versions
from ignitetest.utils.ignite_test import IgniteTest
from ignitetest.utils.version import DEV_BRANCH, LATEST
class RebalanceInMemoryTest(IgniteTest):
    """
    Tests rebalance scenarios in in-memory mode.

    Two triggers are covered: a node joining the cluster and a node
    leaving it.  Both delegate to ``__run`` which starts the cluster,
    preloads data, fires the trigger event and collects rebalance stats.
    """
    @cluster(num_nodes=NUM_NODES)
    @ignite_versions(str(DEV_BRANCH), str(LATEST))
    @defaults(backups=[1], cache_count=[1], entry_count=[15_000], entry_size=[50_000], preloaders=[1],
              thread_pool_size=[None], batch_size=[None], batches_prefetch_count=[None], throttle=[None])
    def test_node_join(self, ignite_version,
                       backups, cache_count, entry_count, entry_size, preloaders,
                       thread_pool_size, batch_size, batches_prefetch_count, throttle):
        """
        Tests rebalance on node join.
        """
        return self.__run(ignite_version, TriggerEvent.NODE_JOIN,
                          backups, cache_count, entry_count, entry_size, preloaders,
                          thread_pool_size, batch_size, batches_prefetch_count, throttle)

    @cluster(num_nodes=NUM_NODES)
    @ignite_versions(str(DEV_BRANCH), str(LATEST))
    @defaults(backups=[1], cache_count=[1], entry_count=[15_000], entry_size=[50_000], preloaders=[1],
              thread_pool_size=[None], batch_size=[None], batches_prefetch_count=[None], throttle=[None])
    def test_node_left(self, ignite_version,
                       backups, cache_count, entry_count, entry_size, preloaders,
                       thread_pool_size, batch_size, batches_prefetch_count, throttle):
        """
        Tests rebalance on node left.
        """
        return self.__run(ignite_version, TriggerEvent.NODE_LEFT,
                          backups, cache_count, entry_count, entry_size, preloaders,
                          thread_pool_size, batch_size, batches_prefetch_count, throttle)

    def __run(self, ignite_version, trigger_event,
              backups, cache_count, entry_count, entry_size, preloaders,
              thread_pool_size, batch_size, batches_prefetch_count, throttle):
        """
        Test performs rebalance test which consists of following steps:
          * Start cluster.
          * Put data to it via IgniteClientApp.
          * Triggering a rebalance event and awaits for rebalance to finish.
        :param ignite_version: Ignite version.
        :param trigger_event: Trigger event.
        :param backups: Backup count.
        :param cache_count: Cache count.
        :param entry_count: Cache entry count.
        :param entry_size: Cache entry size.
        :param preloaders: Preload application nodes count.
        :param thread_pool_size: rebalanceThreadPoolSize config property.
        :param batch_size: rebalanceBatchSize config property.
        :param batches_prefetch_count: rebalanceBatchesPrefetchCount config property.
        :param throttle: rebalanceThrottle config property.
        :return: Rebalance and data preload stats.
        """
        reb_params = RebalanceParams(trigger_event=trigger_event, backups=backups, cache_count=cache_count,
                                     entry_count=entry_count, entry_size=entry_size, preloaders=preloaders,
                                     thread_pool_size=thread_pool_size, batch_size=batch_size,
                                     batches_prefetch_count=batches_prefetch_count, throttle=throttle)

        ignites = start_ignite(self.test_context, ignite_version, reb_params)

        # Preload via a client-mode copy of the server config, discovering
        # the running cluster.
        preload_time = preload_data(
            self.test_context,
            ignites.config._replace(client_mode=True, discovery_spi=from_ignite_cluster(ignites)),
            rebalance_params=reb_params)

        # NOTE(review): this branch relies on TriggerEvent.NODE_JOIN being
        # falsy (e.g. 0) and NODE_LEFT truthy — confirm against
        # rebalance.util.TriggerEvent.
        if trigger_event:
            # Node-left scenario: stop the last node; the survivors rebalance.
            ignites.stop_node(ignites.nodes[-1])
            rebalance_nodes = ignites.nodes[:-1]
        else:
            # Node-join scenario: start one extra node and wait for the
            # rebalance triggered by its arrival to complete.
            ignite = IgniteService(self.test_context,
                                   ignites.config._replace(discovery_spi=from_ignite_cluster(ignites)), num_nodes=1)
            ignite.start()
            rebalance_nodes = ignite.nodes

            await_rebalance_start(ignite)

            ignite.await_rebalance()

        return get_result(rebalance_nodes, preload_time, cache_count, entry_count, entry_size)
|
{
"content_hash": "e85a7c87a32be04d45d969288029689a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 116,
"avg_line_length": 50.02150537634409,
"alnum_prop": 0.6425193465176269,
"repo_name": "NSAmelchev/ignite",
"id": "2e10641afdd89a525c05704d3fb1df62c385c8bf",
"size": "5433",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/ducktests/tests/ignitetest/tests/rebalance/in_memory_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "54788"
},
{
"name": "C",
"bytes": "7601"
},
{
"name": "C#",
"bytes": "7740054"
},
{
"name": "C++",
"bytes": "4487801"
},
{
"name": "CMake",
"bytes": "54473"
},
{
"name": "Dockerfile",
"bytes": "11909"
},
{
"name": "FreeMarker",
"bytes": "15591"
},
{
"name": "HTML",
"bytes": "14341"
},
{
"name": "Java",
"bytes": "50117357"
},
{
"name": "JavaScript",
"bytes": "1085"
},
{
"name": "Jinja",
"bytes": "32958"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "PHP",
"bytes": "11079"
},
{
"name": "PowerShell",
"bytes": "9247"
},
{
"name": "Python",
"bytes": "330115"
},
{
"name": "Scala",
"bytes": "425434"
},
{
"name": "Shell",
"bytes": "311510"
}
],
"symlink_target": ""
}
|
import logging
import re
import uuid
from smtplib import SMTPException
from django.conf import settings
from django.contrib import auth, messages
from django.core.mail import send_mail
from django.db import IntegrityError
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, redirect, render
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.translation import ugettext as _
from django.views.decorators.debug import sensitive_post_parameters
from apps.authentication.forms import ChangePasswordForm, LoginForm, RecoveryForm, RegisterForm
from apps.authentication.models import Email
from apps.authentication.models import OnlineUser as User
from apps.authentication.models import RegisterToken
@sensitive_post_parameters()
def login(request):
    """Authenticate via LoginForm, then redirect to ``next`` or the front page.

    On a failed POST the form is re-rendered with the submitted data; on GET
    an empty form is shown.
    """
    next_url = request.GET.get('next', '')

    if request.method == 'POST':
        form = LoginForm(request.POST)
        if form.login(request):
            messages.success(request, _('Du er nå logget inn.'), extra_tags='data-dismiss')
            return HttpResponseRedirect(next_url if next_url else '/')
        # Invalid credentials: rebuild the bound form for redisplay.
        form = LoginForm(request.POST, auto_id=True)
    else:
        form = LoginForm()

    return render(request, 'auth/login.html', {'form': form, 'next': next_url})
def logout(request):
    """End the session and send the user to the front page with a notice."""
    auth.logout(request)
    messages.success(request, _('Du er nå logget ut.'), extra_tags='data-dismiss')
    return redirect('/')
@sensitive_post_parameters()
def register(request):
    """Create a new (inactive) account from RegisterForm input.

    Saves the user, their primary email and a verification RegisterToken,
    then mails a verification link.  Registration requires being logged out.
    Renders the registration form on GET or invalid POST.
    """
    log = logging.getLogger(__name__)
    if request.user.is_authenticated:
        messages.error(request, _('Registrering av ny konto krever at du er logget ut.'))
        return HttpResponseRedirect('/')
    else:
        if request.method == 'POST':
            form = RegisterForm(request.POST)
            if form.is_valid():
                cleaned = form.cleaned_data

                # Create user
                user = User(
                    username=cleaned['username'],
                    first_name=cleaned['first_name'].title(),
                    last_name=cleaned['last_name'].title(),
                )
                # Set remaining fields
                user.phone_number = cleaned['phone']
                user.address = cleaned['address'].title()
                user.zip_code = cleaned['zip_code']
                # Store password properly
                user.set_password(cleaned['password'])
                # Users need to be manually activated
                user.is_active = False
                user.save()

                # Set email address
                email = Email(
                    user=user,
                    email=cleaned['email'].lower(),
                )
                email.primary = True
                email.save()

                # Create the registration token
                token = uuid.uuid4().hex
                try:
                    rt = RegisterToken(user=user, email=email.email, token=token)
                    rt.save()
                    # Fixed: request.user is AnonymousUser during registration;
                    # log the account the token actually belongs to.
                    log.info('Successfully registered token for %s' % user)
                except IntegrityError as ie:
                    log.error('Failed to register token for "%s" due to "%s"' % (user, ie))

                # Build and send the verification email.
                email_context = {}
                verify_url = reverse('auth_verify', args=(token,))
                email_context['verify_url'] = request.build_absolute_uri(verify_url)
                message = render_to_string('auth/email/welcome_tpl.txt', email_context)
                try:
                    send_mail(_('Verifiser din konto'), message, settings.DEFAULT_FROM_EMAIL, [email.email, ])
                except SMTPException:
                    messages.error(request, 'Det oppstod en kritisk feil, epostadressen er ugyldig!')
                    return redirect('home')

                messages.success(
                    request,
                    _('Registreringen var vellykket. Se tilsendt epost for verifiseringsinstrukser.')
                )
                return HttpResponseRedirect('/')
            else:
                form = RegisterForm(request.POST, auto_id=True)
        else:
            form = RegisterForm()
        return render(request, 'auth/register.html', {'form': form, })
def verify(request, token):
    # Confirm an email address from a RegisterToken link.  If the account was
    # not yet active this also activates it and logs the user straight in;
    # otherwise only the extra email address is marked verified.
    log = logging.getLogger(__name__)
    rt = get_object_or_404(RegisterToken, token=token)
    if rt.is_valid:
        email = get_object_or_404(Email, email=rt.email)
        email.verified = True
        email.save()

        user = getattr(rt, 'user')
        # If it is a stud email, set the ntnu_username for user
        if re.match(r'[^@]+@stud\.ntnu\.no', rt.email):
            user.ntnu_username = rt.email.split("@")[0]
            log.info('Set ntnu_username for user %s to %s' % (user, rt.email))
        # Check if Online-member, and set Infomail to True is he/she is
        if user.is_member:
            user.infomail = True

        # Remember whether this verification flipped the account active, so we
        # can distinguish first-time activation from verifying an extra email.
        user_activated = False
        if not user.is_active:
            user.is_active = True
            user_activated = True

        user.save()
        # Token is single-use: delete it once consumed.
        rt.delete()

        if user_activated:
            log.info('New user %s was activated' % user)
            # auth.login() requires a backend attribute when authenticate()
            # was not called first.
            user.backend = "django.contrib.auth.backends.ModelBackend"
            auth.login(request, user)
            messages.success(request, _(
                'Bruker %s ble aktivert. Du er nå logget inn. '
                'Kikk rundt på "Min Side" for å oppdatere profilinnstillinger.') % user.username)
            return redirect('profiles')
        else:
            log.info('New email %s was verified for user %s' % (email, user))
            messages.success(request, _('Eposten %s er nå verifisert.') % email)
            return redirect('profile_add_email')
    else:
        # Expired token: tell the user to request a fresh link via recovery.
        log.debug('Failed to verify email due to invalid register token')
        messages.error(request, _('Denne lenken er utløpt. Bruk gjenopprett passord for å få tilsendt en ny lenke.'))
        return HttpResponseRedirect('/')
def recover(request):
    """Start password recovery for the address given in RecoveryForm.

    Looks up the email, stores a fresh RegisterToken and mails a
    set-password link.  Requires being logged out.  Renders the recovery
    form on GET or invalid POST.
    """
    log = logging.getLogger(__name__)
    if request.user.is_authenticated:
        messages.error(request, _('Gjenoppretning av passord krever at du er logget ut.'))
        return HttpResponseRedirect('/')
    else:
        if request.method == 'POST':
            form = RecoveryForm(request.POST)
            if form.is_valid():
                email_string = form.cleaned_data['email']
                emails = Email.objects.filter(email=email_string)

                if len(emails) == 0:
                    messages.error(request, _('Denne eposten er ikke registrert i våre systemer.'))
                    return HttpResponseRedirect('/')

                email = emails[0]

                # Create the registration token
                token = uuid.uuid4().hex
                try:
                    rt = RegisterToken(user=email.user, email=email.email, token=token)
                    rt.save()
                    # Fixed: request.user is AnonymousUser during recovery;
                    # log the account the token was issued for.
                    log.info('Successfully registered token for %s' % email.user)
                except IntegrityError as ie:
                    log.error('Failed to register token for "%s" due to "%s"' % (email.user, ie))
                    raise ie

                # Build and send the password-reset email.
                email_context = {}
                email_context['email'] = email.email
                email_context['username'] = email.user.username
                set_password_url = reverse('auth_set_password', args=(token,))
                email_context['reset_url'] = request.build_absolute_uri(set_password_url)
                email_message = render_to_string('auth/email/password_reset_tpl.txt', email_context)

                send_mail(_('Gjenoppretting av passord'), email_message, settings.DEFAULT_FROM_EMAIL, [email.email, ])

                messages.success(request, _('En lenke for gjenoppretting har blitt sendt til %s.') % email.email)
                return HttpResponseRedirect('/')
            else:
                form = RecoveryForm(request.POST, auto_id=True)
        else:
            form = RecoveryForm()
        return render(request, 'auth/recover.html', {'form': form})
@sensitive_post_parameters()
def set_password(request, token=None):
    # Let the holder of a valid RegisterToken (issued by recover()) choose a
    # new password.  Also re-activates the account and consumes the token.
    log = logging.getLogger(__name__)
    if request.user.is_authenticated:
        return HttpResponseRedirect('/')
    else:
        rt = None
        try:
            rt = RegisterToken.objects.get(token=token)
        except RegisterToken.DoesNotExist:
            # Unknown token: fall through with rt=None so the generic error
            # path at the bottom handles the redirect.
            log.debug('%s tried to set password with nonexisting/expired token %s' % (request.user, token))
            messages.error(request, 'Denne lenken er utløpt. Bruk gjenopprett passord for å få tilsendt en ny lenke.')

        if rt and rt.is_valid:
            if request.method == 'POST':
                form = ChangePasswordForm(request.POST, auto_id=True)
                if form.is_valid():
                    user = getattr(rt, 'user')
                    user.is_active = True
                    user.set_password(form.cleaned_data['new_password'])
                    user.save()

                    # Token is single-use: delete it once the password is set.
                    rt.delete()

                    messages.success(
                        request,
                        _('Passordgjenoppretting gjennomført for "%s". ' +
                          'Du kan nå logge inn.') % user.username
                    )
                    log.info('User "%s" successfully recovered password.' % request.user)
                    return HttpResponseRedirect('/')
                else:
                    messages.error(request, 'Noe gikk galt med gjenoppretting av passord. Vennligst prøv igjen.')
                    log.debug('User %s failed to recover password with token %s. '
                              '[form.is_valid => False]' % (request.user, rt))
                    return HttpResponseRedirect('/')
            else:
                # GET with a valid token: show the empty change-password form.
                form = ChangePasswordForm()
                messages.success(request, _('Lenken er akseptert. Vennligst skriv inn ønsket passord.'))
                return render(request, 'auth/set_password.html', {'form': form, 'token': token})

        # rt missing or expired.
        log.debug('User %s failed to recover password with token %s.' % (request.user, rt))
        messages.error(
            request, 'Noe gikk galt med gjenoppretning av passord. Vennligst prøv igjen.')
        return HttpResponseRedirect('/')
|
{
"content_hash": "c2a31e5a9bf6e2420b810a7aa3790b8a",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 118,
"avg_line_length": 40.353846153846156,
"alnum_prop": 0.5702439954250857,
"repo_name": "dotKom/onlineweb4",
"id": "5cefa9ccadfb1f5eb871b23be625dc5903fb1462",
"size": "10534",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/authentication/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "71414"
},
{
"name": "HTML",
"bytes": "463894"
},
{
"name": "JavaScript",
"bytes": "745404"
},
{
"name": "Python",
"bytes": "925584"
},
{
"name": "Shell",
"bytes": "3130"
},
{
"name": "Standard ML",
"bytes": "1088"
}
],
"symlink_target": ""
}
|
from gluon import *
from s3 import *
from s3layouts import *
try:
from .layouts import *
except ImportError:
pass
import s3menus as default
# Below is an example which you can base your own template's menus.py on
# - there are also other examples in the other templates folders
# =============================================================================
class S3MainMenu(default.S3MainMenu):
    """
        Custom Application Main Menu:

        The main menu consists of several sub-menus, each of which can
        be customised separately as a method of this class. The overall
        composition of the menu is defined in the menu() method, which can
        be customised as well:

        Function        Sub-Menu                Access to (standard)

        menu_modules()  the modules menu        the Eden modules
        menu_gis()      the GIS menu            GIS configurations
        menu_admin()    the Admin menu          System/User Administration
        menu_lang()     the Language menu       Selection of the GUI locale
        menu_auth()     the User menu           Login, Logout, User Profile
        menu_help()     the Help menu           Contact page, About page

        The standard uses the MM layout class for main menu items - but you
        can of course use a custom layout class which you define in layouts.py.

        Additional sub-menus can simply be defined as additional functions in
        this class, and then be included in the menu() method.

        Each sub-menu function returns a list of menu items, only the menu()
        function must return a layout class instance.
    """

    # -------------------------------------------------------------------------
    @classmethod
    def menu_modules(cls):
        """ Custom Modules Menu """

        # The commented entries below are deliberate examples kept from the
        # template — uncomment to expose the corresponding module.
        return [
            homepage(name="EVASS"),
            homepage("gis"),
            # homepage("evr")(
            #     MM("Persons", f="person"),
            #     MM("Groups", f="group")
            # ),
            # homepage("cr"),
            homepage("msg"),
            # homepage("event"),
            # homepage("irs"),
            # homepage("vol"),
            # homepage("hrm"),
            #MM("more", link=False)(
            #),
        ]
# =============================================================================
class S3OptionsMenu(default.S3OptionsMenu):
    """
        Custom Controller Menus

        The options menu (left-hand options menu) is individual for each
        controller, so each controller has its own options menu function
        in this class.

        Each of these option menu functions can be customised separately,
        by simply overriding (re-defining) the default function. The
        options menu function must return an instance of the item layout.

        The standard menu uses the M item layout class, but you can of
        course also use any other layout class which you define in
        layouts.py (can also be mixed).

        Make sure additional helper functions in this class don't match
        any current or future controller prefix (e.g. by using an
        underscore prefix).
    """
    # Fixed: this documentation was a block of '#' comments instead of a real
    # class docstring, so it was invisible to help() and introspection.

    # -------------------------------------------------------------------------
    def evr(self):
        """ EVR / Evacuees Registry controller options menu """

        return M(c="evr")(
            M("Persons", f="person")(
                M("New", m="create"),
                M("Import", m="import")
            ),
            M("Groups", f="group")(
                M("New", m="create"),
            ),
        )
# END =========================================================================
|
{
"content_hash": "87a1679121aa72c35834a47069b36dee",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 79,
"avg_line_length": 37.29,
"alnum_prop": 0.5089836417270046,
"repo_name": "code-for-india/sahana_shelter_worldbank",
"id": "4e2bf6cc5d19520f4dabcaa24d1d76d48c4af492",
"size": "3754",
"binary": false,
"copies": "1",
"ref": "refs/heads/hackathon",
"path": "private/templates/EVASS/menus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1214342"
},
{
"name": "JavaScript",
"bytes": "16755282"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "27298931"
},
{
"name": "Shell",
"bytes": "893"
},
{
"name": "XSLT",
"bytes": "2245739"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from unittest import mock
from murano.common import server
from murano.services import states
from murano.tests.unit import base
from murano.tests.unit import utils as test_utils
class ServerTest(base.MuranoTestCase):
    """Unit tests for murano.common.server.

    Covers ResultEndpoint.process_result (success, error, warning and
    missing-environment paths), instance (un)tracking, deployment status
    notifications and the Service/NotificationService/ApiService wrappers.
    All DB/session/model access is mocked.
    """
    @classmethod
    def setUpClass(cls):
        super(ServerTest, cls).setUpClass()
        # Shared endpoint and request context for all test methods.
        cls.result_endpoint = server.ResultEndpoint()
        cls.dummy_context = test_utils.dummy_context()

    # process_result on a successful deployment: env description updated,
    # version bumped, session marked DEPLOYED and a notification reported.
    @mock.patch('murano.common.server.status_reporter.get_notifier')
    @mock.patch('murano.common.server.LOG')
    @mock.patch('murano.common.server.get_last_deployment')
    @mock.patch('murano.common.server.models')
    @mock.patch('murano.common.server.session')
    def test_process_result(self, mock_db_session, mock_models,
                            mock_last_deployment, mock_log, mock_notifier):
        test_result = {
            'model': {
                'Objects': {
                    'applications': ['app1', 'app2'],
                    'services': ['service1', 'service2']
                }
            },
            'action': {
                'isException': False
            }
        }
        mock_env = mock.MagicMock(id='test_env_id',
                                  tenant_id='test_tenant_id',
                                  description=None,
                                  version=1)
        mock_db_session.get_session().query().get.return_value = mock_env
        # Zero error statuses -> "Deployment finished" at info level.
        mock_db_session.get_session().query().filter_by().count.\
            return_value = 0

        self.result_endpoint.process_result(self.dummy_context, test_result,
                                            'test_env_id')
        self.assertEqual(mock_env.description, test_result['model'])
        self.assertEqual(2, mock_env.version)
        self.assertEqual(test_result['action'],
                         mock_last_deployment().result)
        self.assertEqual('Deployment finished',
                         mock_models.Status().text)
        self.assertEqual('info', mock_models.Status().level)
        mock_last_deployment().statuses.append.assert_called_once_with(
            mock_models.Status())
        mock_db_session.get_session().query().filter_by.assert_any_call(
            **{'environment_id': mock_env.id,
               'state': states.SessionState.DEPLOYING})
        self.assertEqual(
            states.SessionState.DEPLOYED,
            mock_db_session.get_session().query().filter_by().first().state)
        mock_log.info.assert_called_once_with(
            'EnvId: {env_id} TenantId: {tenant_id} Status: '
            'Successful Apps: {services}'
            .format(env_id=mock_env.id,
                    tenant_id=mock_env.tenant_id,
                    services=test_result['model']['Objects']['services']))
        mock_notifier.return_value.report.assert_called_once_with(
            'environment.deploy.end',
            mock_db_session.get_session().query().get(mock_env.id).to_dict())

    # process_result when the action raised: session becomes DEPLOY_FAILURE
    # and a warning is logged.
    @mock.patch('murano.common.server.LOG')
    @mock.patch('murano.common.server.get_last_deployment')
    @mock.patch('murano.common.server.models')
    @mock.patch('murano.common.server.session')
    def test_process_result_with_errors(self, mock_db_session, mock_models,
                                        mock_last_deployment, mock_log):
        test_result = {
            'model': {
                'Objects': {
                    'applications': ['app1', 'app2'],
                    'services': ['service1', 'service2']
                }
            },
            'action': {
                'isException': True
            }
        }
        mock_env = mock.MagicMock(id='test_env_id',
                                  tenant_id='test_tenant_id',
                                  description=None,
                                  version=1)
        mock_db_session.get_session().query().get.return_value = mock_env
        # One error status -> "Deployment finished with errors".
        mock_db_session.get_session().query().filter_by().count.\
            return_value = 1

        self.result_endpoint.process_result(self.dummy_context, test_result,
                                            'test_env_id')
        self.assertEqual(mock_env.description, test_result['model'])
        self.assertEqual(test_result['action'],
                         mock_last_deployment().result)
        self.assertEqual('Deployment finished with errors',
                         mock_models.Status().text)
        mock_last_deployment().statuses.append.assert_called_once_with(
            mock_models.Status())
        mock_db_session.get_session().query().filter_by.assert_any_call(
            **{'environment_id': mock_env.id,
               'state': states.SessionState.DEPLOYING})
        self.assertEqual(
            states.SessionState.DEPLOY_FAILURE,
            mock_db_session.get_session().query().filter_by().first().state)
        mock_log.warning.assert_called_once_with(
            'EnvId: {env_id} TenantId: {tenant_id} Status: '
            'Failed Apps: {services}'
            .format(env_id=mock_env.id,
                    tenant_id=mock_env.tenant_id,
                    services=test_result['model']['Objects']['services']))

    # process_result for a deletion (Objects=None) with warnings: session
    # becomes DELETE_FAILURE and the "Deletion finished with warnings" status
    # is recorded.
    @mock.patch('murano.common.server.LOG')
    @mock.patch('murano.common.server.get_last_deployment')
    @mock.patch('murano.common.server.models')
    @mock.patch('murano.common.server.session')
    def test_process_result_with_warnings(self, mock_db_session, mock_models,
                                          mock_last_deployment, mock_log):
        test_result = {
            'model': {
                'Objects': None,
                'ObjectsCopy': ['object1', 'object2']
            },
            'action': {
                'isException': True
            }
        }
        mock_env = mock.MagicMock(id='test_env_id',
                                  tenant_id='test_tenant_id',
                                  description=None,
                                  version=1)
        mock_db_session.get_session().query().get.return_value = mock_env
        # num_errors will be initialized to 0, num_warnings to 1
        mock_db_session.get_session().query().filter_by().count.\
            side_effect = [0, 1]

        self.result_endpoint.process_result(self.dummy_context, test_result,
                                            'test_env_id')
        self.assertEqual(mock_env.description, test_result['model'])
        self.assertEqual(test_result['action'],
                         mock_last_deployment().result)
        self.assertEqual('Deletion finished with warnings',
                         mock_models.Status().text)
        mock_last_deployment().statuses.append.assert_called_once_with(
            mock_models.Status())
        mock_db_session.get_session().query().filter_by.assert_any_call(
            **{'environment_id': mock_env.id,
               'state': states.SessionState.DELETING})
        self.assertEqual(
            states.SessionState.DELETE_FAILURE,
            mock_db_session.get_session().query().filter_by().first().state)
        mock_log.warning.assert_called_once_with(
            'EnvId: {env_id} TenantId: {tenant_id} Status: '
            'Failed Apps: {services}'
            .format(env_id=mock_env.id,
                    tenant_id=mock_env.tenant_id,
                    services=[]))

    # process_result returns None and warns when the environment is gone.
    @mock.patch('murano.common.server.LOG')
    @mock.patch('murano.common.server.session')
    def test_process_result_with_no_environment(self, mock_db_session,
                                                mock_log):
        test_result = {'model': None}
        mock_db_session.get_session().query().get.return_value = None

        result = self.result_endpoint.process_result(self.dummy_context,
                                                     test_result,
                                                     'test_env_id')
        self.assertIsNone(result)
        mock_log.warning.assert_called_once_with(
            'Environment result could not be handled, '
            'specified environment not found in database')

    # With no Objects and no ObjectsCopy the environment itself is removed.
    @mock.patch('murano.common.server.environments')
    @mock.patch('murano.common.server.session')
    def test_process_result_with_no_objects(self, mock_db_session,
                                            mock_environments):
        test_result = {'model': {'Objects': None, 'ObjectsCopy': None}}

        result = self.result_endpoint.process_result(self.dummy_context,
                                                     test_result,
                                                     'test_env_id')
        self.assertIsNone(result)
        mock_environments.EnvironmentServices.remove.assert_called_once_with(
            'test_env_id')

    # track_instance forwards the payload fields in the expected order.
    @mock.patch('murano.common.server.instances')
    def test_track_instance(self, mock_instances):
        test_payload = {
            'instance': 'test_instance',
            'instance_type': 'test_instance_type',
            'environment': 'test_environment',
            'unit_count': 'test_unit_count',
            'type_name': 'test_type_name',
            'type_title': 'test_type_title'
        }
        server.track_instance(test_payload)
        mock_instances.InstanceStatsServices.track_instance.\
            assert_called_once_with(test_payload['instance'],
                                    test_payload['environment'],
                                    test_payload['instance_type'],
                                    test_payload['type_name'],
                                    test_payload['type_title'],
                                    test_payload['unit_count'])

    # untrack_instance destroys the stats record for the instance.
    @mock.patch('murano.common.server.instances')
    def test_untrack_instance(self, mock_instances):
        test_payload = {
            'instance': 'test_instance',
            'environment': 'test_environment'
        }
        server.untrack_instance(test_payload)
        mock_instances.InstanceStatsServices.destroy_instance.\
            assert_called_once_with(test_payload['instance'],
                                    test_payload['environment'])

    # report_notification stamps 'created', links the status to the latest
    # deployment and persists it.
    @mock.patch('murano.common.server.get_last_deployment')
    @mock.patch('murano.common.server.session')
    @mock.patch('murano.common.server.models')
    def test_report_notification(self, mock_models, mock_db_session,
                                 mock_last_deployment):
        mock_last_deployment.return_value = mock.MagicMock(
            id='test_deployment_id')
        test_report = {
            'id': 'test_report_id',
            'timestamp': datetime.now().isoformat(),
            'created': None
        }
        server.report_notification(test_report)
        self.assertIsNotNone(test_report['created'])
        mock_models.Status().update.assert_called_once_with(test_report)
        self.assertEqual('test_deployment_id', mock_models.Status().task_id)
        mock_db_session.get_session().add.assert_called_once_with(
            mock_models.Status())

    # get_last_deployment queries by environment id, newest first.
    def test_get_last_deployment(self):
        mock_unit = mock.MagicMock()
        result = server.get_last_deployment(mock_unit, 'test_env_id')
        self.assertEqual(mock_unit.query().filter_by().order_by().first(),
                         result)
        mock_unit.query().filter_by.assert_any_call(
            environment_id='test_env_id')

    # Service wrapper: stop() stops and waits; reset() resets the server.
    def test_service_class(self):
        service = server.Service()
        self.assertIsNone(service.server)

        # Test stop server.
        service.server = mock.MagicMock()
        service.stop(graceful=True)
        service.server.stop.assert_called_once_with()
        service.server.wait.assert_called_once_with()

        # Test reset server.
        service.reset()
        service.server.reset.assert_called_once_with()

    # NotificationService.start creates and starts a notification listener.
    @mock.patch('murano.common.rpc.messaging')
    def test_notification_service_class(self, mock_messaging):
        mock_server = mock.MagicMock()
        mock_messaging.get_notification_listener.return_value = mock_server

        notification_service = server.NotificationService()
        self.assertIsNone(notification_service.server)

        notification_service.start()
        self.assertEqual(1,
                         mock_messaging.get_notification_listener.call_count)
        mock_server.start.assert_called_once_with()

    # ApiService.start creates and starts an RPC server.
    @mock.patch('murano.common.rpc.messaging')
    def test_api_service_class(self, mock_messaging):
        mock_server = mock.MagicMock()
        mock_messaging.get_rpc_server.return_value = mock_server

        api_service = server.ApiService()
        api_service.start()
        self.assertEqual(1,
                         mock_messaging.get_rpc_server.call_count)
        mock_server.start.assert_called_once_with()
|
{
"content_hash": "21ec0dacec46e681d10f9c7e96abd5c6",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 77,
"avg_line_length": 44.048442906574394,
"alnum_prop": 0.558051846032993,
"repo_name": "openstack/murano",
"id": "b56f4cc6dd33fb564b5e8907649b5aadd6624b73",
"size": "13355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "murano/tests/unit/common/test_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "2026"
},
{
"name": "PowerShell",
"bytes": "2966"
},
{
"name": "Python",
"bytes": "1817159"
},
{
"name": "Shell",
"bytes": "37531"
}
],
"symlink_target": ""
}
|
from flask import Blueprint
from flask import render_template, jsonify
from flask.ext.login import login_required
from app.service import webcomicsService
mod = Blueprint('webcomics', __name__, )


@mod.route('/', methods=["GET"])
@login_required
def index():
    """Render the webcomics widget populated with metadata for all comics."""
    meta = webcomicsService.get_comics_meta_info()
    return render_template('pages/webcomics_widget.html', comics=meta)
@mod.route('/<comic_id>/sync', methods=["POST"])
@login_required
def sync_webcomic(comic_id):
    """Trigger a sync of one comic and return the service result as JSON."""
    outcome = webcomicsService.sync(comic_id)
    return jsonify(resp=outcome)
|
{
"content_hash": "d2b297f77b972382e99dff57b6fac96d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 26.904761904761905,
"alnum_prop": 0.7327433628318584,
"repo_name": "arpitbbhayani/penny",
"id": "e723937a6891e02669fcb9a1e4525886fc758e3b",
"size": "565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views/user/webcomics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "23"
},
{
"name": "CSS",
"bytes": "2022061"
},
{
"name": "HTML",
"bytes": "31272"
},
{
"name": "JavaScript",
"bytes": "2265006"
},
{
"name": "Lex",
"bytes": "1094"
},
{
"name": "Makefile",
"bytes": "1071"
},
{
"name": "Python",
"bytes": "56089"
},
{
"name": "Shell",
"bytes": "507"
},
{
"name": "Yacc",
"bytes": "5995"
}
],
"symlink_target": ""
}
|
"""Test suite that runs all NLTK tests.
This module, `nltk.test.all`, is named as the NLTK ``test_suite`` in the
project's ``setup-eggs.py`` file. Here, we create a test suite that
runs all of our doctests, and return it for processing by the setuptools
test harness.
"""
import doctest, unittest
from glob import glob
import os.path
def additional_tests():
    """Return a TestSuite running every ``*.doctest`` file in this directory.

    This is the setuptools ``test_suite`` entry point declared in
    ``setup-eggs.py``.
    """
    # Renamed from `dir`, which shadowed the builtin of the same name.
    test_dir = os.path.dirname(__file__)
    paths = glob(os.path.join(test_dir, '*.doctest'))
    files = [os.path.basename(path) for path in paths]
    # NOTE: index.rst used to be excluded here (it contains a time-dependent
    # doctest); the '*.doctest' glob no longer matches it anyway.
    return unittest.TestSuite(
        [doctest.DocFileSuite(file) for file in files]
    )
|
{
"content_hash": "1db4fe7bbade75d6848ec61a7dbd9bec",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 78,
"avg_line_length": 35.869565217391305,
"alnum_prop": 0.656969696969697,
"repo_name": "enriquesanchezb/practica_utad_2016",
"id": "c1ce9962bd26cd64d4d6f36ebbed370fd20633c8",
"size": "825",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/nltk/test/all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5785"
},
{
"name": "HTML",
"bytes": "284450"
},
{
"name": "JavaScript",
"bytes": "20876"
},
{
"name": "Python",
"bytes": "6659896"
},
{
"name": "Shell",
"bytes": "3296"
}
],
"symlink_target": ""
}
|
from django import forms
from private_messages.models import ConversationLog
class SendMessage(forms.Form):
    """Form for composing a private message, optionally flagged encrypted."""

    content = forms.CharField(widget=forms.Textarea)
    encrypted = forms.BooleanField(required=False)

    def __init__(self, *args, **kwargs):
        super(SendMessage, self).__init__(*args, **kwargs)
        # Size the message textarea to a compact 5x90 box.
        attrs = self.fields['content'].widget.attrs
        attrs['rows'] = 5
        attrs['cols'] = 90
class ConversationForm(forms.Form):
    """Form capturing a conversation's participants and its message log."""

    participants = forms.CharField(widget=forms.Textarea)
    log = forms.CharField(widget=forms.Textarea)
|
{
"content_hash": "1b0e076ab3392c1bc255cdbc3d02b463",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 54,
"avg_line_length": 36.11764705882353,
"alnum_prop": 0.739413680781759,
"repo_name": "jnayak1/cs3240-s16-team16",
"id": "32dc55ef3c08ce03624f51b1ca5a1d4e0dd07642",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "private_messages/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "417"
},
{
"name": "Python",
"bytes": "8227"
}
],
"symlink_target": ""
}
|
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
LOG = log.getLogger(__name__)
class HostMaintenance(base.HostMaintenanceBaseStrategy):
    """[PoC]Host Maintenance

    *Description*

    It is a migration strategy for one compute node maintenance,
    without having the user's application been interruptted.
    If given one backup node, the strategy will firstly
    migrate all instances from the maintenance node to
    the backup node. If the backup node is not provided,
    it will migrate all instances, relying on nova-scheduler.

    *Requirements*

    * You must have at least 2 physical compute nodes to run this strategy.

    *Limitations*

    - This is a proof of concept that is not meant to be used in production
    - It migrates all instances from one host to other hosts. It's better to
      execute such strategy when load is not heavy, and use this algorithm
      with `ONESHOT` audit.
    - It assumes that cold and live migrations are possible.
    """

    # Action type identifiers used when adding actions to the solution.
    INSTANCE_MIGRATION = "migrate"
    CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"
    # disabled_reason marker identifying nodes that watcher itself disabled
    # (candidates for use as backup nodes).
    REASON_FOR_DISABLE = 'watcher_disabled'

    def __init__(self, config, osc=None):
        """Initialize the strategy.

        :param config: strategy configuration (passed to the base strategy)
        :param osc: optional OpenStack client wrapper
        """
        super(HostMaintenance, self).__init__(config, osc)

    @classmethod
    def get_name(cls):
        """Return the unique strategy identifier."""
        return "host_maintenance"

    @classmethod
    def get_display_name(cls):
        """Return the translated human-readable strategy name."""
        return _("Host Maintenance Strategy")

    @classmethod
    def get_translatable_display_name(cls):
        """Return the untranslated display name (for translation catalogs)."""
        return "Host Maintenance Strategy"

    @classmethod
    def get_schema(cls):
        """Return the JSON schema of this strategy's input parameters.

        ``maintenance_node`` is mandatory; ``backup_node`` is optional.
        """
        return {
            "properties": {
                "maintenance_node": {
                    "description": "The name of the compute node which "
                                   "need maintenance",
                    "type": "string",
                },
                "backup_node": {
                    "description": "The name of the compute node which "
                                   "will backup the maintenance node.",
                    "type": "string",
                },
            },
            "required": ["maintenance_node"],
        }

    def get_disabled_compute_nodes_with_reason(self, reason=None):
        """Return online-but-disabled compute nodes with the given reason.

        :param reason: disabled_reason string to match (None matches nodes
            without a recorded reason)
        :return: dict mapping node uuid to compute node object
        """
        return {uuid: cn for uuid, cn in
                self.compute_model.get_all_compute_nodes().items()
                if cn.state == element.ServiceState.ONLINE.value and
                cn.status == element.ServiceState.DISABLED.value and
                cn.disabled_reason == reason}

    def get_disabled_compute_nodes(self):
        """Return compute nodes previously disabled by watcher itself."""
        return self.get_disabled_compute_nodes_with_reason(
            self.REASON_FOR_DISABLE)

    def get_instance_state_str(self, instance):
        """Get instance state in string format

        The model may store the state either as a plain string or as an
        :class:`element.InstanceState` enum member; normalize to str.
        """
        if isinstance(instance.state, str):
            return instance.state
        elif isinstance(instance.state, element.InstanceState):
            return instance.state.value
        else:
            LOG.error('Unexpected instance state type, '
                      'state=%(state)s, state_type=%(st)s.',
                      dict(state=instance.state,
                           st=type(instance.state)))
            raise exception.WatcherException

    def get_node_status_str(self, node):
        """Get node status in string format

        Mirrors :meth:`get_instance_state_str` for node status values.
        """
        if isinstance(node.status, str):
            return node.status
        elif isinstance(node.status, element.ServiceState):
            return node.status.value
        else:
            LOG.error('Unexpected node status type, '
                      'status=%(status)s, status_type=%(st)s.',
                      dict(status=node.status,
                           st=type(node.status)))
            raise exception.WatcherException

    def get_node_capacity(self, node):
        """Collect cpu, ram and disk capacity of a node.

        :param node: node object
        :return: dict(cpu(cores), ram(MB), disk(B))
        """
        return dict(cpu=node.vcpu_capacity,
                    ram=node.memory_mb_capacity,
                    disk=node.disk_gb_capacity)

    def host_fits(self, source_node, destination_node):
        """check host fits

        return True if VMs could intensively migrate
        from source_node to destination_node.

        Only vcpu and memory are compared; disk is deliberately ignored.
        """
        source_node_used = self.compute_model.get_node_used_resources(
            source_node)
        destination_node_free = self.compute_model.get_node_free_resources(
            destination_node)
        metrics = ['vcpu', 'memory']
        for m in metrics:
            if source_node_used[m] > destination_node_free[m]:
                return False
        return True

    def add_action_enable_compute_node(self, node):
        """Add an action for node enabler into the solution."""
        params = {'state': element.ServiceState.ENABLED.value,
                  'resource_name': node.hostname}
        self.solution.add_action(
            action_type=self.CHANGE_NOVA_SERVICE_STATE,
            resource_id=node.uuid,
            input_parameters=params)

    def add_action_maintain_compute_node(self, node):
        """Add an action for node maintenance into the solution."""
        # REASON_FOR_MAINTAINING is inherited from the base strategy class.
        params = {'state': element.ServiceState.DISABLED.value,
                  'disabled_reason': self.REASON_FOR_MAINTAINING,
                  'resource_name': node.hostname}
        self.solution.add_action(
            action_type=self.CHANGE_NOVA_SERVICE_STATE,
            resource_id=node.uuid,
            input_parameters=params)

    def enable_compute_node_if_disabled(self, node):
        """Queue an enable action unless the node is already enabled."""
        node_status_str = self.get_node_status_str(node)
        if node_status_str != element.ServiceState.ENABLED.value:
            self.add_action_enable_compute_node(node)

    def instance_migration(self, instance, src_node, des_node=None):
        """Add an action for instance migration into the solution.

        :param instance: instance object
        :param src_node: node object
        :param des_node: node object. if None, the instance will be
            migrated relying on nova-scheduler
        :return: None
        """
        instance_state_str = self.get_instance_state_str(instance)
        if instance_state_str == element.InstanceState.ACTIVE.value:
            migration_type = 'live'
        else:
            # Non-active instances cannot be live-migrated.
            migration_type = 'cold'
        params = {'migration_type': migration_type,
                  'source_node': src_node.uuid,
                  'resource_name': instance.name}
        if des_node:
            params['destination_node'] = des_node.uuid
        self.solution.add_action(action_type=self.INSTANCE_MIGRATION,
                                 resource_id=instance.uuid,
                                 input_parameters=params)

    def host_migration(self, source_node, destination_node):
        """host migration

        Migrate all instances from source_node to destination_node.
        Active instances use "live-migrate",
        and other instances use "cold-migrate"
        """
        instances = self.compute_model.get_node_instances(source_node)
        for instance in instances:
            self.instance_migration(instance, source_node, destination_node)

    def safe_maintain(self, maintenance_node, backup_node=None):
        """safe maintain one compute node

        Migrate all instances of the maintenance_node intensively to the
        backup host. If the user didn't give the backup host, it will
        select one unused node to backup the maintaining node.

        It calculate the resource both of the backup node and maintaining
        node to evaluate the migrations from maintaining node to backup node.
        If all instances of the maintaining node can migrated to
        the backup node, it will set the maintaining node in
        'watcher_maintaining' status, and add the migrations to solution.

        :return: True if a suitable backup node was found and actions queued
        """
        # If the user gives a backup node with required capacity, then migrates
        # all instances from the maintaining node to the backup node.
        if backup_node:
            if self.host_fits(maintenance_node, backup_node):
                self.enable_compute_node_if_disabled(backup_node)
                self.add_action_maintain_compute_node(maintenance_node)
                self.host_migration(maintenance_node, backup_node)
                return True
        # If the user didn't give the backup host, select one unused
        # node with required capacity, then migrates all instances
        # from maintaining node to it.
        # Candidates are watcher-disabled nodes, tried smallest-cpu first.
        nodes = sorted(
            self.get_disabled_compute_nodes().values(),
            key=lambda x: self.get_node_capacity(x)['cpu'])
        if maintenance_node in nodes:
            nodes.remove(maintenance_node)
        for node in nodes:
            if self.host_fits(maintenance_node, node):
                self.enable_compute_node_if_disabled(node)
                self.add_action_maintain_compute_node(maintenance_node)
                self.host_migration(maintenance_node, node)
                return True
        return False

    def try_maintain(self, maintenance_node):
        """try to maintain one compute node

        It firstly set the maintenance_node in 'watcher_maintaining' status.
        Then try to migrate all instances of the maintenance node, rely
        on nova-scheduler.
        """
        self.add_action_maintain_compute_node(maintenance_node)
        instances = self.compute_model.get_node_instances(maintenance_node)
        for instance in instances:
            self.instance_migration(instance, maintenance_node)

    def pre_execute(self):
        """Pre-execution phase: delegate to the base strategy's checks."""
        self._pre_execute()

    def do_execute(self, audit=None):
        """Build the migration/maintenance actions for the requested node.

        :param audit: optional audit object (unused here)
        """
        LOG.info(_('Executing Host Maintenance Migration Strategy'))
        maintenance_node = self.input_parameters.get('maintenance_node')
        backup_node = self.input_parameters.get('backup_node')

        # if no VMs in the maintenance_node, just maintain the compute node
        src_node = self.compute_model.get_node_by_name(maintenance_node)
        if len(self.compute_model.get_node_instances(src_node)) == 0:
            if (src_node.disabled_reason !=
                    self.REASON_FOR_MAINTAINING):
                self.add_action_maintain_compute_node(src_node)
            return

        if backup_node:
            des_node = self.compute_model.get_node_by_name(backup_node)
        else:
            des_node = None

        # Prefer the capacity-checked plan; fall back to scheduler-driven
        # migration when no suitable backup node exists.
        if not self.safe_maintain(src_node, des_node):
            self.try_maintain(src_node)

    def post_execute(self):
        """Post-execution phase

        This can be used to compute the global efficacy
        """
        LOG.debug(self.solution.actions)
        LOG.debug(self.compute_model.to_string())
|
{
"content_hash": "2c78d645edb120ebf274fe1abd360daa",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 79,
"avg_line_length": 39.201438848920866,
"alnum_prop": 0.6109377867498623,
"repo_name": "openstack/watcher",
"id": "a23ea07c8201cde708204bcfc3cdfac65a3939f1",
"size": "11550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watcher/decision_engine/strategy/strategies/host_maintenance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "2791159"
},
{
"name": "Shell",
"bytes": "19951"
}
],
"symlink_target": ""
}
|
"""``dev_ixia.py``
`Ixia traffic generators specific functionality`
"""
from . import loggers
from . import entry_template
from .Ixia.IxiaHAL import IxiaHALMixin
from .Ixia.IxiaHLT import IxiaHLTMixin
from .Ixia.IxLoadHL import IxLoadHL
from .packet_processor import PacketProcessor
from .tg_helpers import TGHelperMixin
class Ixia(IxiaHLTMixin, IxiaHALMixin, TGHelperMixin, PacketProcessor, entry_template.GenericEntry):
    """IXIA interaction base class.

    Combines the HAL (low level) and optional HLT (protocol emulation)
    mixins; HLT features are enabled only when the config contains a
    "tcl_server" entry.
    """

    class_logger = loggers.ClassLogger()

    def __init__(self, config, opts):
        """Initializes connection to IXIA.

        Args:
            config(dict): Configuration information.
            opts(OptionParser): py.test config.option object which contains all py.test cli options.

        """
        self.__opts = opts
        self.__config = config
        # Indicates if TG object supports high level protocol emulation (can emulate dialogs).
        self.is_protocol_emulation_present = "tcl_server" in config
        if self.is_protocol_emulation_present:
            IxiaHLTMixin.__init__(self, config, opts)
        IxiaHALMixin.__init__(self, config, opts)
        self.ports, self.port_list = self._get_speed_ports()
        # Build a Tcl-style list of interfaces, e.g. "{1/1/1 1/1/2 }".
        self.ifaces = "{"
        for iface in self.ports:
            self.ifaces = self.ifaces + self._convert_iface(iface) + " "
        self.ifaces = self.ifaces + "}"
        # Configure port rate dictionary:
        # every port defaults to rate 1; "port_rate" config entries are
        # keyed by 1-based port index.
        self.rate = {k: 1 for k in self.ports}
        if "port_rate" in config:
            for _key in config["port_rate"]:
                self.rate[self.ports[int(_key) - 1]] = config["port_rate"][_key]

    def _get_speed_ports(self):
        """Get ports with speed from config.

        Returns:
            tuple(list[tuple], list[tuple, int]): Tuple with list of ports used in real config and list of port/speed values

        Notes:
            This function check if port has speed in config file.
            "port_list" (ports with explicit speed) takes precedence over
            the plain "ports" entry.

        """
        ports = []
        ports_list = []
        if 'ports' in self.__config:
            ports = [tuple(x) for x in self.__config["ports"]]
        if "port_list" in self.__config:
            ports = [tuple(x[0]) for x in self.__config["port_list"]]
            ports_list = [[tuple(x[0]), x[1]] for x in self.__config["port_list"]]
        return ports, ports_list

    def connect(self):
        """Logs in to IXIA and takes ports ownership.

        Returns:
            None

        """
        if self.is_protocol_emulation_present:
            IxiaHLTMixin.connect(self)
        IxiaHALMixin.connect(self)

    def disconnect(self, mode='fast'):
        """Logs out from IXIA and clears ports ownership.

        Args:
            mode(str): "fast" or "complete" disconnect mode.

        Returns:
            None

        """
        # HAL is torn down first, then the optional HLT layer.
        IxiaHALMixin.disconnect(self, mode)
        if self.is_protocol_emulation_present:
            IxiaHLTMixin.disconnect(self, mode)

    def check(self):
        """Checking connection to IXIA.

        Returns:
            None

        """
        if self.is_protocol_emulation_present:
            IxiaHLTMixin.check(self)
        IxiaHALMixin.check(self)

    def create(self):
        """Obligatory class for entry_type = tg.
        """
        if self.is_protocol_emulation_present:
            IxiaHLTMixin.create(self)
        IxiaHALMixin.create(self)

    def destroy(self):
        """Obligatory class for entry_type = tg.
        """
        self.class_logger.info("Destroying Ixia object...")
        # "fast" mode skips per-port stream removal (see cleanup()).
        IxiaHALMixin.cleanup(self, mode="fast")
        self.class_logger.info("IxHAL Cleanup finished.")
        # Keep connections alive when the user asked to leave the setup on.
        if not self.__opts.leave_on and not self.__opts.get_only:
            self.class_logger.info("Disconnecting IxHAL...")
            IxiaHALMixin.disconnect(self)
            if self.is_protocol_emulation_present:
                self.class_logger.info("Disconnecting IxNetwork...")
                IxiaHLTMixin.disconnect(self, mode="fast")

    def cleanup(self, mode="complete"):
        """This method should do Ixia ports cleanup (remove streams etc.).

        Args:
            mode(str): "fast" or "complete". If mode == "fast", method does not clear streams on the port (string)

        """
        # TODO: Add stop_sniff etc
        # TODO: Handle errors more gracefully
        if self.is_protocol_emulation_present:
            IxiaHLTMixin.cleanup(self, mode)
        IxiaHALMixin.cleanup(self, mode)

    def sanitize(self):
        """Clear ownership before exit.
        """
        if self.is_protocol_emulation_present:
            IxiaHLTMixin.sanitize(self)
        IxiaHALMixin.sanitize(self)

    def get_os_mtu(self, iface=None):
        """Get MTU value in host OS.

        Args:
            iface(str): Interface for getting MTU in host OS

        Returns:
            int: Original MTU value

        Example::

            env.tg[1].get_os_mtu(iface=ports[('tg1', 'sw1')][1])

        """
        # Hardware TG has no host-OS MTU; a fixed large value is reported
        # instead (presumably the chassis maximum -- confirm if relied upon).
        return 14000

    def set_os_mtu(self, iface=None, mtu=None):
        """Set MTU value in host OS.

        Args:
            iface(str): Interface for changing MTU in host OS
            mtu(int): New MTU value

        Returns:
            int: Original MTU value

        Examples ::

            env.tg[1].set_os_mtu(iface=ports[('tg1', 'sw1')][1], mtu=1650)

        """
        # No-op for hardware TG: simply reports the constant MTU.
        return self.get_os_mtu(iface)
class IxiaLOAD(IxLoadHL, PacketProcessor, entry_template.GenericEntry):
    """IXIA interaction class based on IxLoadCsv.
    """

    class_logger = loggers.ClassLogger()
# Entry type identifier under which this module's classes are registered.
ENTRY_TYPE = "tg"
# Maps the configured instance type to the implementing TG class.
INSTANCES = {"ixiahl": Ixia,
             "ixload": IxiaLOAD,
             }
# Default entry name for this entry type.
NAME = "tg"
|
{
"content_hash": "f9ea50b312bf103d2fcd1d18d5901e37",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 125,
"avg_line_length": 28.31979695431472,
"alnum_prop": 0.5875604947123141,
"repo_name": "orestkreminskyi/taf",
"id": "cc56734634bcfd28575df87aba0b87858c5ec59a",
"size": "6173",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "taf/testlib/dev_ixia.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6745"
},
{
"name": "JavaScript",
"bytes": "1771"
},
{
"name": "Python",
"bytes": "3869203"
},
{
"name": "Shell",
"bytes": "3146"
},
{
"name": "Tcl",
"bytes": "68098"
},
{
"name": "XSLT",
"bytes": "41538"
}
],
"symlink_target": ""
}
|
"""
Contains miscellaneous helper functions.
"""
import cProfile
import io
import logging
import os
import pstats
import re
import shutil
import socket
import time
import warnings
import weakref
from functools import wraps
from subprocess import STDOUT, Popen, PIPE
# Log level constant for additional VERBOSE level
VERBOSE = 5
"""Verbose logging level"""
def schedule_as_coop_task (func):
  """
  Decorator that runs the wrapped function asynchronously as a microtask
  in recoco's cooperative multitasking context (in which POX was written).

  :param func: decorated function
  :type func: func
  :return: decorator function
  :rtype: func
  """
  from pox.core import core

  # Keep the original function's metadata for documentation generation.
  @wraps(func)
  def wrapper (*args, **kwargs):
    # POX's thread-safe scheduling entry point
    return core.callLater(func, *args, **kwargs)

  return wrapper
def schedule_delayed_as_coop_task (delay=0):
  """
  Decorator factory for running functions delayed in recoco's cooperative
  multitasking context.

  :param delay: delay in sec (default: 0 -- schedule immediately)
  :type delay: int
  :return: decorator function
  :rtype: func
  """

  def decorator (func):
    from pox.core import core

    @wraps(func)
    def wrapper (*args, **kwargs):
      # Use POX's thread-safe wrappers: delayed when a delay was given,
      # immediate scheduling otherwise.
      if delay:
        return core.callDelayed(delay, func, *args, **kwargs)
      return core.callLater(func, *args, **kwargs)

    return wrapper

  return decorator
def call_as_coop_task (func, *args, **kwargs):
  """
  Schedule ``func`` as a coop microtask and run it with the given
  parameters, using POX core logic directly.

  :param func: function need to run
  :type func: func
  :param args: nameless arguments
  :param kwargs: named arguments
  :return: None
  """
  from pox.core import core
  # Delegate to POX's thread-safe scheduler.
  core.callLater(func, *args, **kwargs)
def call_delayed_as_coop_task (func, delay=0, *args, **kwargs):
  """
  Schedule ``func`` as a coop microtask after the given delay, using POX
  core logic directly.

  :param func: function need to run
  :type func: func
  :param delay: delay of time
  :type delay: int
  :param args: nameless arguments
  :type args: tuple
  :param kwargs: named arguments
  :type kwargs: dict
  :return: None
  """
  from pox.core import core
  # Delegate to POX's thread-safe delayed scheduler.
  core.callDelayed(delay, func, *args, **kwargs)
def run_cmd (cmd):
  """
  Run a shell command and return its combined stdout/stderr output.

  It's advisable to give the command with a raw string literal e.g.: r'ps aux'.

  :param cmd: command
  :type cmd: str or list or tuple
  :return: output of the command
  :rtype: str
  """
  if isinstance(cmd, basestring):
    script = cmd
  else:
    # A sequence of arguments is joined into one shell command line.
    script = ' '.join(cmd) if isinstance(cmd, (list, tuple)) else None
  shell_cmd = ['/bin/sh', '-c']
  if script is not None:
    shell_cmd.append(script)
  # stderr is merged into stdout so callers get a single output stream.
  return Popen(shell_cmd, stdout=PIPE, stderr=STDOUT).communicate()[0]
def enum (*sequential, **named):
  # noinspection PyUnresolvedReferences
  """
  Helper function to define enumeration. E.g.:

  >>> Numbers1 = enum(ONE=1, TWO=2, THREE='three')
  >>> Numbers = enum('ZERO', 'ONE', 'TWO')
  >>> Numbers.ONE
  1
  >>> Numbers.reversed[2]
  'TWO'

  :param sequential: support automatic enumeration
  :type sequential: str
  :param named: support definition with unique keys
  :type named: object
  :return: Enum object
  :rtype: dict
  """
  # Positional names get auto-numbered values; explicit keyword values win.
  mapping = dict(zip(sequential, range(len(sequential))))
  mapping.update(named)
  # value -> name lookup table for the generated members
  mapping['reversed'] = dict((value, key) for key, value in
                             mapping.iteritems())
  return type('enum', (), mapping)
def quit_with_error (msg, logger=None, exception=None):
  """
  Log a fatal error message and terminate POX (and the whole process).

  :param msg: error message
  :type msg: str
  :param logger: logger name or logger object (default: core)
  :type logger: str or :any:`logging.Logger`
  :param exception: print stacktrace before quit (default: None)
  :type exception: :any:`exceptions.Exception`
  :return: None
  """
  from pox.core import core
  # Accept a logger name, a Logger object, or fall back to "core".
  if not isinstance(logger, logging.Logger):
    logger = core.getLogger(logger if isinstance(logger, str) else "core")
  logger.fatal(msg)
  if exception:
    logger.exception("Caught exception: %s" % exception)
  core.quit()
  # Force the exit even if POX's shutdown does not finish.
  os._exit(os.EX_SOFTWARE)
def quit_with_ok (msg=None, logger=None):
  """
  Log an exit message and shut POX down cleanly.

  :param msg: exit message
  :type msg: str
  :param logger: logger name or logger object (default: core)
  :type logger: str or :any:`logging.Logger`
  :return: None
  """
  from pox.core import core
  # Accept a logger name, a Logger object, or fall back to "core".
  if not isinstance(logger, logging.Logger):
    logger = core.getLogger(logger if isinstance(logger, str) else "core")
  logger.info(msg if msg else "Exiting from ESCAPE...")
  core.quit()
def quit_with_code (ret_code, msg=None, logger=None):
  """
  Log an exit message and shut POX down with an explicit return code.

  :param ret_code: process return code to report
  :type ret_code: int
  :param msg: exit message
  :type msg: str
  :param logger: logger name or logger object (default: core)
  :type logger: str or :any:`logging.Logger`
  :return: None
  """
  from pox.core import core
  # Accept a logger name, a Logger object, or fall back to "core".
  if not isinstance(logger, logging.Logger):
    logger = core.getLogger(logger if isinstance(logger, str) else "core")
  logger.info(msg if msg else "Exiting from ESCAPE...")
  core.set_return_value(ret_code)
  core.quit()
def set_global_parameter (name, value):
  """
  Set the given parameter globally based on the `core` object of POX.
  Use :any:`get_global_parameter` to read it back.

  :param name: global parameter name
  :type name: str or int
  :param value: parameter value
  :type value: object
  :return: None
  """
  from pox.core import core
  # POX's core object doubles as a process-wide attribute registry.
  setattr(core, name, value)
def get_global_parameter (name):
  """
  Return the value of the given parameter which has been set by
  :any:`set_global_parameter`, or None when unset.

  :param name: global parameter name
  :type name: str or int
  :return: parameter value
  :rtype: object
  """
  from pox.core import core
  # Missing attributes silently map to None.
  return getattr(core, name, None)
class SimpleStandaloneHelper(object):
  """
  Helper class for layer APIs to catch events and handle these in separate
  handler functions.
  """

  def __init__ (self, container, cover_name):
    """
    Init.

    :param container: Container class reference
    :type: EventMixin
    :param cover_name: Container's name for logging
    :type cover_name: str
    :return: None
    """
    from pox.lib.revent.revent import EventMixin
    super(SimpleStandaloneHelper, self).__init__()
    assert isinstance(container,
                      EventMixin), "container is not subclass of EventMixin"
    # Weak proxy avoids a reference cycle keeping the container alive.
    self._container = weakref.proxy(container)
    self._cover_name = cover_name
    self._register_listeners()

  def _register_listeners (self):
    """
    Register event listeners.

    If a listener is explicitly defined in the class use this function
    otherwise use the common logger function

    :return: None
    """
    for event in self._container._eventMixin_events:
      # _eventMixin_events holds event *classes*, so derive the handler
      # name from the class name itself. The previous code used
      # event.__class__.__name__, which is the metaclass name ('type'),
      # so explicit _handle_<Event> handlers were never matched.
      handler_name = "_handle_" + event.__name__
      if hasattr(self, handler_name):
        self._container.addListener(event, getattr(self, handler_name),
                                    weak=True)
      else:
        self._container.addListener(event, self._log_event, weak=True)

  def _log_event (self, event):
    """
    Log given event.

    :param event: Event object which need to be logged
    :type event: Event
    :return: None
    """
    from pox.core import core
    core.getLogger("StandaloneHelper").getChild(self._cover_name).info(
      "Got event: %s from %s Layer" % (
        event.__class__.__name__, str(event.source._core_name).title()))
class Singleton(type):
  """
  Metaclass realizing the Singleton design pattern in a pythonic way:
  the first instantiation of a class is cached and every later call
  returns the same object.
  """
  _instances = {}

  # noinspection PyArgumentList
  def __call__ (cls, *args, **kwargs):
    """
    Override.
    """
    # EAFP: construct and cache on the first call only.
    try:
      return cls._instances[cls]
    except KeyError:
      instance = super(Singleton, cls).__call__(*args, **kwargs)
      cls._instances[cls] = instance
      return instance
def deprecated (func):
  """
  Decorator which marks a function as deprecated: every call emits a
  DeprecationWarning pointing at the caller, then runs the original.

  :param func: original function
  :type func: :any:`collections.Callable`
  :return: decorated func
  :rtype: :any:`collections.Callable`
  """

  @wraps(func)
  def wrapper (*args, **kwargs):
    # stacklevel=2 attributes the warning to the caller, not the wrapper.
    warnings.warn("Call to deprecated function %s." % func.__name__,
                  category=DeprecationWarning, stacklevel=2)
    return func(*args, **kwargs)

  return wrapper
def remove_junks_at_shutdown (log=logging.getLogger("cleanup")):
  """
  Remove junk files used/created by ESCAPE.

  Requires root: resets /etc/hosts, kills leftover VNF processes,
  removes dangling veth pairs and xterms and runs Mininet's cleanup.

  :param log: optional logger
  :type log: :any:`logging.Logger`
  :return: None
  """
  # Every step below needs root; bail out early otherwise.
  if os.geteuid() != 0:
    log.error("Cleanup process requires root privilege!")
    return
  log.debug("Remove SAP names from /etc/hosts...")
  # Reset /etc/hosts file
  os.system("sed '/# BEGIN ESCAPE SAPS/,/# END ESCAPE SAPS/d' "
            "/etc/hosts > /etc/hosts2")
  os.system("mv /etc/hosts2 /etc/hosts")
  log.debug("Cleanup still running VNF-related processes...")
  # Kill remained clickhelper.py/click
  run_cmd(r"sudo pkill -9 -f netconfd")
  run_cmd(r"sudo pkill -9 -f clickhelper")
  run_cmd(r"sudo pkill -9 -f click")
  log.debug("Delete any remained veth pair...")
  # uny_* is presumably the ESCAPE-created veth naming prefix -- confirm.
  veths = run_cmd(r"ip link show | egrep -o '(uny_\w+)'").split('\n')
  # only need to del one end of the veth pair
  for veth in veths[::2]:
    if veth != '':
      run_cmd(r"sudo ip link del %s" % veth)
  log.debug("Remove remained xterms and stacked netconfd sockets...")
  run_cmd("sudo pkill -f '%s'" % 'xterm -title "SAP')
  # os.system("sudo pkill -f 'xterm -title SAP'")
  log.debug("Cleanup any Mininet-specific junk...")
  # Call Mininet's own cleanup stuff
  from mininet.clean import cleanup
  cleanup()
def remove_junks_at_boot (log=logging.getLogger("cleanup")):
  """
  Remove junk files used/created by ESCAPE.

  Clears VNF/agent/netconfd logs and trail directories left over from
  previous runs. Requires root.

  :param log: optional logger
  :type log: :any:`logging.Logger`
  :return: None
  """
  if os.geteuid() != 0:
    log.error("Cleanup process requires root privilege!")
    return
  log.debug("Remove remained log files of VNF, agent and netconfd instances "
            "from previous run...")
  log.debug("Remove trails...")
  trails = os.getcwd() + "/log/trails"
  if os.path.exists(trails):
    # Keep the placeholder file and anything created today.
    for f in os.listdir(os.getcwd() + "/log/trails"):
      if f != ".placeholder" and not f.startswith(time.strftime("%Y%m%d")):
        shutil.rmtree(os.path.join(os.getcwd(), "log/trails", f),
                      ignore_errors=True)
  run_cmd('rm -f /tmp/*.log')
  # Startup configs and netconfd sockets from earlier runs.
  for f in os.listdir('/tmp'):
    if re.search('.*-startup-cfg.xml|ncxserver_.*', f):
      os.remove(os.path.join('/tmp/', f))
def get_ifaces ():
  """
  Return the list of all defined interface. Rely on 'ifconfig' command.

  :return: list of interfaces
  :rtype: list
  """
  names = []
  # First column of every data row is the interface name; the row
  # starting with 'Iface' is the table header.
  for line in os.popen('ifconfig -a -s'):
    if not line.startswith('Iface'):
      names.append(line.split(' ', 1)[0])
  return names
def get_escape_version ():
  """
  Return the current ESCAPE version based on ``git describe``
  in format: version-revision where version is the last found tag and revision
  is the number of commits following the tag.

  :return: version in format: vmajor.minor.patch[-revision-commit]
  :rtype: str
  """
  # Only match version tag like v2.0.0
  # cmd = "git describe --always --first-parent --tags --match v*"
  with open(os.devnull, 'wb') as devnull:
    proc = Popen("git describe --always --tags --match P*.*",
                 stdout=PIPE,
                 stderr=devnull,
                 shell=True)
    desc = proc.communicate()[0].strip()
  if not desc:
    # Git is not installed or the command failed: fall back to the
    # version shipped in the escape package.
    from escape import __version__
    return __version__
  if desc.count('.'):
    return desc
  # No matching tag in the repo: only an abbreviated commit hash remains.
  return "2.0.0-%s" % desc
def get_escape_branch_name ():
  """
  Return the name of the currently checked-out git branch, or "N/A" when
  it cannot be determined.

  :return: current branch
  :rtype: str
  """
  with open(os.devnull, 'wb') as devnull:
    branch = Popen("git symbolic-ref --short HEAD",
                   stdout=PIPE,
                   stderr=devnull,
                   shell=True).communicate()[0]
  # Empty output: detached HEAD, not a repo, or git missing.
  if branch:
    return branch.strip()
  return "N/A"
def get_escape_revision ():
  """
  Return the initiation message for the current ESCAPE version,
  acquiring the project name from the escape package and the version and
  branch from git.

  :return: name, version and branch
  :rtype: tuple
  """
  import escape
  return (escape.__project__, get_escape_version(), get_escape_branch_name())
def notify_remote_visualizer (data, url=None, unique_id=None, **kwargs):
  """
  Send the given data to a remote visualization server.

  If url is given use this address to send instead of the url defined in the
  global config.

  :param data: topology description need to send
  :type data: :class:`NFFG` or :class:`Virtualizer`
  :param url: additional URL (acquired from config by default)
  :type url: str
  :param unique_id: use given ID as NFFG id
  :type unique_id: str or int
  :param kwargs: optional parameters for request lib
  :type kwargs: dict
  :return: response
  :rtype: str
  """
  from pox.core import core
  # Silently skip the notification when no visualizer component is loaded.
  if not core.hasComponent('visualizer'):
    return
  return core.visualizer.send_notification(data=data, url=url,
                                           unique_id=unique_id, **kwargs)
def do_profile (func):
  """
  Decorator to profile a function.

  :param func: decorated function
  :return: tuple of the decorated function's result and the collected
    cProfile statistics as str
  """

  @wraps(func)
  def profiled (*args, **kwargs):
    """
    Run the wrapped function under cProfile and collect the statistics.

    :return: tuple of the result of decorated function and statistics as str
    :rtype: tuple
    """
    profiler = cProfile.Profile(builtins=False)
    profiler.enable()
    try:
      result = func(*args, **kwargs)
    finally:
      # Statistics are rendered even when the wrapped call raises.
      profiler.disable()
      profiler.create_stats()
      with io.BytesIO() as buf:
        pstats.Stats(profiler, stream=buf).sort_stats(
          'cumulative').print_stats()
        stats_text = buf.getvalue()
    return result, stats_text

  return profiled
def unicode_to_str (raw):
  """
  Converter function to avoid unicode: recursively encode unicode strings
  in the given structure to UTF-8 ``str`` values, replacing spaces with
  underscores.

  :param raw: raw data from
  :return: converted data
  """
  # Branches are disjoint by type, so ordering them differently is safe.
  if isinstance(raw, unicode):
    return raw.encode('utf-8').replace(' ', '_')
  if isinstance(raw, list):
    return [unicode_to_str(item) for item in raw]
  if isinstance(raw, dict):
    return {unicode_to_str(key): unicode_to_str(value) for key, value in
            raw.iteritems()}
  return raw
def remove_units (raw):
  """
  Strip every non-digit character (e.g. measurement units) from the given
  resource value.

  :param raw: raw resource value
  :type raw: str
  :return: resource value
  :rtype: int
  """
  # Keep only the numeric characters of the value.
  return filter(lambda char: char.isdigit(), raw)
def check_service_status (name):
  """
  Return if a Linux process given by ``name`` is running or not.

  :param name: process name
  :type name: str
  :return: process is running or not
  :rtype: bool
  """
  # 'service --status-all' marks running services with [+] and stopped
  # ones with [-]; match the service column against the given name.
  for line in run_cmd("sudo service --status-all").splitlines():
    status, service = line.split(']')
    if name == service.strip():
      return "+" in status
  return False
def port_tester (host, port, interval=1, period=5,
                 log=logging.getLogger("port_tester")):
  """
  Test the given TCP port with the interval (in sec) until the attempts
  reach the given period.

  :param host: host
  :type host: str
  :param port: port number
  :type port: int
  :param interval: delay between the attempts
  :type interval: int
  :param period: number of checks
  :type period: int
  :param log: additional log object
  :return: port is open or not
  :rtype: bool
  """
  log.debug(
    "Testing port: %s on host: %s with interval: %ss" % (host, port, interval))
  # noinspection PyArgumentList
  for i in xrange(1, period):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
      s.connect((socket.gethostbyname(host), port))
      log.log(VERBOSE, "Port open: %s!" % port)
      return True
    except socket.error:
      log.log(VERBOSE, "Attempt: %s - Port closed!" % i)
      time.sleep(interval)
    finally:
      # Close the socket on every path -- the original code leaked it
      # when the connect succeeded (returned without close()).
      s.close()
  return False
|
{
"content_hash": "80b90ccda23af06f249a80a9fd8a17e4",
"timestamp": "",
"source": "github",
"line_count": 606,
"max_line_length": 79,
"avg_line_length": 27.353135313531354,
"alnum_prop": 0.6607142857142857,
"repo_name": "5GExchange/escape",
"id": "09b823bcd13db50372e8d897f8c3e678ba12b8c2",
"size": "17150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "escape/escape/util/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "381"
},
{
"name": "C",
"bytes": "9773701"
},
{
"name": "C++",
"bytes": "1144774"
},
{
"name": "Dockerfile",
"bytes": "4497"
},
{
"name": "HTML",
"bytes": "423218"
},
{
"name": "JavaScript",
"bytes": "9048"
},
{
"name": "Makefile",
"bytes": "121260"
},
{
"name": "Objective-C",
"bytes": "2964"
},
{
"name": "Python",
"bytes": "2856844"
},
{
"name": "Roff",
"bytes": "80820"
},
{
"name": "Shell",
"bytes": "190566"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import datetime
from decimal import Decimal
from django import test
from django import forms
from django.core.exceptions import ValidationError
from django.db import connection, models, IntegrityError
from django.db.models.fields import (
AutoField, BigIntegerField, BinaryField, BooleanField, CharField,
CommaSeparatedIntegerField, DateField, DateTimeField, DecimalField,
EmailField, FilePathField, FloatField, IntegerField, IPAddressField,
GenericIPAddressField, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils import unittest
from .models import (Foo, Bar, Whiz, BigD, BigS, Image, BigInt, Post,
NullBooleanModel, BooleanModel, DataModel, Document, RenamedField,
VerboseNameField, FksToBooleans)
class BasicFieldTests(test.TestCase):
    """Regression tests for generic model-field behaviour."""

    def test_show_hidden_initial(self):
        """
        Regression test for #12913. Make sure fields with choices respect
        show_hidden_initial as a kwarg to models.Field.formfield()
        """
        choices = [(0, 0), (1, 1)]
        model_field = models.Field(choices=choices)
        form_field = model_field.formfield(show_hidden_initial=True)
        self.assertTrue(form_field.show_hidden_initial)

        form_field = model_field.formfield(show_hidden_initial=False)
        self.assertFalse(form_field.show_hidden_initial)

    def test_nullbooleanfield_blank(self):
        """
        Regression test for #13071: NullBooleanField should not throw
        a validation error when given a value of None.
        """
        nullboolean = NullBooleanModel(nbfield=None)
        try:
            nullboolean.full_clean()
        except ValidationError as e:
            self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)

    def test_field_repr(self):
        """
        Regression test for #5931: __repr__ of a field also displays its name
        """
        f = Foo._meta.get_field('a')
        self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
        # An unbound field has no name, so repr() omits the ": name" part.
        f = models.fields.CharField()
        self.assertEqual(repr(f), '<django.db.models.fields.CharField>')

    def test_field_name(self):
        """
        Regression test for #14695: explicitly defined field name overwritten
        by model's attribute name.
        """
        instance = RenamedField()
        self.assertTrue(hasattr(instance, 'get_fieldname_display'))
        self.assertFalse(hasattr(instance, 'get_modelname_display'))

    def test_field_verbose_name(self):
        """Each field of VerboseNameField keeps its explicit verbose_name."""
        m = VerboseNameField
        for i in range(1, 23):
            self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
                             'verbose field%d' % i)

        self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')

    def test_choices_form_class(self):
        """Can supply a custom choices form class. Regression for #20999."""
        choices = [('a', 'a')]
        field = models.CharField(choices=choices)
        klass = forms.TypedMultipleChoiceField
        self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
class DecimalFieldTests(test.TestCase):
    """Conversion, formatting and lookup behaviour of DecimalField."""

    def test_to_python(self):
        """to_python() turns numbers and numeric strings into Decimal."""
        field = models.DecimalField(max_digits=4, decimal_places=2)
        self.assertEqual(field.to_python(3), Decimal("3"))
        self.assertEqual(field.to_python("3.14"), Decimal("3.14"))
        self.assertRaises(ValidationError, field.to_python, "abc")

    def test_default(self):
        """A Decimal default is returned unchanged by get_default()."""
        field = models.DecimalField(default=Decimal("0.00"))
        self.assertEqual(field.get_default(), Decimal("0.00"))

    def test_format(self):
        """_format() renders at the field's precision; None passes through."""
        field = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(field._format(field.to_python(2)), '2.0')
        self.assertEqual(field._format(field.to_python('2.6')), '2.6')
        self.assertEqual(field._format(None), None)

    def test_get_db_prep_lookup(self):
        """An exact lookup against None prepares to [None]."""
        from django.db import connection
        field = models.DecimalField(max_digits=5, decimal_places=1)
        self.assertEqual(
            field.get_db_prep_lookup('exact', None, connection=connection),
            [None])

    def test_filter_with_strings(self):
        """
        We should be able to filter decimal fields using strings (#8023)
        """
        Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        self.assertEqual(list(Foo.objects.filter(d='1.23')), [])

    def test_save_without_float_conversion(self):
        """
        Ensure decimals don't go through a corrupting float conversion during
        save (#5079).
        """
        obj = BigD(d="12.9")
        obj.save()
        obj = BigD.objects.get(pk=obj.pk)
        self.assertEqual(obj.d, Decimal("12.9"))

    def test_lookup_really_big_value(self):
        """
        Ensure that really big values can be used in a filter statement, even
        with older Python versions.
        """
        # Not raising is the entire assertion here.
        Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
    """Tests for ForeignKey defaults."""

    def test_callable_default(self):
        """A lazy callable may be used as ForeignKey.default."""
        foo = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
        bar = Bar.objects.create(b="bcd")
        self.assertEqual(bar.a, foo)
class DateTimeFieldTests(unittest.TestCase):
    """Microsecond handling when parsing DateTimeField and TimeField input."""

    def test_datetimefield_to_python_usecs(self):
        """DateTimeField.to_python() keeps microsecond precision."""
        field = models.DateTimeField()
        cases = [
            ('2001-01-02 03:04:05.000006',
             datetime.datetime(2001, 1, 2, 3, 4, 5, 6)),
            ('2001-01-02 03:04:05.999999',
             datetime.datetime(2001, 1, 2, 3, 4, 5, 999999)),
        ]
        for raw, expected in cases:
            self.assertEqual(field.to_python(raw), expected)

    def test_timefield_to_python_usecs(self):
        """TimeField.to_python() keeps microsecond precision."""
        field = models.TimeField()
        cases = [
            ('01:02:03.000004', datetime.time(1, 2, 3, 4)),
            ('01:02:03.999999', datetime.time(1, 2, 3, 999999)),
        ]
        for raw, expected in cases:
            self.assertEqual(field.to_python(raw), expected)
class BooleanFieldTests(unittest.TestCase):
    """Lookup preparation, Python conversion and retrieval semantics of
    BooleanField and NullBooleanField."""
    def _test_get_db_prep_lookup(self, f):
        """Exact lookups coerce truthy/falsy inputs to real booleans; None
        passes through unchanged."""
        from django.db import connection
        self.assertEqual(f.get_db_prep_lookup('exact', True, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', '1', connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', 1, connection=connection), [True])
        self.assertEqual(f.get_db_prep_lookup('exact', False, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', '0', connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', 0, connection=connection), [False])
        self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
    def _test_to_python(self, f):
        """to_python() returns the canonical True/False singletons."""
        self.assertTrue(f.to_python(1) is True)
        self.assertTrue(f.to_python(0) is False)
    def test_booleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.BooleanField())
    def test_nullbooleanfield_get_db_prep_lookup(self):
        self._test_get_db_prep_lookup(models.NullBooleanField())
    def test_booleanfield_to_python(self):
        self._test_to_python(models.BooleanField())
    def test_nullbooleanfield_to_python(self):
        self._test_to_python(models.NullBooleanField())
    def test_booleanfield_choices_blank(self):
        """
        Test that BooleanField with choices and defaults doesn't generate a
        formfield with the blank option (#9640, #10549).
        """
        choices = [(1, 'Si'), (2, 'No')]
        # null=True still exposes the blank "---------" choice ...
        f = models.BooleanField(choices=choices, default=1, null=True)
        self.assertEqual(f.formfield().choices, [('', '---------')] + choices)
        # ... while null=False suppresses it.
        f = models.BooleanField(choices=choices, default=1, null=False)
        self.assertEqual(f.formfield().choices, choices)
    def test_return_type(self):
        """Values read back from the database are real bools, not 0/1."""
        b = BooleanModel()
        b.bfield = True
        b.save()
        b2 = BooleanModel.objects.get(pk=b.pk)
        self.assertIsInstance(b2.bfield, bool)
        self.assertEqual(b2.bfield, True)
        b3 = BooleanModel()
        b3.bfield = False
        b3.save()
        b4 = BooleanModel.objects.get(pk=b3.pk)
        self.assertIsInstance(b4.bfield, bool)
        self.assertEqual(b4.bfield, False)
        b = NullBooleanModel()
        b.nbfield = True
        b.save()
        b2 = NullBooleanModel.objects.get(pk=b.pk)
        self.assertIsInstance(b2.nbfield, bool)
        self.assertEqual(b2.nbfield, True)
        b3 = NullBooleanModel()
        b3.nbfield = False
        b3.save()
        b4 = NullBooleanModel.objects.get(pk=b3.pk)
        self.assertIsInstance(b4.nbfield, bool)
        self.assertEqual(b4.nbfield, False)
        # http://code.djangoproject.com/ticket/13293
        # Verify that when an extra clause exists, the boolean
        # conversions are applied with an offset
        b5 = BooleanModel.objects.all().extra(
            select={'string_col': 'string'})[0]
        self.assertFalse(isinstance(b5.pk, bool))
    def test_select_related(self):
        """
        Test type of boolean fields when retrieved via select_related() (MySQL,
        #15040)
        """
        bmt = BooleanModel.objects.create(bfield=True)
        bmf = BooleanModel.objects.create(bfield=False)
        nbmt = NullBooleanModel.objects.create(nbfield=True)
        nbmf = NullBooleanModel.objects.create(nbfield=False)
        m1 = FksToBooleans.objects.create(bf=bmt, nbf=nbmt)
        m2 = FksToBooleans.objects.create(bf=bmf, nbf=nbmf)
        # Test select_related('fk_field_name')
        ma = FksToBooleans.objects.select_related('bf').get(pk=m1.id)
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(ma.bf.bfield, bool)
        self.assertIsInstance(ma.nbf.nbfield, bool)
        # verify values
        self.assertEqual(ma.bf.bfield, True)
        self.assertEqual(ma.nbf.nbfield, True)
        # Test select_related()
        mb = FksToBooleans.objects.select_related().get(pk=m1.id)
        mc = FksToBooleans.objects.select_related().get(pk=m2.id)
        # verify types -- shouldn't be 0/1
        self.assertIsInstance(mb.bf.bfield, bool)
        self.assertIsInstance(mb.nbf.nbfield, bool)
        self.assertIsInstance(mc.bf.bfield, bool)
        self.assertIsInstance(mc.nbf.nbfield, bool)
        # verify values
        self.assertEqual(mb.bf.bfield, True)
        self.assertEqual(mb.nbf.nbfield, True)
        self.assertEqual(mc.bf.bfield, False)
        self.assertEqual(mc.nbf.nbfield, False)
    def test_null_default(self):
        """
        Check that a BooleanField defaults to None -- which isn't
        a valid value (#15124).
        """
        # Patch the boolean field's default value. We give it a default
        # value when defining the model to satisfy the check tests
        # #20895.
        boolean_field = BooleanModel._meta.get_field('bfield')
        self.assertTrue(boolean_field.has_default())
        old_default = boolean_field.default
        try:
            boolean_field.default = models.NOT_PROVIDED
            # check patch was successful
            self.assertFalse(boolean_field.has_default())
            b = BooleanModel()
            self.assertIsNone(b.bfield)
            with self.assertRaises(IntegrityError):
                b.save()
        finally:
            # Restore the class-level default so later tests see the
            # original field definition.
            boolean_field.default = old_default
        nb = NullBooleanModel()
        self.assertIsNone(nb.nbfield)
        nb.save()  # no error
class ChoicesTests(test.TestCase):
    """get_FIELD_display behaviour for fields with (grouped) choices."""

    def test_choices_and_field_display(self):
        """
        get_choices/get_flatchoices interact with get_FIELD_display to
        return the expected values (#7913).
        """
        expectations = [
            (1, 'First'),    # value nested inside a named group
            (0, 'Other'),    # top-level value
            (9, 9),          # value missing from the choices
            (None, None),    # blank value
            ('', ''),        # empty string
        ]
        for stored, shown in expectations:
            self.assertEqual(Whiz(c=stored).get_c_display(), shown)
class SlugFieldTests(test.TestCase):
    """SlugField length handling."""

    def test_slugfield_max_length(self):
        """SlugField honours max_length through a save/load round trip (#9706)."""
        long_slug = 'slug' * 50
        saved = BigS.objects.create(s=long_slug)
        reloaded = BigS.objects.get(pk=saved.pk)
        self.assertEqual(reloaded.s, long_slug)
class ValidationTest(test.TestCase):
    """Field.clean() validation behaviour for a range of field types."""

    def test_charfield_raises_error_on_empty_string(self):
        field = models.CharField()
        self.assertRaises(ValidationError, field.clean, "", None)

    def test_charfield_cleans_empty_string_when_blank_true(self):
        field = models.CharField(blank=True)
        self.assertEqual('', field.clean('', None))

    def test_integerfield_cleans_valid_string(self):
        field = models.IntegerField()
        self.assertEqual(2, field.clean('2', None))

    def test_integerfield_raises_error_on_invalid_intput(self):
        field = models.IntegerField()
        self.assertRaises(ValidationError, field.clean, "a", None)

    def test_charfield_with_choices_cleans_valid_choice(self):
        field = models.CharField(max_length=1,
                                 choices=[('a', 'A'), ('b', 'B')])
        self.assertEqual('a', field.clean('a', None))

    def test_charfield_with_choices_raises_error_on_invalid_choice(self):
        field = models.CharField(choices=[('a', 'A'), ('b', 'B')])
        self.assertRaises(ValidationError, field.clean, "not a", None)

    def test_choices_validation_supports_named_groups(self):
        grouped = (('group', ((10, 'A'), (20, 'B'))), (30, 'C'))
        field = models.IntegerField(choices=grouped)
        self.assertEqual(10, field.clean(10, None))

    def test_nullable_integerfield_raises_error_with_blank_false(self):
        field = models.IntegerField(null=True, blank=False)
        self.assertRaises(ValidationError, field.clean, None, None)

    def test_nullable_integerfield_cleans_none_on_null_and_blank_true(self):
        field = models.IntegerField(null=True, blank=True)
        self.assertEqual(None, field.clean(None, None))

    def test_integerfield_raises_error_on_empty_input(self):
        field = models.IntegerField(null=False)
        for empty in (None, ''):
            self.assertRaises(ValidationError, field.clean, empty, None)

    def test_integerfield_validates_zero_against_choices(self):
        field = models.IntegerField(choices=((1, 1),))
        self.assertRaises(ValidationError, field.clean, '0', None)

    def test_charfield_raises_error_on_empty_input(self):
        field = models.CharField(null=False)
        self.assertRaises(ValidationError, field.clean, None, None)

    def test_datefield_cleans_date(self):
        field = models.DateField()
        self.assertEqual(datetime.date(2008, 10, 10),
                         field.clean('2008-10-10', None))

    def test_boolean_field_doesnt_accept_empty_input(self):
        field = models.BooleanField()
        self.assertRaises(ValidationError, field.clean, None, None)
class BigIntegerFieldTests(test.TestCase):
    """Range, type and coercion behaviour of BigIntegerField."""

    def test_limits(self):
        """Values at both 64-bit extremes survive a save/load round trip."""
        upper = 9223372036854775807
        lower = -upper - 1
        BigInt.objects.create(value=upper)
        at_upper = BigInt.objects.filter(value__gte=upper)
        self.assertEqual(at_upper.count(), 1)
        self.assertEqual(at_upper[0].value, upper)
        BigInt.objects.create(value=lower)
        at_lower = BigInt.objects.filter(value__lte=lower)
        self.assertEqual(at_lower.count(), 1)
        self.assertEqual(at_lower[0].value, lower)

    def test_types(self):
        """The attribute stays an integer before and after persistence."""
        obj = BigInt(value=0)
        self.assertIsInstance(obj.value, six.integer_types)
        obj.save()
        self.assertIsInstance(obj.value, six.integer_types)
        obj = BigInt.objects.all()[0]
        self.assertIsInstance(obj.value, six.integer_types)

    def test_coercing(self):
        """String input is coerced to int on both create and lookup."""
        BigInt.objects.create(value='10')
        obj = BigInt.objects.get(value='10')
        self.assertEqual(obj.value, 10)
class TypeCoercionTests(test.TestCase):
    """
    Lookups may receive mis-typed values and must convert them without
    error, notably on Postgres 8.3+ which does not do automatic casting
    at the DB level. See #10015.
    """

    def test_lookup_integer_in_charfield(self):
        self.assertEqual(0, Post.objects.filter(title=9).count())

    def test_lookup_integer_in_textfield(self):
        self.assertEqual(0, Post.objects.filter(body=24).count())
class FileFieldTests(unittest.TestCase):
    """save_form_data() and delete() semantics of FileField."""

    def _document_with_file(self):
        # Shared fixture: a Document whose file attribute is pre-set.
        doc = Document(myfile='something.txt')
        self.assertEqual(doc.myfile, 'something.txt')
        return doc

    def test_clearable(self):
        """save_form_data() clears the instance attribute when passed False."""
        doc = self._document_with_file()
        doc._meta.get_field('myfile').save_form_data(doc, False)
        self.assertEqual(doc.myfile, '')

    def test_unchanged(self):
        """save_form_data() treats None as "no change" rather than "clear"."""
        doc = self._document_with_file()
        doc._meta.get_field('myfile').save_form_data(doc, None)
        self.assertEqual(doc.myfile, 'something.txt')

    def test_changed(self):
        """save_form_data() replaces the stored name for a truthy value."""
        doc = self._document_with_file()
        doc._meta.get_field('myfile').save_form_data(doc, 'else.txt')
        self.assertEqual(doc.myfile, 'else.txt')

    def test_delete_when_file_unset(self):
        """
        Calling delete on an unset FileField should not call the file deletion
        process, but fail silently (#20660).
        """
        doc = Document()
        try:
            doc.myfile.delete()
        except OSError:
            self.fail("Deleting an unset FileField should not raise OSError.")
class BinaryFieldTests(test.TestCase):
    """Round-trip and validation behaviour of BinaryField."""
    # Sample payload containing a NUL byte and a high (non-ASCII) byte.
    binary_data = b'\x00\x46\xFE'
    def test_set_and_retrieve(self):
        """Both bytes and memoryview inputs survive insert and update."""
        data_set = (self.binary_data, six.memoryview(self.binary_data))
        for bdata in data_set:
            dm = DataModel(data=bdata)
            dm.save()
            dm = DataModel.objects.get(pk=dm.pk)
            self.assertEqual(bytes(dm.data), bytes(bdata))
            # Resave (=update)
            dm.save()
            dm = DataModel.objects.get(pk=dm.pk)
            self.assertEqual(bytes(dm.data), bytes(bdata))
            # Test default value
            self.assertEqual(bytes(dm.short_data), b'\x08')
    # The rebinding below marks the test as an expected failure only on the
    # affected backend/interpreter combination; evaluated at class-body time.
    if connection.vendor == 'mysql' and six.PY3:
        # Existing MySQL DB-API drivers fail on binary data.
        test_set_and_retrieve = unittest.expectedFailure(test_set_and_retrieve)
    def test_max_length(self):
        """Payloads longer than max_length fail model validation."""
        dm = DataModel(short_data=self.binary_data*4)
        self.assertRaises(ValidationError, dm.full_clean)
class GenericIPAddressFieldTests(test.TestCase):
    """Protocol restrictions propagate from model field to form field."""

    def test_genericipaddressfield_formfield_protocol(self):
        """A protocol-restricted field's formfield rejects the other protocol (#20740)."""
        cases = [
            ('IPv4', '::1'),        # v4-only field must reject a v6 literal
            ('IPv6', '127.0.0.1'),  # v6-only field must reject a v4 literal
        ]
        for protocol, bad_address in cases:
            form_field = models.GenericIPAddressField(
                protocol=protocol).formfield()
            self.assertRaises(ValidationError, form_field.clean, bad_address)
class PrepValueTest(test.TestCase):
    """get_prep_value() must return the documented Python type for every
    built-in field class; non-string input to text-like fields is coerced."""
    def test_AutoField(self):
        self.assertIsInstance(AutoField(primary_key=True).get_prep_value(1), int)
    @unittest.skipIf(six.PY3, "Python 3 has no `long` type.")
    def test_BigIntegerField(self):
        # The `long` literal is intentional: this test only runs on Python 2.
        self.assertIsInstance(BigIntegerField().get_prep_value(long(9999999999999999999)), long)
    def test_BinaryField(self):
        self.assertIsInstance(BinaryField().get_prep_value(b''), bytes)
    def test_BooleanField(self):
        self.assertIsInstance(BooleanField().get_prep_value(True), bool)
    def test_CharField(self):
        self.assertIsInstance(CharField().get_prep_value(''), six.text_type)
        self.assertIsInstance(CharField().get_prep_value(0), six.text_type)
    def test_CommaSeparatedIntegerField(self):
        self.assertIsInstance(CommaSeparatedIntegerField().get_prep_value('1,2'), six.text_type)
        self.assertIsInstance(CommaSeparatedIntegerField().get_prep_value(0), six.text_type)
    def test_DateField(self):
        self.assertIsInstance(DateField().get_prep_value(datetime.date.today()), datetime.date)
    def test_DateTimeField(self):
        self.assertIsInstance(DateTimeField().get_prep_value(datetime.datetime.now()), datetime.datetime)
    def test_DecimalField(self):
        self.assertIsInstance(DecimalField().get_prep_value(Decimal('1.2')), Decimal)
    def test_EmailField(self):
        self.assertIsInstance(EmailField().get_prep_value('mailbox@domain.com'), six.text_type)
    def test_FileField(self):
        self.assertIsInstance(FileField().get_prep_value('filename.ext'), six.text_type)
        self.assertIsInstance(FileField().get_prep_value(0), six.text_type)
    def test_FilePathField(self):
        self.assertIsInstance(FilePathField().get_prep_value('tests.py'), six.text_type)
        self.assertIsInstance(FilePathField().get_prep_value(0), six.text_type)
    def test_FloatField(self):
        self.assertIsInstance(FloatField().get_prep_value(1.2), float)
    def test_ImageField(self):
        self.assertIsInstance(ImageField().get_prep_value('filename.ext'), six.text_type)
    def test_IntegerField(self):
        self.assertIsInstance(IntegerField().get_prep_value(1), int)
    def test_IPAddressField(self):
        self.assertIsInstance(IPAddressField().get_prep_value('127.0.0.1'), six.text_type)
        self.assertIsInstance(IPAddressField().get_prep_value(0), six.text_type)
    def test_GenericIPAddressField(self):
        self.assertIsInstance(GenericIPAddressField().get_prep_value('127.0.0.1'), six.text_type)
        self.assertIsInstance(GenericIPAddressField().get_prep_value(0), six.text_type)
    def test_NullBooleanField(self):
        self.assertIsInstance(NullBooleanField().get_prep_value(True), bool)
    def test_PositiveIntegerField(self):
        self.assertIsInstance(PositiveIntegerField().get_prep_value(1), int)
    def test_PositiveSmallIntegerField(self):
        self.assertIsInstance(PositiveSmallIntegerField().get_prep_value(1), int)
    def test_SlugField(self):
        self.assertIsInstance(SlugField().get_prep_value('slug'), six.text_type)
        self.assertIsInstance(SlugField().get_prep_value(0), six.text_type)
    def test_SmallIntegerField(self):
        self.assertIsInstance(SmallIntegerField().get_prep_value(1), int)
    def test_TextField(self):
        self.assertIsInstance(TextField().get_prep_value('Abc'), six.text_type)
        self.assertIsInstance(TextField().get_prep_value(0), six.text_type)
    def test_TimeField(self):
        self.assertIsInstance(
            TimeField().get_prep_value(datetime.datetime.now().time()),
            datetime.time)
    def test_URLField(self):
        self.assertIsInstance(URLField().get_prep_value('http://domain.com'), six.text_type)
class CustomFieldTests(unittest.TestCase):
    """Tests for user-defined field subclasses."""

    def test_14786(self):
        """
        Regression test for #14786 -- field values must not be prepared
        twice in get_db_prep_lookup().
        """
        calls = [0]  # mutable cell so the nested class can count invocations

        class CountingField(models.TextField):
            def get_prep_value(self, value):
                calls[0] += 1
                return super(CountingField, self).get_prep_value(value)

        field = CountingField()
        field.get_db_prep_lookup('exact', 'TEST',
                                 connection=connection, prepared=False)
        self.assertEqual(calls[0], 1)
|
{
"content_hash": "cb03b16045db81c54967f6899c1267f3",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 105,
"avg_line_length": 40.138385502471166,
"alnum_prop": 0.6464455754391726,
"repo_name": "atruberg/django-custom",
"id": "704ae0b822e3f043c7f5abb1a4db32ff39789204",
"size": "24364",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/model_fields/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51013"
},
{
"name": "JavaScript",
"bytes": "98272"
},
{
"name": "Python",
"bytes": "8636914"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
}
|
"""Read the sequence data from a nexus file.
This IO code only gives read access to the sequence data.
Reference:
'NEXUS: An extensible file format for systematic information'
Maddison, Swofford, Maddison. 1997. Syst. Biol. 46(4):590-621
"""
from corebio.seq import Seq, SeqList, Alphabet
from corebio.seq_io._nexus import Nexus, safename
names = ( 'nexus', 'paup')
extensions = ('nex', 'nexus', 'paup', 'nxs')
def iterseq(fin, alphabet=None):
    """Yield the sequences contained in the nexus file *fin*."""
    # Default implementation: parse everything via read(), then iterate.
    return iter(read(fin, alphabet))
def read(fin, alphabet=None):
    """Extract sequence data from a nexus file.

    Args:
        fin: An open file-like object containing NEXUS data.
        alphabet: Optional Alphabet forced onto every sequence; when None,
            each sequence keeps the alphabet the parser assigned it.
    Returns:
        A SeqList of named Seq objects, one per taxon.
    Raises:
        ValueError: If no sequences could be extracted from the file.
    """
    n = Nexus(fin)
    seqs = []
    for taxon in n.taxlabels:
        name = safename(taxon)
        r = n.matrix[taxon]
        # Preserve the parser's alphabet unless the caller overrides it.
        seq_alphabet = r.alphabet if alphabet is None else alphabet
        seqs.append(Seq(r, name=name, alphabet=seq_alphabet))
    if not seqs:
        # Something went terribly wrong.
        raise ValueError("Cannot parse file")
    return SeqList(seqs)
|
{
"content_hash": "c9840c9bb39110243e32feb711840f85",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 61,
"avg_line_length": 23.3265306122449,
"alnum_prop": 0.6080489938757655,
"repo_name": "JohnReid/bioinf-utilities",
"id": "9d3321d91e43a19ff79b7f9163ce63e6c34ec2fc",
"size": "2495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/corebio/seq_io/nexus_io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "437828"
},
{
"name": "Racket",
"bytes": "733"
},
{
"name": "Shell",
"bytes": "1076"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, url
from gestionchismes import views
from django.conf import settings
from django.conf.urls.static import static
# URL routing for the gestionchismes app; each pattern maps onto a view
# in gestionchismes.views. static() appends the static-file routes so
# assets under STATIC_URL are served from STATIC_ROOT.
urlpatterns = patterns('',
    url(r'logueo/$', views.logueo, name='logueo'),
    url(r'logoff/$', views.logoff, name='logoff'),
    url(r'crear/$', views.create, name='create'),
    url(r'crearchisme/$', views.crearchisme, name='crearchisme'),
    url(r'cuenta/$', views.cuenta, name='cuenta'),
    url(r'seguir/(?P<user_id>\w+)/$', views.seguir, name='seguirusuario'),
    url(r'dejar/(?P<user_id>\w+)/$', views.dejar, name='dejar'),
    url(r'eliminar/(?P<mens_id>\d+)/$', views.eliminarchisme, name='eliminarchisme'),
    url(r'fav/(?P<user_id>\w+)/(?P<mens_id>\d+)/$', views.fav, name='fav'),
    url(r'baja/(?P<user_id>\w+)/$', views.dardebaja, name='baja'),
    url(r'pulsan_favorito/(?P<mens_id>\d+)/$', views.pulsan_favorito, name='pulsan_favorito'),
    url(r'pulsan_retweet/(?P<mens_id>\d+)/$', views.pulsan_retweet, name='pulsan_retweet'),
    url(r'buscapersonalizada/$', views.buscar, name='buscar'),
    url(r'retuit/(?P<user_id>\w+)/(?P<mens_id>\d+)/$', views.retuit, name='retwit'),
    # to access, prefix with api_name/add
    ) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
{
"content_hash": "b464272ec2be7e007aa9c305c1ecf979",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 91,
"avg_line_length": 49.2,
"alnum_prop": 0.675609756097561,
"repo_name": "leoraca/Social_libre",
"id": "ddce84b61108ff12e19d7fa1b8f596562b13103a",
"size": "1410",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Chisme/gestionchismes/urls.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "275654"
},
{
"name": "HTML",
"bytes": "27069"
},
{
"name": "Python",
"bytes": "23412"
}
],
"symlink_target": ""
}
|
"""
Test class for baremetal IPMI power manager.
"""
import os
import stat
import tempfile
from nova.openstack.common import cfg
from nova import test
from nova.tests.baremetal.db import utils as bm_db_utils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import ipmi
from nova.virt.baremetal import utils as bm_utils
CONF = cfg.CONF
class BareMetalIPMITestCase(test.TestCase):
    """Unit tests for the baremetal IPMI power manager.

    Uses mox record/replay: each test records the exact sequence of
    expected calls, calls ReplayAll(), exercises the code, then verifies
    the recording with VerifyAll(). The recorded order is significant.
    """
    def setUp(self):
        # Build an IPMI manager around a fake baremetal node record.
        super(BareMetalIPMITestCase, self).setUp()
        self.node = bm_db_utils.new_bm_node(
            id=123,
            pm_address='fake-address',
            pm_user='fake-user',
            pm_password='fake-password')
        self.ipmi = ipmi.IPMI(self.node)
    def test_construct(self):
        """The constructor copies node id and pm_* credentials verbatim."""
        self.assertEqual(self.ipmi.node_id, 123)
        self.assertEqual(self.ipmi.address, 'fake-address')
        self.assertEqual(self.ipmi.user, 'fake-user')
        self.assertEqual(self.ipmi.password, 'fake-password')
    def test_make_password_file(self):
        """The password file exists, is mode 0600 and holds the password."""
        pw_file = ipmi._make_password_file(self.node['pm_password'])
        try:
            self.assertTrue(os.path.isfile(pw_file))
            # Only the permission bits are compared (mask with 0777).
            self.assertEqual(os.stat(pw_file)[stat.ST_MODE] & 0777, 0600)
            with open(pw_file, "r") as f:
                pm_password = f.read()
            self.assertEqual(pm_password, self.node['pm_password'])
        finally:
            os.unlink(pw_file)
    def test_exec_ipmitool(self):
        """_exec_ipmitool builds the ipmitool command line and cleans up
        the temporary password file afterwards."""
        pw_file = '/tmp/password_file'
        self.mox.StubOutWithMock(ipmi, '_make_password_file')
        self.mox.StubOutWithMock(utils, 'execute')
        self.mox.StubOutWithMock(bm_utils, 'unlink_without_raise')
        ipmi._make_password_file(self.ipmi.password).AndReturn(pw_file)
        args = [
            'ipmitool',
            '-I', 'lanplus',
            '-H', self.ipmi.address,
            '-U', self.ipmi.user,
            '-f', pw_file,
            'A', 'B', 'C',
        ]
        utils.execute(*args, attempts=3).AndReturn(('', ''))
        bm_utils.unlink_without_raise(pw_file).AndReturn(None)
        self.mox.ReplayAll()
        self.ipmi._exec_ipmitool('A B C')
        self.mox.VerifyAll()
    def test_is_power(self):
        """_is_power parses the chassis state out of `power status` output."""
        self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is on\n"])
        self.mox.ReplayAll()
        self.ipmi._is_power("on")
        self.mox.VerifyAll()
    def test_power_already_on(self):
        """_power_on is a no-op (state -> ACTIVE) when already powered."""
        self.flags(ipmi_power_retry=0, group='baremetal')
        self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is on\n"])
        self.mox.ReplayAll()
        self.ipmi.state = baremetal_states.DELETED
        self.ipmi._power_on()
        self.mox.VerifyAll()
        self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
    def test_power_on_ok(self):
        """A successful power-on transitions the state to ACTIVE."""
        self.flags(ipmi_power_retry=0, group='baremetal')
        self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.ipmi._exec_ipmitool("power on").AndReturn([])
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is on\n"])
        self.mox.ReplayAll()
        self.ipmi.state = baremetal_states.DELETED
        self.ipmi._power_on()
        self.mox.VerifyAll()
        self.assertEqual(self.ipmi.state, baremetal_states.ACTIVE)
    def test_power_on_fail(self):
        """With zero retries, a failed power-on leaves the state in ERROR."""
        self.flags(ipmi_power_retry=0, group='baremetal')
        self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.ipmi._exec_ipmitool("power on").AndReturn([])
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.mox.ReplayAll()
        self.ipmi.state = baremetal_states.DELETED
        self.ipmi._power_on()
        self.mox.VerifyAll()
        self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
    def test_power_on_max_retries(self):
        """With ipmi_power_retry=2, three attempts are made before ERROR."""
        self.flags(ipmi_power_retry=2, group='baremetal')
        self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
        # Record three full status/on cycles plus a final failing status:
        # the recorded call order below is exactly what mox enforces.
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.ipmi._exec_ipmitool("power on").AndReturn([])
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.ipmi._exec_ipmitool("power on").AndReturn([])
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.ipmi._exec_ipmitool("power on").AndReturn([])
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.mox.ReplayAll()
        self.ipmi.state = baremetal_states.DELETED
        self.ipmi._power_on()
        self.mox.VerifyAll()
        self.assertEqual(self.ipmi.state, baremetal_states.ERROR)
        self.assertEqual(self.ipmi.retries, 3)
    def test_power_off_ok(self):
        """A successful power-off transitions the state to DELETED."""
        self.flags(ipmi_power_retry=0, group='baremetal')
        self.mox.StubOutWithMock(self.ipmi, '_exec_ipmitool')
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is on\n"])
        self.ipmi._exec_ipmitool("power off").AndReturn([])
        self.ipmi._exec_ipmitool("power status").AndReturn(
            ["Chassis Power is off\n"])
        self.mox.ReplayAll()
        self.ipmi.state = baremetal_states.ACTIVE
        self.ipmi._power_off()
        self.mox.VerifyAll()
        self.assertEqual(self.ipmi.state, baremetal_states.DELETED)
    def test_get_console_pid_path(self):
        """The console pid file lives under terminal_pid_dir as <id>.pid."""
        self.flags(terminal_pid_dir='/tmp', group='baremetal')
        path = ipmi._get_console_pid_path(self.ipmi.node_id)
        self.assertEqual(path, '/tmp/%s.pid' % self.ipmi.node_id)
    def test_console_pid(self):
        """A numeric pid file is parsed into an int."""
        fd, path = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write("12345\n")
        self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
        ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
        self.mox.ReplayAll()
        pid = ipmi._get_console_pid(self.ipmi.node_id)
        bm_utils.unlink_without_raise(path)
        self.mox.VerifyAll()
        self.assertEqual(pid, 12345)
    def test_console_pid_nan(self):
        """A non-numeric pid file yields None instead of raising."""
        fd, path = tempfile.mkstemp()
        with os.fdopen(fd, 'w') as f:
            f.write("hello world\n")
        self.mox.StubOutWithMock(ipmi, '_get_console_pid_path')
        ipmi._get_console_pid_path(self.ipmi.node_id).AndReturn(path)
        self.mox.ReplayAll()
        pid = ipmi._get_console_pid(self.ipmi.node_id)
        bm_utils.unlink_without_raise(path)
        self.mox.VerifyAll()
        self.assertTrue(pid is None)
    def test_console_pid_file_not_found(self):
        """A missing pid file yields None."""
        pid_path = ipmi._get_console_pid_path(self.ipmi.node_id)
        self.mox.StubOutWithMock(os.path, 'exists')
        os.path.exists(pid_path).AndReturn(False)
        self.mox.ReplayAll()
        pid = ipmi._get_console_pid(self.ipmi.node_id)
        self.mox.VerifyAll()
        self.assertTrue(pid is None)
|
{
"content_hash": "df6c6525aa3c9b42aa498d4b46906663",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 73,
"avg_line_length": 36.25365853658536,
"alnum_prop": 0.6056243272335845,
"repo_name": "houshengbo/nova_vmware_compute_driver",
"id": "def6da66f0ee345c1c2fdbb27862f44a1976ae58",
"size": "8187",
"binary": false,
"copies": "1",
"ref": "refs/heads/attach-detach-VMware-iSCSI-driver",
"path": "nova/tests/baremetal/test_ipmi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "7173520"
},
{
"name": "Shell",
"bytes": "15478"
}
],
"symlink_target": ""
}
|
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisDnsPtStatusRegistered(object):
    """Parser tests for a registered-domain response from whois.dns.pt."""

    def setUp(self):
        """Build a Record from the canned whois.dns.pt fixture."""
        fixture_path = "spec/fixtures/responses/whois.dns.pt/status_registered.txt"
        host = "whois.dns.pt"
        # Close the fixture file deterministically instead of leaking the
        # handle (the original open() was never closed).
        with open(fixture_path, "r") as fixture:
            part = yawhois.record.Part(fixture.read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_nameservers(self):
        """All four nameservers are parsed as Nameserver objects, in order."""
        expected = ["ns4.google.com", "ns2.google.com",
                    "ns1.google.com", "ns3.google.com"]
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(len(self.record.nameservers), len(expected))
        for nameserver, name in zip(self.record.nameservers, expected):
            eq_(nameserver.__class__.__name__, 'Nameserver')
            eq_(nameserver.name, name)

    def test_registered(self):
        eq_(self.record.registered, True)

    def test_created_on(self):
        eq_(self.record.created_on.__class__.__name__, 'datetime')
        eq_(self.record.created_on, time_parse('2003-01-09 00:00:00 UTC'))

    def test_updated_on(self):
        # whois.dns.pt responses carry no update timestamp.
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.updated_on)

    def test_expires_on(self):
        eq_(self.record.expires_on.__class__.__name__, 'datetime')
        eq_(self.record.expires_on, time_parse('2015-02-28 00:00:00 UTC'))
|
{
"content_hash": "952bf0ac0a8828e94090a0837052006a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 87,
"avg_line_length": 41.72093023255814,
"alnum_prop": 0.633221850613155,
"repo_name": "huyphan/pyyawhois",
"id": "e6e008451986cc06f8955e0dc64db21f2d65e7c1",
"size": "2055",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_dns_pt_status_registered.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
}
|
from django import forms
class DateTimeWidget(forms.MultiWidget):
    """MultiWidget that splits a datetime into separate date and time sub-values."""

    def decompress(self, value):
        # With no value, each sub-widget renders empty.
        if not value:
            return [None, None]
        # Drop microseconds so the time sub-widget shows a clean value.
        return [value.date(), value.time().replace(microsecond=0)]
|
{
"content_hash": "7db149805c097dc71c468f1ed31e4098",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 70,
"avg_line_length": 30.857142857142858,
"alnum_prop": 0.6574074074074074,
"repo_name": "alexkyllo/django-calendar-events",
"id": "6fea3a0e92a72fd86812583ce7cf1f4721555bd0",
"size": "216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calendar_events/widgets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12084"
},
{
"name": "JavaScript",
"bytes": "2352"
},
{
"name": "Python",
"bytes": "11899"
}
],
"symlink_target": ""
}
|
import time
from mitmproxy.net import websockets
from pathod import language
from mitmproxy import exceptions
class WebsocketsProtocol:
    """Pathod-side websocket handling: logs inbound frames and serves
    reflected frame specs embedded in frame payloads."""

    def __init__(self, pathod_handler):
        # Connection handler providing rfile/wfile, TLS state and logging.
        self.pathod_handler = pathod_handler

    def handle_websocket(self, logger):
        """Read websocket frames from the peer until a read error occurs.

        Every inbound frame is recorded via ``addlog``. If a frame payload
        starts with the nested-spec leader, the rest of the payload is
        parsed as a pathod websocket frame spec and the resulting frames
        are written back to the peer. Returns ``(None, None)`` on read or
        parse failure.
        """
        while True:
            with logger.ctx() as lg:
                started = time.time()
                try:
                    frm = websockets.Frame.from_file(self.pathod_handler.rfile)
                except exceptions.NetlibException as e:
                    # Peer closed or sent garbage; stop handling this connection.
                    lg("Error reading websocket frame: %s" % e)
                    return None, None
                ended = time.time()
                lg(repr(frm))
                # Log entry for the inbound frame; cipher filled in below for TLS.
                retlog = dict(
                    type="inbound",
                    protocol="websockets",
                    started=started,
                    duration=ended - started,
                    frame=dict(
                    ),
                    cipher=None,
                )
                if self.pathod_handler.tls_established:
                    retlog["cipher"] = self.pathod_handler.get_current_cipher()
                self.pathod_handler.addlog(retlog)
                ld = language.websockets.NESTED_LEADER
                # Payloads prefixed with the nested leader embed a frame spec
                # that we parse and reflect back to the client.
                if frm.payload.startswith(ld):
                    nest = frm.payload[len(ld):]
                    try:
                        wf_gen = language.parse_websocket_frame(nest.decode())
                    except language.exceptions.ParseException as v:
                        logger.write(
                            "Parse error in reflected frame specifcation:"
                            " %s" % v.msg
                        )
                        return None, None
                    # Serve each generated frame and log what was crafted.
                    for frm in wf_gen:
                        with logger.ctx() as lg:
                            frame_log = language.serve(
                                frm,
                                self.pathod_handler.wfile,
                                self.pathod_handler.settings
                            )
                            lg("crafting websocket spec: %s" % frame_log["spec"])
                            self.pathod_handler.addlog(frame_log)
|
{
"content_hash": "63d6096cbd5676a3d7db78ee8bd29c20",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 37.61818181818182,
"alnum_prop": 0.4775253745770904,
"repo_name": "zlorb/mitmproxy",
"id": "63e6ee0ba86a0f320809384c988564ee412a82fd",
"size": "2069",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "pathod/protocols/websockets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20941"
},
{
"name": "HTML",
"bytes": "14747"
},
{
"name": "JavaScript",
"bytes": "276327"
},
{
"name": "PowerShell",
"bytes": "495"
},
{
"name": "Python",
"bytes": "1725551"
},
{
"name": "Shell",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
from flask import Blueprint

# Blueprint collecting this package's route handlers; the application
# registers this object to expose them.
routes = Blueprint('routes', __name__)

# Imported for side effects: RouteManager's view functions attach
# themselves to the `routes` blueprint defined above.
from .RouteManager import *
|
{
"content_hash": "eb3cc49aa10c38056c69575f056d915e",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 38,
"avg_line_length": 19.2,
"alnum_prop": 0.7395833333333334,
"repo_name": "I2MAX-LearningProject/Flask-server",
"id": "246c520476d3554e254ac79364834255b5587ba6",
"size": "96",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Route/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "50519"
},
{
"name": "HTML",
"bytes": "34826"
},
{
"name": "JavaScript",
"bytes": "13905"
},
{
"name": "Jupyter Notebook",
"bytes": "471773"
},
{
"name": "Python",
"bytes": "48594"
},
{
"name": "Shell",
"bytes": "231"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_head_api_operation_policy.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Check existence of an API operation policy (HEAD-style request).

    Credentials are resolved from the environment (see the module
    docstring); the entity-tag response is printed.
    """
    api_client = ApiManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )
    # get_entity_tag issues the HEAD request for the named operation policy.
    etag_response = api_client.api_operation_policy.get_entity_tag(
        resource_group_name="rg1",
        service_name="apimService1",
        api_id="5600b539c53f5b0062040001",
        operation_id="5600b53ac53f5b0062080006",
        policy_id="policy",
    )
    print(etag_response)


# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementHeadApiOperationPolicy.json
if __name__ == "__main__":
    main()
|
{
"content_hash": "6af361979e44ac92989fb6d9d8cd6333",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 158,
"avg_line_length": 34.388888888888886,
"alnum_prop": 0.7302100161550888,
"repo_name": "Azure/azure-sdk-for-python",
"id": "c4080e717790bb83ee9900bfa0e22333f7766135",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_head_api_operation_policy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Ipv6Routes(A10BaseClass):

    """Single IPv6 RIB route entry. No CRUD operations; use the parent object.

    Fields (per the AXAPI schema):
        Distance, Metric, PrefixLen : number
        Interface : string
        Nexthop, Prefix : ipv6-address
        Subtype : enum (inter-area, nssa-type-1, nssa-type-2,
            external-type-1, external-type-2, level-1, level-2)
        Type : enum (kernel, connected, static, rip, ospf, bgp, isis, vip,
            selected-vip, ip-nat-list, ip-nat, floating-ip, a10)
        DeviceProxy : device proxy for REST operations and session handling
            (see `common/device_proxy.py`)
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "IPv6-routes"
        self.DeviceProxy = ""
        # Every schema field defaults to the empty string.
        for field in ("Distance", "Interface", "Metric", "Nexthop",
                      "Subtype", "Prefix", "PrefixLen", "Type"):
            setattr(self, field, "")
        # Caller-supplied keyword values override the defaults above.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Oper(A10BaseClass):

    """Operational-data container for the IPv6 RIB. No CRUD operations;
    use the parent object.

    Fields (per the AXAPI schema):
        IPv6_routes : list of route-entry objects (see `Ipv6Routes`)
        Total, Limit : number
        Description : string
        DeviceProxy : device proxy for REST operations and session handling
            (see `common/device_proxy.py`)
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.b_key = "oper"
        self.DeviceProxy = ""
        # Route list starts empty; scalar fields default to the empty string.
        self.IPv6_routes = []
        for field in ("Total", "Limit", "Description"):
            setattr(self, field, "")
        # Caller-supplied keyword values override the defaults above.
        for name, value in kwargs.items():
            setattr(self, name, value)
class Rib(A10BaseClass):

    """Operational status for the `rib` object.

    Supports CRUD operations (inherits from `common/A10BaseClass`); this is
    the PARENT class for this module.

    URL for this object::

        `https://<Hostname|Ip address>//axapi/v3/ipv6/rib/oper`

    :param DeviceProxy: device proxy for REST operations and session
        handling; refer to `common/device_proxy.py`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []
        self.b_key = "rib"
        self.a10_url = "/axapi/v3/ipv6/rib/oper"
        self.DeviceProxy = ""
        # Operational data is fetched lazily into this mapping.
        self.oper = {}
        # Caller-supplied keyword values override the defaults above.
        for name, value in kwargs.items():
            setattr(self, name, value)
|
{
"content_hash": "65ad825aac90a1d6e8c27f0007999daf",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 826,
"avg_line_length": 38.775510204081634,
"alnum_prop": 0.5778947368421052,
"repo_name": "amwelch/a10sdk-python",
"id": "ce6d651091911aef895e2f52cf691e00da65c0f6",
"size": "3800",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/ipv6/ipv6_rib_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
}
|
r"""
Nonlinear solvers
-----------------
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
~~~~~~~~
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
~~~~~~~~
**Small problem**
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
**Large problem**
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print('Residual: %g' % abs(residual(sol)).max())
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolormesh(x, y, sol, shading='gouraud')
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as SciPy.
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
from scipy.linalg import get_blas_funcs
import inspect
from scipy._lib._util import getfullargspec_no_self as _getfullargspec
from .linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
    """Raised by `nonlin_solve` when no solution is found within `maxiter`;
    the exception argument carries the last iterate (see `nonlin_solve`)."""
    pass
def maxnorm(x):
    """Maximum (infinity) norm: the largest absolute entry of `x`."""
    return np.max(np.absolute(x))
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
xin : array_like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : ndarray
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
                 maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 tol_norm=None, line_search='armijo', callback=None,
                 full_output=False, raise_exception=True):
    """
    Find a root of a function, in a way suitable for large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    jacobian : Jacobian
        A Jacobian approximation: `Jacobian` object or something that
        `asjacobian` can transform to one. Alternatively, a string specifying
        which of the builtin Jacobian approximations to use:

            krylov, broyden1, broyden2, anderson
            diagbroyden, linearmixing, excitingmixing

    %(params_extra)s
    full_output : bool
        If true, returns a dictionary `info` containing convergence
        information.
    raise_exception : bool
        If True, a `NoConvergence` exception is raise if no solution is found.

    See Also
    --------
    asjacobian, Jacobian

    Notes
    -----
    This algorithm implements the inexact Newton method, with
    backtracking or full line searches. Several Jacobian
    approximations are available, including Krylov and Quasi-Newton
    methods.

    References
    ----------
    .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
       Equations\". Society for Industrial and Applied Mathematics. (1995)
       https://archive.siam.org/books/kelley/fr16/

    """
    # Can't use default parameters because it's being explicitly passed as None
    # from the calling function, so we need to set it here.
    tol_norm = maxnorm if tol_norm is None else tol_norm

    condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
                                     x_tol=x_tol, x_rtol=x_rtol,
                                     iter=iter, norm=tol_norm)

    # Work on flattened inexact (float/complex) copies internally; map back
    # to x0's shape and array subclass only at exit.
    x0 = _as_inexact(x0)
    func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
    x = x0.flatten()

    dx = np.full_like(x, np.inf)
    Fx = func(x)
    Fx_norm = norm(Fx)

    jacobian = asjacobian(jacobian)
    jacobian.setup(x.copy(), Fx, func)

    if maxiter is None:
        if iter is not None:
            maxiter = iter + 1
        else:
            maxiter = 100*(x.size+1)

    # Normalize the line_search flag to one of None / 'armijo' / 'wolfe'.
    if line_search is True:
        line_search = 'armijo'
    elif line_search is False:
        line_search = None

    if line_search not in (None, 'armijo', 'wolfe'):
        raise ValueError("Invalid line search")

    # Solver tolerance selection
    gamma = 0.9
    eta_max = 0.9999
    eta_treshold = 0.1
    eta = 1e-3

    for n in range(maxiter):
        status = condition.check(Fx, x, dx)
        if status:
            break

        # The tolerance, as computed for scipy.sparse.linalg.* routines
        tol = min(eta, eta*Fx_norm)
        dx = -jacobian.solve(Fx, tol=tol)

        if norm(dx) == 0:
            raise ValueError("Jacobian inversion yielded zero vector. "
                             "This indicates a bug in the Jacobian "
                             "approximation.")

        # Line search, or Newton step
        if line_search:
            s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
                                                        line_search)
        else:
            s = 1.0
            x = x + dx
            Fx = func(x)
            Fx_norm_new = norm(Fx)

        jacobian.update(x.copy(), Fx)

        if callback:
            callback(x, Fx)

        # Adjust forcing parameters for inexact methods
        eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
        if gamma * eta**2 < eta_treshold:
            eta = min(eta_max, eta_A)
        else:
            eta = min(eta_max, max(eta_A, gamma*eta**2))

        Fx_norm = Fx_norm_new

        # Print status
        if verbose:
            sys.stdout.write("%d: |F(x)| = %g; step %g\n" % (
                n, tol_norm(Fx), s))
            sys.stdout.flush()
    else:
        # Loop exhausted without the break above: no convergence.
        if raise_exception:
            raise NoConvergence(_array_like(x, x0))
        else:
            status = 2

    if full_output:
        info = {'nit': condition.iteration,
                'fun': Fx,
                'status': status,
                'success': status == 1,
                'message': {1: 'A solution was found at the specified '
                               'tolerance.',
                            2: 'The maximum number of iterations allowed '
                               'has been reached.'
                            }[status]
                }
        return _array_like(x, x0), info
    else:
        return _array_like(x, x0)


_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
                        smin=1e-2):
    """Line search along direction `dx`; returns (s, x_new, Fx_new, |Fx_new|)."""
    # One-element caches of the most recently evaluated step length, residual
    # and merit value, so the final point is not re-evaluated needlessly.
    tmp_s = [0]
    tmp_Fx = [Fx]
    tmp_phi = [norm(Fx)**2]
    s_norm = norm(x) / norm(dx)

    def phi(s, store=True):
        # Merit function |F(x + s*dx)|^2 (with non-finite values mapped to inf).
        if s == tmp_s[0]:
            return tmp_phi[0]
        xt = x + s*dx
        v = func(xt)
        p = _safe_norm(v)**2
        if store:
            tmp_s[0] = s
            tmp_phi[0] = p
            tmp_Fx[0] = v
        return p

    def derphi(s):
        # Forward-difference derivative of phi, step scaled by rdiff.
        ds = (abs(s) + s_norm + 1) * rdiff
        return (phi(s+ds, store=False) - phi(s)) / ds

    if search_type == 'wolfe':
        s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
                                             xtol=1e-2, amin=smin)
    elif search_type == 'armijo':
        s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
                                       amin=smin)

    if s is None:
        # XXX: No suitable step length found. Take the full Newton step,
        # and hope for the best.
        s = 1.0

    x = x + s*dx
    # Reuse the cached residual when the accepted step was the last evaluated.
    if s == tmp_s[0]:
        Fx = tmp_Fx[0]
    else:
        Fx = func(x)
    Fx_norm = norm(Fx)

    return s, x, Fx, Fx_norm
class TerminationCondition:
    """
    Termination condition for an iteration. It is terminated if

    - |F| < f_rtol*|F_0|, AND
    - |F| < f_tol

    AND

    - |dx| < x_rtol*|x|, AND
    - |dx| < x_tol
    """

    def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
                 iter=None, norm=maxnorm):
        if f_tol is None:
            # Default absolute residual tolerance ~ eps**(1/3) (~6e-6).
            # np.float_ was removed in NumPy 2.0; np.float64 is the same type.
            f_tol = np.finfo(np.float64).eps ** (1./3)
        # Unused tolerances become +inf so they never block termination.
        if f_rtol is None:
            f_rtol = np.inf
        if x_tol is None:
            x_tol = np.inf
        if x_rtol is None:
            x_rtol = np.inf

        self.x_tol = x_tol
        self.x_rtol = x_rtol
        self.f_tol = f_tol
        self.f_rtol = f_rtol

        self.norm = norm

        self.iter = iter

        self.f0_norm = None   # |F_0|, recorded on the first check() call
        self.iteration = 0    # number of check() calls made so far

    def check(self, f, x, dx):
        """Return a nonzero status when the iteration should stop.

        1: converged (or hit an exact root); 2: fixed iteration count
        exhausted (`iter` mode); 0: continue iterating.
        """
        self.iteration += 1
        f_norm = self.norm(f)
        x_norm = self.norm(x)
        dx_norm = self.norm(dx)

        if self.f0_norm is None:
            self.f0_norm = f_norm

        if f_norm == 0:
            return 1

        if self.iter is not None:
            # backwards compatibility with SciPy 0.6.0
            return 2 * (self.iteration > self.iter)

        # NB: condition must succeed for rtol=inf even if norm == 0
        return int((f_norm <= self.f_tol
                    and f_norm/self.f_rtol <= self.f0_norm)
                   and (dx_norm <= self.x_tol
                        and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian:
    """
    Common interface for Jacobians or Jacobian approximations.

    An instance can be configured directly with keyword callables, or the
    behavior supplied by subclassing. Provided operations:

    solve
        Returns J^-1 * v
    update
        Updates the approximation to point `x` (with residual `Fx` there)
    matvec, rmatvec, rsolve, matmat, todense : optional
        J * v, A^H * v, A^-H * v, A * V, and the dense matrix form;
        useful e.g. for trust-region algorithms and testing.

    Attributes
    ----------
    shape
        Matrix dimensions (M, N)
    dtype
        Data type of the matrix.
    func : callable, optional
        Function the Jacobian corresponds to (set by `setup`).
    """

    def __init__(self, **kw):
        allowed = ("solve", "update", "matvec", "rmatvec", "rsolve",
                   "matmat", "todense", "shape", "dtype")
        for name, value in kw.items():
            if name not in allowed:
                raise ValueError("Unknown keyword argument %s" % name)
            # None means "not provided": keep the class-level fallback.
            if value is not None:
                setattr(self, name, value)

        # Let np.asarray(self) work when a dense form is available.
        if hasattr(self, 'todense'):
            self.__array__ = lambda: self.todense()

    def aspreconditioner(self):
        """Wrap this Jacobian so its inverse acts as a preconditioner."""
        return InverseJacobian(self)

    def solve(self, v, tol=0):
        raise NotImplementedError

    def update(self, x, F):
        pass

    def setup(self, x, F, func):
        """Record the residual function and problem dimensions."""
        self.func = func
        self.shape = (F.size, x.size)
        self.dtype = F.dtype
        # If `setup` was not overridden, initialize at the starting point.
        if self.__class__.setup is Jacobian.setup:
            self.update(x, F)
class InverseJacobian:
    """Adapter presenting a Jacobian's *inverse* action as forward operations,
    so a Jacobian can serve as a preconditioner (matvec == jacobian.solve)."""

    def __init__(self, jacobian):
        self.jacobian = jacobian
        self.matvec = jacobian.solve
        self.update = jacobian.update
        # Forward optional capabilities when the wrapped object has them.
        for src_name, dst_name in (('setup', 'setup'), ('rsolve', 'rmatvec')):
            if hasattr(jacobian, src_name):
                setattr(self, dst_name, getattr(jacobian, src_name))

    @property
    def shape(self):
        return self.jacobian.shape

    @property
    def dtype(self):
        return self.jacobian.dtype
def asjacobian(J):
    """
    Convert given object to one suitable for use as a Jacobian.

    Accepts: a `Jacobian` instance or subclass, a dense ndarray, a sparse
    matrix, a duck-typed object with solve/shape/dtype, a callable ``J(x)``
    returning the Jacobian matrix, or the name of a builtin approximation.
    """
    spsolve = scipy.sparse.linalg.spsolve
    if isinstance(J, Jacobian):
        return J
    elif inspect.isclass(J) and issubclass(J, Jacobian):
        return J()
    elif isinstance(J, np.ndarray):
        # Constant dense Jacobian: wrap direct solves on the fixed matrix.
        if J.ndim > 2:
            raise ValueError('array must have rank <= 2')
        J = np.atleast_2d(np.asarray(J))
        if J.shape[0] != J.shape[1]:
            raise ValueError('array must be square')

        return Jacobian(matvec=lambda v: dot(J, v),
                        rmatvec=lambda v: dot(J.conj().T, v),
                        solve=lambda v: solve(J, v),
                        rsolve=lambda v: solve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif scipy.sparse.isspmatrix(J):
        # Constant sparse Jacobian: same idea, via sparse solves.
        if J.shape[0] != J.shape[1]:
            raise ValueError('matrix must be square')
        return Jacobian(matvec=lambda v: J*v,
                        rmatvec=lambda v: J.conj().T * v,
                        solve=lambda v: spsolve(J, v),
                        rsolve=lambda v: spsolve(J.conj().T, v),
                        dtype=J.dtype, shape=J.shape)
    elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
        # Duck-typed Jacobian-like object: forward its methods.
        return Jacobian(matvec=getattr(J, 'matvec'),
                        rmatvec=getattr(J, 'rmatvec'),
                        solve=J.solve,
                        rsolve=getattr(J, 'rsolve'),
                        update=getattr(J, 'update'),
                        setup=getattr(J, 'setup'),
                        dtype=J.dtype,
                        shape=J.shape)
    elif callable(J):
        # Assume it's a function J(x) that returns the Jacobian
        class Jac(Jacobian):
            def update(self, x, F):
                self.x = x

            def solve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m, v)
                else:
                    raise ValueError("Unknown matrix type")

            def matvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m, v)
                elif scipy.sparse.isspmatrix(m):
                    return m*v
                else:
                    raise ValueError("Unknown matrix type")

            def rsolve(self, v, tol=0):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return solve(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return spsolve(m.conj().T, v)
                else:
                    raise ValueError("Unknown matrix type")

            def rmatvec(self, v):
                m = J(self.x)
                if isinstance(m, np.ndarray):
                    return dot(m.conj().T, v)
                elif scipy.sparse.isspmatrix(m):
                    return m.conj().T * v
                else:
                    raise ValueError("Unknown matrix type")
        return Jac()
    elif isinstance(J, str):
        # Name of a builtin approximation, instantiated with defaults.
        return dict(broyden1=BroydenFirst,
                    broyden2=BroydenSecond,
                    anderson=Anderson,
                    diagbroyden=DiagBroyden,
                    linearmixing=LinearMixing,
                    excitingmixing=ExcitingMixing,
                    krylov=KrylovJacobian)[J]()
    else:
        raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
    """Shared machinery for Broyden-like approximations: remembers the last
    point/residual, autoscales `alpha`, and feeds secant differences to the
    subclass's `_update`."""

    def setup(self, x0, f0, func):
        Jacobian.setup(self, x0, f0, func)
        self.last_f = f0
        self.last_x = x0

        if hasattr(self, 'alpha') and self.alpha is None:
            # Autoscale the initial Jacobian parameter
            # unless we have already guessed the solution.
            f0_norm = norm(f0)
            self.alpha = 0.5*max(norm(x0), 1) / f0_norm if f0_norm else 1.0

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        raise NotImplementedError

    def update(self, x, f):
        # Secant-pair differences since the previous update.
        df = f - self.last_f
        dx = x - self.last_x
        self._update(x, f, dx, df, norm(dx), norm(df))
        self.last_f = f
        self.last_x = x
class LowRankMatrix:
    r"""
    A matrix represented as

    .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger

    However, if the rank of the matrix reaches the dimension of the vectors,
    full matrix representation will be used thereon.
    """

    def __init__(self, alpha, n, dtype):
        self.alpha = alpha       # scalar multiple of the identity
        self.cs = []             # left rank-1 vectors c_n
        self.ds = []             # right rank-1 vectors d_n
        self.n = n               # dimension of the (square) matrix
        self.dtype = dtype
        self.collapsed = None    # dense form, once full rank is reached

    @staticmethod
    def _matvec(v, alpha, cs, ds):
        """Evaluate w = M v via BLAS: alpha*v plus rank-1 contributions."""
        axpy, scal, dotc = get_blas_funcs(['axpy', 'scal', 'dotc'],
                                          cs[:1] + [v])
        w = alpha * v
        for c, d in zip(cs, ds):
            a = dotc(d, v)
            w = axpy(c, w, w.size, a)
        return w

    @staticmethod
    def _solve(v, alpha, cs, ds):
        """Evaluate w = M^-1 v"""
        if len(cs) == 0:
            return v/alpha

        # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1

        axpy, dotc = get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])

        c0 = cs[0]
        A = alpha * np.identity(len(cs), dtype=c0.dtype)
        for i, d in enumerate(ds):
            for j, c in enumerate(cs):
                A[i,j] += dotc(d, c)

        q = np.zeros(len(cs), dtype=c0.dtype)
        for j, d in enumerate(ds):
            q[j] = dotc(d, v)
        q /= alpha
        q = solve(A, q)

        w = v/alpha
        for c, qc in zip(cs, q):
            w = axpy(c, w, w.size, -qc)

        return w

    def matvec(self, v):
        """Evaluate w = M v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed, v)
        return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)

    def rmatvec(self, v):
        """Evaluate w = M^H v"""
        if self.collapsed is not None:
            return np.dot(self.collapsed.T.conj(), v)
        # Adjoint swaps the roles of cs and ds and conjugates alpha.
        return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)

    def solve(self, v, tol=0):
        """Evaluate w = M^-1 v"""
        if self.collapsed is not None:
            return solve(self.collapsed, v)
        return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)

    def rsolve(self, v, tol=0):
        """Evaluate w = M^-H v"""
        if self.collapsed is not None:
            return solve(self.collapsed.T.conj(), v)
        return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)

    def append(self, c, d):
        """Add the rank-1 term c d^H; collapse to dense once full rank."""
        if self.collapsed is not None:
            self.collapsed += c[:,None] * d[None,:].conj()
            return

        self.cs.append(c)
        self.ds.append(d)

        if len(self.cs) > c.size:
            self.collapse()

    def __array__(self):
        # Dense form: alpha*I plus all accumulated rank-1 updates.
        if self.collapsed is not None:
            return self.collapsed

        Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
        for c, d in zip(self.cs, self.ds):
            Gm += c[:,None]*d[None,:].conj()
        return Gm

    def collapse(self):
        """Collapse the low-rank matrix to a full-rank one."""
        self.collapsed = np.array(self)
        self.cs = None
        self.ds = None
        self.alpha = None

    def restart_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping all vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        if len(self.cs) > rank:
            del self.cs[:]
            del self.ds[:]

    def simple_reduce(self, rank):
        """
        Reduce the rank of the matrix by dropping oldest vectors.
        """
        if self.collapsed is not None:
            return
        assert rank > 0
        while len(self.cs) > rank:
            del self.cs[0]
            del self.ds[0]

    def svd_reduce(self, max_rank, to_retain=None):
        """
        Reduce the rank of the matrix by retaining some SVD components.

        This corresponds to the \"Broyden Rank Reduction Inverse\"
        algorithm described in [1]_.

        Note that the SVD decomposition can be done by solving only a
        problem whose size is the effective rank of this matrix, which
        is viable even for large problems.

        Parameters
        ----------
        max_rank : int
            Maximum rank of this matrix after reduction.
        to_retain : int, optional
            Number of SVD components to retain when reduction is done
            (ie. rank > max_rank). Default is ``max_rank - 2``.

        References
        ----------
        .. [1] B.A. van der Rotten, PhD thesis,
           \"A limited memory Broyden method to solve high-dimensional
           systems of nonlinear equations\". Mathematisch Instituut,
           Universiteit Leiden, The Netherlands (2003).

           https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

        """
        if self.collapsed is not None:
            return

        p = max_rank
        if to_retain is not None:
            q = to_retain
        else:
            q = p - 2

        if self.cs:
            p = min(p, len(self.cs[0]))
        q = max(0, min(q, p-1))

        m = len(self.cs)
        if m < p:
            # nothing to do
            return

        # Orthonormalize D and fold R into C, then SVD the small factor.
        C = np.array(self.cs).T
        D = np.array(self.ds).T

        D, R = qr(D, mode='economic')
        C = dot(C, R.T.conj())

        U, S, WH = svd(C, full_matrices=False, compute_uv=True)

        C = dot(C, inv(WH))
        D = dot(D, WH.T.conj())

        # Keep only the q most significant components.
        for k in range(q):
            self.cs[k] = C[:,k].copy()
            self.ds[k] = D[:,k].copy()

        del self.cs[q:]
        del self.ds[q:]
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is ``(-1/alpha)``.
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Takes an extra parameter, ``to_retain``, which determines the
number of SVD components to retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (i.e., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
    r"""
    Find a root of a function, using Broyden's first Jacobian approximation.

    This method is also known as \"Broyden's good method\".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='broyden1'`` in particular.

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)

    which corresponds to Broyden's first Jacobian update

    .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx


    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       \"A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations\". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).

       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0]  + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.broyden1(fun, [0, 0])
    >>> sol
    array([0.84116396, 0.15883641])

    """

    def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        # Gm holds the inverse-Jacobian approximation H as a LowRankMatrix.
        self.Gm = None

        if max_rank is None:
            max_rank = np.inf
        self.max_rank = max_rank

        # reduction_method may be 'name' or ('name', param1, ...).
        if isinstance(reduction_method, str):
            reduce_params = ()
        else:
            reduce_params = reduction_method[1:]
            reduction_method = reduction_method[0]
        # First parameter of every reducer is the rank to keep (max_rank - 1,
        # leaving room for the rank-1 update added right after reduction).
        reduce_params = (max_rank - 1,) + reduce_params

        if reduction_method == 'svd':
            self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
        elif reduction_method == 'simple':
            self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
        elif reduction_method == 'restart':
            self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
        else:
            raise ValueError("Unknown rank reduction method '%s'" %
                             reduction_method)

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # Initial inverse Jacobian H_0 = -alpha * I.
        self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)

    def todense(self):
        return inv(self.Gm)

    def solve(self, f, tol=0):
        r = self.Gm.matvec(f)
        if not np.isfinite(r).all():
            # singular; reset the Jacobian approximation
            self.setup(self.last_x, self.last_f, self.func)
            return self.Gm.matvec(f)

    def matvec(self, f):
        return self.Gm.solve(f)

    def rsolve(self, f, tol=0):
        return self.Gm.rmatvec(f)

    def rmatvec(self, f):
        return self.Gm.rsolve(f)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Rank-1 secant update of H: H+ = H + (dx - H df) (dx^H H) / (dx^H H df).
        self._reduce()  # reduce first to preserve secant condition

        v = self.Gm.rmatvec(dx)
        c = dx - self.Gm.matvec(df)
        d = v / vdot(df, v)

        self.Gm.append(c, d)
class BroydenSecond(BroydenFirst):
    r"""
    Find a root of a function, using Broyden's second Jacobian approximation.

    This method is also known as "Broyden's bad method".

    Parameters
    ----------
    %(params_basic)s
    %(broyden_params)s
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='broyden2'`` in particular.

    Notes
    -----
    This algorithm implements the inverse Jacobian Quasi-Newton update

    .. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)

    corresponding to Broyden's second method.

    References
    ----------
    .. [1] B.A. van der Rotten, PhD thesis,
       "A limited memory Broyden method to solve high-dimensional
       systems of nonlinear equations". Mathematisch Instituut,
       Universiteit Leiden, The Netherlands (2003).

       https://web.archive.org/web/20161022015821/http://www.math.leidenuniv.nl/scripties/Rotten.pdf

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.broyden2(fun, [0, 0])
    >>> sol
    array([0.84116365, 0.15883529])

    """

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Trim the rank of Gm before appending so that the new column keeps
        # the approximation within max_rank and the secant condition holds.
        self._reduce()
        # "Bad" Broyden update: H+ = H + (dx - H df) df^H / (df^H df)
        correction = dx - self.Gm.matvec(df)
        weight = df / df_norm**2
        self.Gm.append(correction, weight)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
    """
    Find a root of a function, using (extended) Anderson mixing.

    The Jacobian is formed for a 'best' solution in the space
    spanned by the last `M` vectors. As a result, only MxM matrix
    inversions and MxN multiplications are required. [Ey]_

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    M : float, optional
        Number of previous vectors to retain. Defaults to 5.
    w0 : float, optional
        Regularization parameter for numerical stability.
        Compared to unity, good values of the order of 0.01.
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='anderson'`` in particular.

    References
    ----------
    .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.anderson(fun, [0, 0])
    >>> sol
    array([0.84116588, 0.15883789])

    """

    # Note:
    #
    # Anderson method maintains a rank M approximation of the inverse Jacobian,
    #
    #     J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
    #     A      = W + dF^H dF
    #     W      = w0^2 diag(dF^H dF)
    #
    # so that for w0 = 0 the secant condition applies for last M iterates, i.e.,
    #
    #     J^-1 df_j = dx_j
    #
    # for all j = 0 ... M-1.
    #
    # Moreover, (from Sherman-Morrison-Woodbury formula)
    #
    #     J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
    #     C   = (dX + alpha dF) A^-1
    #     b   = -1/alpha
    #
    # and after simplification
    #
    #     J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
    #

    def __init__(self, alpha=None, w0=0.01, M=5):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.M = M
        # Histories of the last <= M steps (dx) and residual differences (df).
        self.dx = []
        self.df = []
        self.gamma = None
        self.w0 = w0

    def solve(self, f, tol=0):
        # Apply J^-1 to f: start from simple mixing, then correct using
        # the stored history (see the note above).
        dx = -self.alpha*f

        n = len(self.dx)
        if n == 0:
            return dx

        # Projections of f onto the stored residual differences.
        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        try:
            gamma = solve(self.a, df_f)
        except LinAlgError:
            # singular; reset the Jacobian approximation
            del self.dx[:]
            del self.df[:]
            return dx

        for m in range(n):
            dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
        return dx

    def matvec(self, f):
        # Apply J to f, using the simplified Sherman-Morrison-Woodbury
        # formula from the note above.
        dx = -f/self.alpha

        n = len(self.dx)
        if n == 0:
            return dx

        df_f = np.empty(n, dtype=f.dtype)
        for k in range(n):
            df_f[k] = vdot(self.df[k], f)

        # b = dF^H dX - alpha W  (regularized only on the diagonal).
        b = np.empty((n, n), dtype=f.dtype)
        for i in range(n):
            for j in range(n):
                b[i,j] = vdot(self.df[i], self.dx[j])
                if i == j and self.w0 != 0:
                    b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
        gamma = solve(b, df_f)

        for m in range(n):
            dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
        return dx

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        if self.M == 0:
            return

        self.dx.append(dx)
        self.df.append(df)

        # Keep only the last M iterates.
        while len(self.dx) > self.M:
            self.dx.pop(0)
            self.df.pop(0)

        n = len(self.dx)
        # Rebuild the regularized Gram matrix A = W + dF^H dF used by
        # solve(); only the upper triangle is computed, then mirrored.
        a = np.zeros((n, n), dtype=f.dtype)

        for i in range(n):
            for j in range(i, n):
                if i == j:
                    wd = self.w0**2
                else:
                    wd = 0
                a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])

        a += np.triu(a, 1).T.conj()
        self.a = a
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
    """
    Find a root of a function, using diagonal Broyden Jacobian approximation.

    The Jacobian approximation is derived from previous iterations, by
    retaining only the diagonal of Broyden matrices.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial guess for the Jacobian is (-1/alpha).
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='diagbroyden'`` in particular.

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
    ...             0.5 * (x[1] - x[0])**3 + x[1]]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.diagbroyden(fun, [0, 0])
    >>> sol
    array([0.84116403, 0.15883384])

    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        # The Jacobian is approximated as J = -diag(d); start from the
        # constant diagonal 1/alpha.
        self.d = np.full((self.shape[0],), 1 / self.alpha, dtype=self.dtype)

    def solve(self, f, tol=0):
        # J^-1 f for the diagonal approximation.
        return -(f / self.d)

    def matvec(self, f):
        # J f for the diagonal approximation.
        return -(f * self.d)

    def rsolve(self, f, tol=0):
        # (J^H)^-1 f: conjugate the stored diagonal.
        return -(f / self.d.conj())

    def rmatvec(self, f):
        # J^H f: conjugate the stored diagonal.
        return -(f * self.d.conj())

    def todense(self):
        return np.diag(-self.d)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Diagonal-only Broyden update of d.
        numerator = (df + self.d * dx) * dx
        self.d -= numerator / dx_norm**2
class LinearMixing(GenericBroyden):
    """
    Find a root of a function, using a scalar Jacobian approximation.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        The Jacobian approximation is (-1/alpha).
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='linearmixing'`` in particular.

    """

    def __init__(self, alpha=None):
        GenericBroyden.__init__(self)
        self.alpha = alpha

    def solve(self, f, tol=0):
        # J^-1 f for the fixed scalar Jacobian J = -1/alpha * I.
        return -(self.alpha * f)

    def matvec(self, f):
        # J f for the fixed scalar Jacobian.
        return -(f / self.alpha)

    def rsolve(self, f, tol=0):
        # Adjoint inverse: conjugate the scalar.
        return -(np.conj(self.alpha) * f)

    def rmatvec(self, f):
        # Adjoint: conjugate the scalar.
        return -(f / np.conj(self.alpha))

    def todense(self):
        return np.diag(np.full(self.shape[0], -1/self.alpha))

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # The scalar Jacobian is fixed; nothing to update.
        pass
class ExcitingMixing(GenericBroyden):
    """
    Find a root of a function, using a tuned diagonal Jacobian approximation.

    The Jacobian matrix is diagonal and is tuned on each iteration.

    .. warning::

       This algorithm may be useful for specific problems, but whether
       it will work may depend strongly on the problem.

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='excitingmixing'`` in particular.

    Parameters
    ----------
    %(params_basic)s
    alpha : float, optional
        Initial Jacobian approximation is (-1/alpha).
    alphamax : float, optional
        The entries of the diagonal Jacobian are kept in the range
        ``[alpha, alphamax]``.
    %(params_extra)s

    """

    def __init__(self, alpha=None, alphamax=1.0):
        GenericBroyden.__init__(self)
        self.alpha = alpha
        self.alphamax = alphamax
        # Per-component mixing coefficients; allocated in setup().
        self.beta = None

    def setup(self, x, F, func):
        GenericBroyden.setup(self, x, F, func)
        self.beta = np.full((self.shape[0],), self.alpha, dtype=self.dtype)

    def solve(self, f, tol=0):
        # J^-1 f for the diagonal J = -diag(1/beta).
        return -(self.beta * f)

    def matvec(self, f):
        return -(f / self.beta)

    def rsolve(self, f, tol=0):
        # Adjoint inverse: conjugate the diagonal.
        return -(self.beta.conj() * f)

    def rmatvec(self, f):
        return -(f / self.beta.conj())

    def todense(self):
        return np.diag(-1/self.beta)

    def _update(self, x, f, dx, df, dx_norm, df_norm):
        # Components whose residual kept its sign get a larger mixing
        # coefficient; a sign flip resets the coefficient back to alpha.
        same_sign = f * self.last_f > 0
        self.beta[same_sign] += self.alpha
        self.beta[~same_sign] = self.alpha
        np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
    r"""
    Find a root of a function, using Krylov approximation for inverse Jacobian.

    This method is suitable for solving large-scale problems.

    Parameters
    ----------
    %(params_basic)s
    rdiff : float, optional
        Relative step size to use in numerical differentiation.
    method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
        Krylov method to use to approximate the Jacobian.
        Can be a string, or a function implementing the same interface as
        the iterative solvers in `scipy.sparse.linalg`.

        The default is `scipy.sparse.linalg.lgmres`.
    inner_maxiter : int, optional
        Parameter to pass to the "inner" Krylov solver: maximum number of
        iterations. Iteration will stop after maxiter steps even if the
        specified tolerance has not been achieved.
    inner_M : LinearOperator or InverseJacobian
        Preconditioner for the inner Krylov iteration.
        Note that you can use also inverse Jacobians as (adaptive)
        preconditioners. For example,

        >>> from scipy.optimize.nonlin import BroydenFirst, KrylovJacobian
        >>> from scipy.optimize.nonlin import InverseJacobian
        >>> jac = BroydenFirst()
        >>> kjac = KrylovJacobian(inner_M=InverseJacobian(jac))

        If the preconditioner has a method named 'update', it will be called
        as ``update(x, f)`` after each nonlinear step, with ``x`` giving
        the current point, and ``f`` the current function value.
    outer_k : int, optional
        Size of the subspace kept across LGMRES nonlinear iterations.
        See `scipy.sparse.linalg.lgmres` for details.
    inner_kwargs : kwargs
        Keyword parameters for the "inner" Krylov solver
        (defined with `method`). Parameter names must start with
        the `inner_` prefix which will be stripped before passing on
        the inner method. See, e.g., `scipy.sparse.linalg.gmres` for details.
    %(params_extra)s

    See Also
    --------
    root : Interface to root finding algorithms for multivariate
           functions. See ``method=='krylov'`` in particular.
    scipy.sparse.linalg.gmres
    scipy.sparse.linalg.lgmres

    Notes
    -----
    This function implements a Newton-Krylov solver. The basic idea is
    to compute the inverse of the Jacobian with an iterative Krylov
    method. These methods require only evaluating the Jacobian-vector
    products, which are conveniently approximated by a finite difference:

    .. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega

    Due to the use of iterative matrix inverses, these methods can
    deal with large nonlinear problems.

    SciPy's `scipy.sparse.linalg` module offers a selection of Krylov
    solvers to choose from. The default here is `lgmres`, which is a
    variant of restarted GMRES iteration that reuses some of the
    information obtained in the previous Newton steps to invert
    Jacobians in subsequent steps.

    For a review on Newton-Krylov methods, see for example [1]_,
    and for the LGMRES sparse inverse method, see [2]_.

    References
    ----------
    .. [1] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2004).
           :doi:`10.1016/j.jcp.2003.08.010`
    .. [2] A.H. Baker and E.R. Jessup and T. Manteuffel,
           SIAM J. Matrix Anal. Appl. 26, 962 (2005).
           :doi:`10.1137/S0895479803422014`

    Examples
    --------
    The following functions define a system of nonlinear equations

    >>> def fun(x):
    ...     return [x[0] + 0.5 * x[1] - 1.0,
    ...             0.5 * (x[1] - x[0]) ** 2]

    A solution can be obtained as follows.

    >>> from scipy import optimize
    >>> sol = optimize.newton_krylov(fun, [0, 0])
    >>> sol
    array([0.66731771, 0.66536458])

    """

    def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
                 inner_M=None, outer_k=10, **kw):
        self.preconditioner = inner_M
        self.rdiff = rdiff
        # Resolve `method`: known names map to scipy solvers; anything
        # else is assumed to be a callable with the same interface.
        self.method = dict(
            bicgstab=scipy.sparse.linalg.bicgstab,
            gmres=scipy.sparse.linalg.gmres,
            lgmres=scipy.sparse.linalg.lgmres,
            cgs=scipy.sparse.linalg.cgs,
            minres=scipy.sparse.linalg.minres,
            ).get(method, method)

        self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)

        if self.method is scipy.sparse.linalg.gmres:
            # Replace GMRES's outer iteration with Newton steps: one
            # restart cycle of `inner_maxiter` iterations per Newton step.
            self.method_kw['restrt'] = inner_maxiter
            self.method_kw['maxiter'] = 1
            self.method_kw.setdefault('atol', 0)
        elif self.method is scipy.sparse.linalg.gcrotmk:
            self.method_kw.setdefault('atol', 0)
        elif self.method is scipy.sparse.linalg.lgmres:
            self.method_kw['outer_k'] = outer_k
            # Replace LGMRES's outer iteration with Newton steps
            self.method_kw['maxiter'] = 1
            # Carry LGMRES's `outer_v` vectors across nonlinear iterations
            self.method_kw.setdefault('outer_v', [])
            self.method_kw.setdefault('prepend_outer_v', True)
            # But don't carry the corresponding Jacobian*v products, in case
            # the Jacobian changes a lot in the nonlinear step
            #
            # XXX: some trust-region inspired ideas might be more efficient...
            #      See e.g., Brown & Saad. But needs to be implemented separately
            #      since it's not an inexact Newton method.
            self.method_kw.setdefault('store_outer_Av', False)
            self.method_kw.setdefault('atol', 0)

        # User-supplied inner solver options: the `inner_` prefix is
        # stripped before forwarding to the Krylov solver.
        for key, value in kw.items():
            if not key.startswith('inner_'):
                raise ValueError("Unknown parameter %s" % key)
            self.method_kw[key[6:]] = value

    def _update_diff_step(self):
        # Finite-difference step scaled to the current iterate and residual
        # magnitudes, so the perturbation stays relatively sized.
        mx = abs(self.x0).max()
        mf = abs(self.f0).max()
        self.omega = self.rdiff * max(1, mx) / max(1, mf)

    def matvec(self, v):
        # Jacobian-vector product J v, approximated by a forward
        # difference of the residual function along v.
        nv = norm(v)
        if nv == 0:
            return 0*v
        sc = self.omega / nv
        r = (self.func(self.x0 + sc*v) - self.f0) / sc
        if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
            raise ValueError('Function returned non-finite results')
        return r

    def solve(self, rhs, tol=0):
        # Delegate J^-1 rhs to the inner Krylov solver; only pass `tol`
        # if the caller has not already fixed one in method_kw.
        if 'tol' in self.method_kw:
            sol, info = self.method(self.op, rhs, **self.method_kw)
        else:
            sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
        return sol

    def update(self, x, f):
        self.x0 = x
        self.f0 = f
        self._update_diff_step()

        # Update also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'update'):
                self.preconditioner.update(x, f)

    def setup(self, x, f, func):
        Jacobian.setup(self, x, f, func)
        self.x0 = x
        self.f0 = f
        self.op = scipy.sparse.linalg.aslinearoperator(self)

        if self.rdiff is None:
            # Default relative step: sqrt(machine epsilon) of x's dtype.
            self.rdiff = np.finfo(x.dtype).eps ** (1./2)

        self._update_diff_step()

        # Setup also the preconditioner, if possible
        if self.preconditioner is not None:
            if hasattr(self.preconditioner, 'setup'):
                self.preconditioner.setup(x, f, func)
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
    """
    Construct a solver wrapper with given name and Jacobian approx.

    It inspects the keyword arguments of ``jac.__init__``, and allows to
    use the same arguments in the wrapper function, in addition to the
    keyword arguments of `nonlin_solve`
    """
    signature = _getfullargspec(jac.__init__)
    args, varargs, varkw, defaults, kwonlyargs, kwdefaults, _ = signature
    # Pair each defaulted __init__ parameter with its default; `defaults`
    # aligns with the tail of the positional argument list.
    kwargs = list(zip(args[-len(defaults):], defaults))
    # "name=default" fragment for the generated function signature.
    kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
    if kw_str:
        kw_str = ", " + kw_str
    # "name=name" fragment used to forward the arguments to the Jacobian.
    kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
    if kwkw_str:
        kwkw_str = kwkw_str + ", "
    # Keyword-only arguments are not representable in the template below.
    if kwonlyargs:
        raise ValueError('Unexpected signature %s' % signature)

    # Construct the wrapper function so that its keyword arguments
    # are visible in pydoc.help etc.
    wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
             f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
             tol_norm=None, line_search='armijo', callback=None, **kw):
    jac = %(jac)s(%(kwkw)s **kw)
    return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
                        f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
                        callback)
"""

    wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
                             kwkw=kwkw_str)
    # Evaluate the generated source in a copy of this module's globals so
    # that nonlin_solve and the Jacobian class resolve correctly.
    ns = {}
    ns.update(globals())
    exec(wrapper, ns)
    func = ns[name]
    # The wrapper inherits the Jacobian class docstring (with the shared
    # parameter templates substituted by _set_doc).
    func.__doc__ = jac.__doc__
    _set_doc(func)
    return func
# Public solver functions: one wrapper per Jacobian approximation class.
# Each accepts the Jacobian's __init__ keyword arguments in addition to
# the common nonlin_solve options (see _nonlin_wrapper above).
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
|
{
"content_hash": "82831350d21c832d1c5e983c4fb4ead7",
"timestamp": "",
"source": "github",
"line_count": 1660,
"max_line_length": 104,
"avg_line_length": 30.52530120481928,
"alnum_prop": 0.5446400378907483,
"repo_name": "ryfeus/lambda-packs",
"id": "6b0fc3414cfa3e06887c34b6b1a31c7cad47e5e6",
"size": "50672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Sklearn_x86/source/scipy/optimize/nonlin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
__author__ = 'kotaimen'
__date__ = '6/3/14'
"""
georest.geo.metadata
~~~~~~~~~~~~~~~~~~~~
Metadata of a geometry, aka geoindex helpers
"""
import collections
import geohash
import shapely.geometry
import shapely.geometry.base
class Metadata(collections.namedtuple('Metadata',
                                      '''bbox geohash cells''')):
    """Metadata (geo-index helpers) of a geometry.

    Fields:

    - `bbox`: bounding box of the geometry as a list
      ``[minx, miny, maxx, maxy]``.
    - `geohash`: geohash used for quick ad-hoc spatial search.  Point
      geometries use the maximum precision (12 characters); other types
      use a precision derived from their bounding box.  Only meaningful
      for lonlat coordinate reference systems.
    - `cells`: list of integer S2CellIDs covering the geometry; currently
      always empty (not implemented).
    """

    # Maximum geohash precision, applied to Point geometries.
    GEOHASH_LENGTH = 12

    @classmethod
    def make_metadata(cls, geometry=None):
        """Build a Metadata instance describing `geometry`."""
        return cls(calc_bbox(geometry),
                   calc_geohash(geometry, Metadata.GEOHASH_LENGTH),
                   [])

    def spawn(self, geometry):
        """Return a copy of this metadata recomputed for `geometry`."""
        assert geometry is not None
        return self._replace(
            bbox=calc_bbox(geometry),
            geohash=calc_geohash(geometry, Metadata.GEOHASH_LENGTH))
def calc_bbox(geom):
    """Calculate the bounding box of a geometry.

    :param geom: a shapely geometry.
    :return: ``[minx, miny, maxx, maxy]`` as a plain list.
    :raises TypeError: if `geom` is not a shapely geometry.
    """
    # Explicit validation instead of `assert`: assertions are stripped when
    # python runs with -O, which would silently admit invalid input.
    if not isinstance(geom, shapely.geometry.base.BaseGeometry):
        raise TypeError('geom must be a shapely geometry, got %r' % type(geom))
    return list(geom.bounds)
def calc_geohash(geom, length=7, ignore_crs=False):
    """Calculate geohash of the geometry, mimicking postgis ``st_geohash``.

    `geom` must be a geometry with lonlat coordinates; `length` is the
    length of the returned hash string for Point geometry.  For other
    geometry types the precision is reduced to the longest common prefix
    of the geohashes of the two bounding-box corners.

    :param geom: a shapely geometry (lonlat CRS unless `ignore_crs`).
    :param length: hash length for points; an int greater than 1.
    :param ignore_crs: skip the CRS check when True.
    :return: geohash string, or ``''`` for an empty geometry or a
        non-lonlat CRS.
    :raises TypeError: if `geom` is not a shapely geometry or `length`
        is not an int.
    :raises ValueError: if `length` is too short to be useful.
    """
    # Explicit validation instead of `assert` (stripped under python -O).
    if not isinstance(geom, shapely.geometry.base.BaseGeometry):
        raise TypeError('geom must be a shapely geometry, got %r' % type(geom))

    if geom.is_empty:
        return ''

    # Geohash only makes sense for lonlat coordinates.
    if not ignore_crs:
        crs = geom.crs
        if crs is None or not crs.proj.is_latlong():
            return ''

    if not isinstance(length, int):
        raise TypeError('length must be an int, got %r' % type(length))
    if length <= 1:
        # useless if precision is too short
        raise ValueError('length must be greater than 1, got %d' % length)

    if geom.geom_type == 'Point':
        return geohash.encode(geom.y, geom.x, length)
    else:
        (left, bottom, right, top) = geom.bounds
        # Hash both corners of the bounding box; the length of their common
        # prefix is the precision that still covers the whole geometry.
        hash1 = geohash.encode(bottom, left, length)
        hash2 = geohash.encode(top, right, length)
        try:
            bounds_precision = \
                list(x == y for x, y in zip(hash1, hash2)).index(False)
        except ValueError:
            # list.index throws ValueError if value is not found
            bounds_precision = length
        # Calculate geohash using center point and bounds precision
        return geohash.encode((top + bottom) / 2.,
                              (right + left) / 2.,
                              bounds_precision)
def calc_cell_union(geom):
    """Calculate the S2 cell union covering the geometry.

    Placeholder: raises ``NotImplementedError`` until the google.s2
    extension is implemented.
    """
    raise NotImplementedError
|
{
"content_hash": "db3831ad7bd7c73e7f1005753f72ae1e",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 80,
"avg_line_length": 30.21212121212121,
"alnum_prop": 0.6352390504847877,
"repo_name": "Kotaimen/georest",
"id": "c17f0cb4445676f04e241d8385a2ae3ed217f265",
"size": "3018",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "georest/geo/metadata.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Lua",
"bytes": "3316"
},
{
"name": "Python",
"bytes": "203216"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.db.models.signals import post_save
from sentry.adoption import manager
from sentry.models import FeatureAdoption, GroupTombstone
from sentry.plugins import IssueTrackingPlugin, IssueTrackingPlugin2
from sentry.plugins.bases.notify import NotificationPlugin
from sentry.receivers.rules import DEFAULT_RULE_LABEL, DEFAULT_RULE_DATA
from sentry.signals import (
alert_rule_created,
event_processed,
first_event_received,
project_created,
member_joined,
plugin_enabled,
user_feedback_received,
issue_assigned,
issue_resolved_in_release,
advanced_search,
save_search_created,
inbound_filter_toggled,
sso_enabled,
data_scrubber_enabled,
repo_linked,
release_created,
deploy_created,
resolved_with_commit,
ownership_rule_created,
issue_ignored,
)
from sentry.utils.javascript import has_sourcemap
# Tag keys that are set automatically by the server or by common SDKs.
# Any tag key outside this set is treated as a user-defined "custom tag"
# when recording feature adoption.
DEFAULT_TAGS = frozenset(
    [
        'level', 'logger', 'transaction', 'url', 'browser', 'sentry:user', 'os', 'server_name',
        'device', 'os.name', 'browser.name', 'sentry:release', 'environment', 'device.family',
        'site', 'version', 'interface_type', 'rake_task', 'runtime', 'runtime.name', 'type',
        'php_version', 'app', 'app.device', 'locale', 'os_version', 'device_model', 'deviceModel',
        'sentry_version'
    ]
)
# First Event
@first_event_received.connect(weak=False)
def record_first_event(project, group, **kwargs):
    # Mark the "first_event" feature adopted for the project's organization
    # as soon as any event is received.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="first_event", complete=True
    )
@event_processed.connect(weak=False)
def record_event_processed(project, group, event, **kwargs):
    """Inspect a processed event and record every feature it demonstrates."""
    slugs = []

    # Platform: the group's language/platform counts as an adopted feature.
    if group.platform in manager.location_slugs('language'):
        slugs.append(group.platform)

    # Release Tracking
    if event.get_tag('sentry:release'):
        slugs.append('release_tracking')

    # Environment Tracking
    if event.get_tag('environment'):
        slugs.append('environment_tracking')

    # User Tracking: we'd like id or email.  Certain SDKs automatically tag
    # the ip address, so a user context consisting only of ip_address does
    # not count (see test_no_user_tracking_for_ip_address_only).
    # The list(d.keys()) pattern keeps this python3 safe.
    user_context = event.data.get('sentry.interfaces.User')
    if user_context and list(user_context.keys()) != ['ip_address']:
        slugs.append('user_tracking')

    # Custom Tags: any tag key beyond the automatic defaults.
    if {tag[0] for tag in event.tags} - DEFAULT_TAGS:
        slugs.append('custom_tags')

    # Sourcemaps
    if has_sourcemap(event):
        slugs.append('source_maps')

    # Breadcrumbs
    if event.data.get('sentry.interfaces.Breadcrumbs'):
        slugs.append('breadcrumbs')

    if slugs:
        FeatureAdoption.objects.bulk_record(project.organization_id, slugs)
@user_feedback_received.connect(weak=False)
def record_user_feedback(project, **kwargs):
    # User feedback was submitted for the project.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="user_feedback", complete=True
    )


@project_created.connect(weak=False)
def record_project_created(project, user, **kwargs):
    # Any project creation marks the "first_project" milestone for the org.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="first_project", complete=True
    )


@member_joined.connect(weak=False)
def record_member_joined(member, **kwargs):
    # A new member joined the organization.
    FeatureAdoption.objects.record(
        organization_id=member.organization_id, feature_slug="invite_team", complete=True
    )


@issue_assigned.connect(weak=False)
def record_issue_assigned(project, group, **kwargs):
    # An issue was assigned to a user.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="assignment", complete=True
    )


@issue_resolved_in_release.connect(weak=False)
def record_issue_resolved_in_release(project, **kwargs):
    # An issue was marked as resolved in a release.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="resolved_in_release", complete=True
    )


@advanced_search.connect(weak=False)
def record_advanced_search(project, **kwargs):
    # Advanced search syntax was used.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="advanced_search", complete=True
    )


@save_search_created.connect(weak=False)
def record_save_search_created(project, **kwargs):
    # A saved search was created.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="saved_search", complete=True
    )


@inbound_filter_toggled.connect(weak=False)
def record_inbound_filter_toggled(project, **kwargs):
    # An inbound data filter was toggled on the project.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="inbound_filters", complete=True
    )


@alert_rule_created.connect(weak=False)
def record_alert_rule_created(project, rule, **kwargs):
    # Skip the rule that is auto-created with every new project; only a
    # user-authored rule counts as adopting alert rules.
    if rule.label == DEFAULT_RULE_LABEL and rule.data == DEFAULT_RULE_DATA:
        return

    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="alert_rules", complete=True
    )
@plugin_enabled.connect(weak=False)
def record_plugin_enabled(plugin, project, user, **kwargs):
    # Map the plugin's base class to the adoption feature it represents;
    # plugins of any other kind are not tracked.
    if isinstance(plugin, (IssueTrackingPlugin, IssueTrackingPlugin2)):
        feature = "issue_tracker_integration"
    elif isinstance(plugin, NotificationPlugin):
        feature = "notification_integration"
    else:
        return

    FeatureAdoption.objects.record(
        organization_id=project.organization_id,
        feature_slug=feature,
        complete=True
    )
@sso_enabled.connect(weak=False)
def record_sso_enabled(organization, **kwargs):
    # The organization enabled single sign-on.
    FeatureAdoption.objects.record(
        organization_id=organization.id, feature_slug="sso", complete=True
    )


@data_scrubber_enabled.connect(weak=False)
def record_data_scrubber_enabled(organization, **kwargs):
    # The organization enabled server-side data scrubbing.
    FeatureAdoption.objects.record(
        organization_id=organization.id, feature_slug="data_scrubbers", complete=True
    )


def deleted_and_discarded_issue(instance, created, **kwargs):
    # post_save receiver for GroupTombstone (connected at the bottom of this
    # module); a tombstone row is only created when an issue is discarded.
    # NOTE(review): unlike the other receivers this one does not pass
    # complete=True -- presumably intentional; confirm before changing.
    if created:
        FeatureAdoption.objects.record(
            organization_id=instance.project.organization_id,
            feature_slug="delete_and_discard"
        )


@repo_linked.connect(weak=False)
def record_repo_linked(repo, **kwargs):
    # A repository was linked to the organization.
    FeatureAdoption.objects.record(
        organization_id=repo.organization_id, feature_slug="repo_linked", complete=True
    )


@release_created.connect(weak=False)
def record_release_created(release, **kwargs):
    # A release was created.
    FeatureAdoption.objects.record(
        organization_id=release.organization_id, feature_slug="release_created", complete=True
    )


@deploy_created.connect(weak=False)
def record_deploy_created(deploy, **kwargs):
    # A deploy was recorded for a release.
    FeatureAdoption.objects.record(
        organization_id=deploy.organization_id, feature_slug="deploy_created", complete=True
    )


@resolved_with_commit.connect(weak=False)
def record_resolved_with_commit(organization_id, **kwargs):
    # An issue was resolved via a linked commit.
    FeatureAdoption.objects.record(
        organization_id=organization_id, feature_slug="resolved_with_commit", complete=True
    )


@ownership_rule_created.connect(weak=False)
def record_ownership_rule_created(project, **kwargs):
    # An issue-ownership rule was created for the project.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="ownership_rule_created", complete=True
    )


@issue_ignored.connect(weak=False)
def record_issue_ignored(project, **kwargs):
    # An issue was ignored/snoozed.
    FeatureAdoption.objects.record(
        organization_id=project.organization_id, feature_slug="issue_ignored", complete=True
    )


# Django model signals cannot use the custom signal decorators above, so the
# GroupTombstone receiver is connected manually.  dispatch_uid guards against
# duplicate registration if this module is imported more than once.
post_save.connect(
    deleted_and_discarded_issue,
    sender=GroupTombstone,
    dispatch_uid='analytics.grouptombstone.created',
    weak=False,
)
|
{
"content_hash": "fc39b3801e0d497608a8437fcf2bb19b",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 101,
"avg_line_length": 31.93951612903226,
"alnum_prop": 0.7124100492362075,
"repo_name": "ifduyue/sentry",
"id": "0d31f2adad330ab44dd392bb1b932883c5efb3a1",
"size": "7921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/receivers/features.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
import fnmatch
import functools
import os
import re
import sys
import traceback
from contextlib import contextmanager
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization as crypt_serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from deprecation import deprecated
import six
import cloudbridge
from ..interfaces.exceptions import InvalidParamException
def generate_key_pair():
    """
    Generate an RSA keypair and return it as a (public, private) tuple.

    The public key is in OpenSSH format and the private key is PEM
    (unencrypted PKCS#8), both decoded to text.
    """
    # 2048-bit RSA with the conventional public exponent 65537.
    rsa_key = rsa.generate_private_key(
        backend=default_backend(),
        public_exponent=65537,
        key_size=2048)
    pem_private = rsa_key.private_bytes(
        crypt_serialization.Encoding.PEM,
        crypt_serialization.PrivateFormat.PKCS8,
        crypt_serialization.NoEncryption()).decode('utf-8')
    ssh_public = rsa_key.public_key().public_bytes(
        crypt_serialization.Encoding.OpenSSH,
        crypt_serialization.PublicFormat.OpenSSH).decode('utf-8')
    return ssh_public, pem_private
def filter_by(prop_name, kwargs, objs):
    """
    Filter a list of objects by the attribute named `prop_name`.

    The `prop_name` entry is popped from `kwargs`.  When it holds a
    non-empty value, the objects are filtered by it: glob-style matching
    for string values, plain equality otherwise.  When absent or falsy,
    `objs` is returned unchanged.
    """
    wanted = kwargs.pop(prop_name, None)
    if not wanted:
        return objs

    if isinstance(wanted, six.string_types):
        # Glob pattern: translate to a regex and match non-empty attributes.
        pattern = fnmatch.translate(wanted)
        return [obj for obj in objs
                if getattr(obj, prop_name)
                and re.search(pattern, getattr(obj, prop_name))]

    return [obj for obj in objs if getattr(obj, prop_name) == wanted]
def generic_find(filter_names, kwargs, objs):
    """Apply each filter in ``filter_names`` to ``objs`` in sequence.

    Every recognised filter consumes its entry from ``kwargs``; if any
    entry remains afterwards it was not a supported attribute and an
    :class:`InvalidParamException` is raised.
    """
    results = objs
    for filter_name in filter_names:
        results = filter_by(filter_name, kwargs, results)
    # Anything still left in kwargs was not a recognised filter.
    if kwargs:
        raise InvalidParamException(
            "Unrecognised parameters for search: %s. Supported attributes: %s"
            % (kwargs, filter_names))
    return results
@contextmanager
def cleanup_action(cleanup_func):
    """Context manager that always runs ``cleanup_func`` afterwards.

    ``cleanup_func`` is invoked when the block completes, whether it
    finished normally or raised. Exceptions raised by ``cleanup_func``
    itself are printed and ignored so that the original traceback (if
    any) is preserved and re-raised unchanged.

    Usage:
        with cleanup_action(lambda: print("Oops!")):
            do_something()
    """
    def _attempt_cleanup(message_fmt):
        # Best-effort: report cleanup failures but never let them mask
        # the block's own outcome.
        try:
            cleanup_func()
        except Exception as cleanup_err:
            print(message_fmt.format(cleanup_err))
            traceback.print_exc()

    try:
        yield
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        _attempt_cleanup("Error during exception cleanup: {0}")
        # Re-raise the original exception with its original traceback.
        six.reraise(exc_type, exc_value, exc_tb)
    _attempt_cleanup("Error during cleanup: {0}")
def get_env(varname, default_value=None):
    """Read an environment variable with a py2/py3-consistent result.

    Wraps ``os.environ.get`` so that any string value comes back as text
    (``unicode`` on py2, ``str`` on py3). Non-string values — including
    a non-string ``default_value`` — pass through untouched.

    :type varname: ``str``
    :param varname: Name of the environment variable to look up.

    :param default_value: Returned when the variable is not set.
        Defaults to ``None``.

    :return: The environment value if found; ``default_value`` otherwise.
    """
    value = os.environ.get(varname, default_value)
    needs_decode = (isinstance(value, six.string_types)
                    and not isinstance(value, six.text_type))
    return six.u(value) if needs_decode else value
# Alias deprecation decorator, following:
# https://stackoverflow.com/questions/49802412/
# how-to-implement-deprecation-in-python-with-argument-alias
def deprecated_alias(**aliases):
    """Decorator allowing deprecated keyword-argument aliases.

    Each ``old_name="new_name"`` pair lets callers keep using the old
    keyword; it is renamed (with a deprecation warning) before the
    wrapped function runs.
    """
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            rename_kwargs(func.__name__, kwargs, aliases)
            return func(*args, **kwargs)
        return inner
    return decorator
def rename_kwargs(func_name, kwargs, aliases):
    """Rewrite deprecated keyword names in ``kwargs`` to their new names.

    Mutates ``kwargs`` in place. Raises :class:`InvalidParamException`
    when a caller supplies both the old and the new name.
    """
    for old_name, new_name in aliases.items():
        if old_name not in kwargs:
            continue
        if new_name in kwargs:
            raise InvalidParamException(
                '{} received both {} and {}'.format(func_name, old_name,
                                                    new_name))
        # Emit the deprecation warning by decorating and immediately
        # invoking a no-op lambda.
        deprecated(deprecated_in='1.1',
                   removed_in='2.0',
                   current_version=cloudbridge.__version__,
                   details='{} is deprecated, use {} instead'.format(
                       old_name, new_name))(lambda: None)()
        kwargs[new_name] = kwargs.pop(old_name)
NON_ALPHA_NUM = re.compile(r"[^A-Za-z0-9]+")

def to_resource_name(value, replace_with="-"):
    """
    Converts a given string to a valid resource name by replacing
    all runs of non-alphanumeric characters.

    :param value: the value to strip
    :param replace_with: the value to replace mismatching characters with
    :return: a string with all mismatching characters replaced and with
        leading/trailing replacement characters removed.
    """
    val = NON_ALPHA_NUM.sub(replace_with, value)
    # BUGFIX: previously this stripped the literal "-" regardless of
    # ``replace_with``, leaving stray separators when a custom
    # replacement character was supplied.
    return val.strip(replace_with)
|
{
"content_hash": "bc7808ecbe1a28dc472dfc1c72160576",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 32.6448087431694,
"alnum_prop": 0.6397723468362906,
"repo_name": "gvlproject/libcloudbridge",
"id": "589a00a8bfa78397bcea3b9398538aa483e985e2",
"size": "5974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudbridge/base/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "341087"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
from .compat import utf8
class Mark(object):
    """A position (name/line/column/offset) inside a YAML input buffer.

    Used to point error messages at the exact character that triggered
    them. ``buffer`` may be None when the original text is no longer
    available, in which case no snippet can be produced.
    """
    def __init__(self, name, index, line, column, buffer, pointer):
        # name: display name of the input (e.g. a file name)
        self.name = name
        # index: absolute character offset into the stream
        self.index = index
        # line / column: zero-based; rendered one-based in __str__
        self.line = line
        self.column = column
        # buffer / pointer: raw text plus the offset used for snippets
        self.buffer = buffer
        self.pointer = pointer

    def get_snippet(self, indent=4, max_length=75):
        """Return an indented one-line excerpt around the mark with a
        caret (^) under the offending character, or None if there is no
        buffer attached."""
        if self.buffer is None:
            return None
        head = ''
        start = self.pointer
        # Walk backwards to the start of the line, stopping early (with a
        # ' ... ' ellipsis) once half of max_length is consumed.
        # \x85/\u2028/\u2029 are the extra Unicode line breaks YAML treats
        # as line terminators.
        # NOTE(review): max_length/2-1 is true (float) division on py3 —
        # the comparison still works, but confirm this matches py2 intent.
        while (start > 0 and
               self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029'):
            start -= 1
            if self.pointer-start > max_length/2-1:
                head = ' ... '
                start += 5
                break
        tail = ''
        end = self.pointer
        # Symmetric forward walk to the end of the line.
        while (end < len(self.buffer) and
               self.buffer[end] not in u'\0\r\n\x85\u2028\u2029'):
            end += 1
            if end-self.pointer > max_length/2-1:
                tail = ' ... '
                end -= 5
                break
        # utf8() (from .compat) keeps the py2/py3 string type consistent.
        snippet = utf8(self.buffer[start:end])
        return ' '*indent + head + snippet + tail + '\n' \
            + ' '*(indent+self.pointer-start+len(head)) + '^'

    def __str__(self):
        # One-based line/column for human consumption; append the snippet
        # when one can be built.
        snippet = self.get_snippet()
        where = " in \"%s\", line %d, column %d" \
            % (self.name, self.line+1, self.column+1)
        if snippet is not None:
            where += ":\n"+snippet
        return where
class YAMLError(Exception):
    """Base class for every error raised by this YAML package."""
class MarkedYAMLError(YAMLError):
    """A YAML error annotated with optional context/problem marks.

    All attributes are optional; ``__str__`` assembles whichever pieces
    are present into a multi-line report.
    """
    def __init__(self, context=None, context_mark=None,
                 problem=None, problem_mark=None, note=None):
        self.context = context
        self.context_mark = context_mark
        self.problem = problem
        self.problem_mark = problem_mark
        self.note = note

    def __str__(self):
        parts = []
        if self.context is not None:
            parts.append(self.context)
        # Show the context mark only when it adds information beyond the
        # problem mark (different input, line or column), or when the
        # problem/problem mark is missing entirely.
        show_context_mark = (
            self.context_mark is not None
            and (self.problem is None
                 or self.problem_mark is None
                 or self.context_mark.name != self.problem_mark.name
                 or self.context_mark.line != self.problem_mark.line
                 or self.context_mark.column != self.problem_mark.column))
        if show_context_mark:
            parts.append(str(self.context_mark))
        if self.problem is not None:
            parts.append(self.problem)
        if self.problem_mark is not None:
            parts.append(str(self.problem_mark))
        if self.note is not None:
            parts.append(self.note)
        return '\n'.join(parts)
|
{
"content_hash": "31988acd420b0bbedd12b66566b0e7df",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 73,
"avg_line_length": 32.975,
"alnum_prop": 0.5310841546626232,
"repo_name": "flgiordano/netcash",
"id": "2b7690430188cfc2b6cce953edc12fe67cb82643",
"size": "2638",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/third_party/ruamel/yaml/error.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
}
|
'''
Add alias fields to address points
from the closest road with the same name.
'''
import arcpy, os, time
from time import strftime
class Fields(object):
    """Base class holding an ordered list of geodatabase field names."""

    def __init__(self):
        # Subclasses overwrite this with their concrete field names.
        self._fieldList = []

    def getI(self, field):
        """Return the index of ``field`` within the field list."""
        return self._fieldList.index(field)

    def getFieldList(self):
        """Return the ordered field-name list (shared, not a copy)."""
        return self._fieldList
class RoadFields(Fields):
    """Field schema of the road feature class.

    Schema changes (renamed columns etc.) should be handled here rather
    than in the processing code.
    """

    def __init__(self, roads):
        self.objectId = arcpy.Describe(roads).OIDFieldName
        self.prefixDir = "PRE_DIR"
        self.street = "S_NAME"
        self.streetType = "S_TYPE"
        self.acsName = "ACS_STREET"
        self.acsSuffix = "ACS_SUFDIR"
        # Fields that together identify a street name, and the alias
        # fields copied onto matching address points.
        self.streetNameFields = [self.prefixDir, self.street, self.streetType]
        self.aliasFields = [self.acsName, self.acsSuffix]
        self._fieldList = ([self.objectId]
                           + self.streetNameFields
                           + self.aliasFields)
class AddressPointFields(Fields):
    """Field schema of the address-point feature class."""

    def __init__(self, addrPoints):
        self.objectId = arcpy.Describe(addrPoints).OIDFieldName
        self.prefixDir = "PreDir"
        self.street = "S_NAME"
        self.streetType = "Suf"
        self._fieldList = [self.objectId, self.prefixDir,
                           self.street, self.streetType]
class Main(object):
def deletelayerIfExist(self, layerName):
if arcpy.Exists(layerName):
arcpy.Delete_management(layerName)
def start(self, roadFeature, addrPointsFeature, outputDirectory):
#Output layers.
outputGdbName = "AcsAliasAdd_" + "Results" + ".gdb"
arcpy.CreateFileGDB_management(outputDirectory, outputGdbName)
outputGdb = os.path.join(outputDirectory, outputGdbName)
#Intermediate tables.
tempGdbName = "temp_" + strftime("%Y%m%d%H%M%S") + ".gdb"
arcpy.CreateFileGDB_management(outputDirectory, tempGdbName)
tempGdb = os.path.join(outputDirectory, tempGdbName)
roads = os.path.join(outputGdb, "AliasRoads")
addrPoints = os.path.join(outputGdb, "AddrPoints_W_Alias")
freqTable = os.path.join(tempGdb, "roadFreq")
outputNear = os.path.join(tempGdb, "finalNear")
#Copy features
acsRoadsWhere = """("{0}" <> '' and "{0}" is not null)""".format("ACS_STREET")
arcpy.MakeFeatureLayer_management (roadFeature, "acsRoads", acsRoadsWhere)
arcpy.CopyFeatures_management("acsRoads", roads)
arcpy.CopyFeatures_management(addrPointsFeature, addrPoints)
#Fied Objects for input layers. Schema changes can be handled in the Field classes
roadFields = RoadFields(roads)
roadLayer = "roads"
addrPointFields = AddressPointFields(addrPoints)
addrPointLayer = "addressPoints"
#Local accumulator variables
intermediateNears = []
nearBaseName = "near"
arcpy.Frequency_analysis(roads, freqTable, roadFields.streetNameFields)
#Frequency table count for measuring progress
self.deletelayerIfExist("freqCount")
arcpy.MakeTableView_management(freqTable, "freqCount")
uniqueRoadNamesCount = arcpy.GetCount_management("freqCount")
uniqueRoadNamesCount = int(uniqueRoadNamesCount.getOutput(0))
print "Unique street names: {}".format(uniqueRoadNamesCount)
#Index feature classes to speed up selections
arcpy.AddIndex_management (addrPoints,
"{};{};{}".format(addrPointFields.street, addrPointFields.prefixDir, addrPointFields.streetType),
"addrPnt_IDX11", "NON_UNIQUE", "ASCENDING")
arcpy.AddIndex_management (roads,
"{};{};{}".format(roadFields.street, roadFields.prefixDir, roadFields.streetType),
"roads_IDX11", "NON_UNIQUE", "ASCENDING")
nearNameI = 1;
uniqueRoadNamesCount = float(uniqueRoadNamesCount)
#Create point and road feature layers for selections
self.deletelayerIfExist(addrPointLayer)
arcpy.MakeFeatureLayer_management (addrPoints, addrPointLayer)
self.deletelayerIfExist(roadLayer)
arcpy.MakeFeatureLayer_management (roads, roadLayer)
#Main loop. Selects points and roads that share a street name and does Near analysis
print "Precent Complete: %0"
with arcpy.da.SearchCursor(freqTable, roadFields.streetNameFields) as cursor:
for row in cursor:
#Progress update
if nearNameI % 128 == 0:
print "Precent Complete: %{0:0.2f}".format(nearNameI / uniqueRoadNamesCount * 100)
preDirVal = row[0]#roadFields.getI(roadFields.prefixDir)]
streetVal = row[1]#roadFields.getI(roadFields.street)]
typeVal = row[2]#roadFields.getI(roadFields.streetType)]
addrPointWhere = """"{}" = '{}' and "{}" = '{}' and "{}" = '{}'""".format(addrPointFields.prefixDir, preDirVal,
addrPointFields.street, streetVal,
addrPointFields.streetType, typeVal)
arcpy.SelectLayerByAttribute_management (addrPointLayer, "NEW_SELECTION", addrPointWhere)
roadWhere = """"{}" = '{}' and "{}" = '{}' and "{}" = '{}'""".format(roadFields.prefixDir, preDirVal,
roadFields.street, streetVal,
roadFields.streetType, typeVal)
arcpy.SelectLayerByAttribute_management (roadLayer, "NEW_SELECTION", roadWhere)
tempNearName = os.path.join(tempGdb, nearBaseName + str(nearNameI))
arcpy.GenerateNearTable_analysis(addrPointLayer, roadLayer, tempNearName)
intermediateNears.append(tempNearName)
#Increment index to keep near tables unique
nearNameI += 1
#Merge intermediat near tables
print "Start Append"
ourputNear = os.path.join(outputGdb, "finalNear")
first = 1
for near in intermediateNears:
if first:
arcpy.CopyRows_management(near, outputNear)
first = 0
else:
arcpy.Append_management(near, outputNear)
#Join Object ID of closest road to point and add alias fields
arcpy.JoinField_management(addrPoints, addrPointFields.objectId, outputNear, "IN_FID", ["NEAR_FID", "NEAR_DIST"])
arcpy.JoinField_management(addrPoints, "NEAR_FID", roads, roadFields.objectId, roadFields.aliasFields)
print "Precent Complete: %100"
#arcpy.Delete_management(tempGdb)
# Script entry point: edit the three placeholder paths below before running.
if __name__ == '__main__':
    roads = r"path to roads here"#Path to road data
    addressPoints = r"path to address points here"#Path to address point data
    outputDirectory = r"output directory here"#Output working directory
    totalTime = time.time()#timer
    aliasAdder = Main();
    aliasAdder.start(roads, addressPoints, outputDirectory)
    print "Total Time: {0:.03f} seconds".format(time.time() - totalTime)
|
{
"content_hash": "955f1cbd8cfce04dceb1464420a2899a",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 133,
"avg_line_length": 48.01875,
"alnum_prop": 0.58466744761161,
"repo_name": "agrc/acs-alias-address-point",
"id": "7757bba763e618601ec5c5f35201fbe14424dfcd",
"size": "7683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ToolModules/AcsAliasAdder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7683"
}
],
"symlink_target": ""
}
|
# User-facing message templates for the FigShare OSF add-on. Placeholders
# ({category}, {url}, ...) are filled in with str.format by the callers.
FIGSHARE = 'FigShare'
# MODEL MESSAGES :model.py
BEFORE_PAGE_LOAD_PRIVATE_NODE_MIXED_FS = 'Warnings: This OSF {category} is private but ' + FIGSHARE + ' project {project_id} may contain some public files or filesets'
BEFORE_PAGE_LOAD_PUBLIC_NODE_MIXED_FS = 'Warnings: This OSF {category} is public but ' + FIGSHARE + ' project {project_id} may contain some private files or filesets'
BEFORE_PAGE_LOAD_PERM_MISMATCH = 'Warnings: This OSF {category} is {node_perm}, but the ' + FIGSHARE + ' article {figshare_id} is {figshare_perm}'
BEFORE_PAGE_LOAD_PUBLIC_NODE_PRIVATE_FS = 'Users can view the contents of this private ' + FIGSHARE + ' article.'
BEFORE_REMOVE_CONTRIBUTOR = 'The ' + FIGSHARE + ' add-on for this {category} is authenticated by {user}. Removing this user will also remove write access to the {category} unless another contributor re-authenticates. '
AFTER_REMOVE_CONTRIBUTOR = 'Because the ' + FIGSHARE + ' add-on for this {category} was authenticated by {user}, authentication information has been deleted. You can re-authenticate on the <a href="{url}settings/">Settings</a> page.'
BEFORE_FORK_OWNER = 'Because you have authenticated the ' + FIGSHARE + ' add-on for this {category}, forking it will also transfer your authorization to the forked {category}.'
BEFORE_FORK_NOT_OWNER = 'Because this ' + FIGSHARE + ' add-on has been authenticated by a different user, forking it will not transfer authentication to the forked {category}.'
AFTER_FORK_OWNER = '' + FIGSHARE + ' authorization copied to forked {category}.'
AFTER_FORK_NOT_OWNER = '' + FIGSHARE + ' authorization not copied to forked {category}. You may authorize this fork on the <a href={url}>Settings</a> page.'
BEFORE_REGISTER = 'The contents of ' + FIGSHARE + ' projects cannot be registered at this time. The ' + FIGSHARE + ' data associated with this {category} will not be included as part of this registration.'
# END MODEL MESSAGES
# MFR MESSAGES :views/crud.py
FIGSHARE_VIEW_FILE_PRIVATE = 'Since this ' + FIGSHARE + ' file is unpublished we cannot render it. In order to access this content you will need to log into the <a href="{url}">' + FIGSHARE + ' page</a> and view it there.'
FIGSHARE_VIEW_FILE_OVERSIZED = 'This ' + FIGSHARE + ' file is too large to render; <a href="{url}">download file</a> to view it.'
# NOTE(review): the string below is an unused (no-op) module-level literal,
# presumably a draft template for the publish form — confirm before removal.
'''
Publishing this article is an irreversible operation. Once a FigShare article is published it can never be deleted. Proceed with caution.
<br /><br />
Also, FigShare requires some additional info before this article can be published: <br />
<form id='figsharePublishForm' action='${nodeApiUrl}figshare/publish/article/${parent_id}/'>
    <h3><label><Title></label></h3>
    <input name='title' type='text' value='${figshare_title}'>
    <h3><label>Category:</label></h3>
    <select name='cat' id='figshareCategory' value='${figshare_category}'>${figshare_categories}</select><br />
    <h3><label>Tag(s):</label></h3>
    <input name='tags' type='text' value='${figshare_tags}' placeholder='e.g. neuroscience, cognition'><br />
    <h3><label>Description</label></h3>
    <textarea name='description' placeholder='Please type a description of this file here'>${figshare_desc}</textarea>
</form>
'''
# BUGFIX: message read 'FigSahre'; now built from the FIGSHARE constant
# like every other message in this module.
OAUTH_INVALID = 'Your OAuth key for ' + FIGSHARE + ' is no longer valid. Please re-authenticate.'
# END MFR MESSAGES
|
{
"content_hash": "889af3fa43a49ab2f67162f6d6d5a664",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 233,
"avg_line_length": 66.58,
"alnum_prop": 0.7227395614298588,
"repo_name": "AndrewSallans/osf.io",
"id": "7d0615bb54da605be67fb52825731e901c43f257",
"size": "3329",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/addons/figshare/messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "70439"
},
{
"name": "JavaScript",
"bytes": "2555546"
},
{
"name": "Python",
"bytes": "2159449"
}
],
"symlink_target": ""
}
|
import errno
import sys
import unittest
import time
sys.path.append('./lib')
sys.path.append('../python')
sys.path.append('../api/generated/py')
from warp17_ut import Warp17UnitTestCase
from warp17_ut import Warp17NoTrafficTestCase
from b2b_setup import *
from warp17_common_pb2 import *
from warp17_l3_pb2 import *
from warp17_app_raw_pb2 import *
from warp17_server_pb2 import *
from warp17_client_pb2 import *
from warp17_test_case_pb2 import *
from warp17_service_pb2 import *
class TestApi(Warp17UnitTestCase):
    """Tests the functionality of the WARP17 RPC API.

    Assumes a back-to-back setup with two ports: Port 0 <-> Port 1.

    NOTE: the former ``async`` parameter/loop variable was renamed to
    ``is_async`` because ``async`` is a reserved keyword since Python 3.7;
    all call sites are positional so behavior is unchanged.
    """
    PORT_CNT = 2

    def test_configure_port_valid_no_intf_no_gw(self):
        """Tests the ConfigurePort API with 0 interfaces and no default gw"""
        for eth_port in range(0, self.PORT_CNT):
            pcfg = b2b_configure_port(eth_port,
                                      def_gw=Ip(ip_version=IPV4, ip_v4=0))
            error = self.warp17_call('ConfigurePort', pcfg)
            self.assertEqual(error.e_code, 0, 'ConfigurePort')
        for eth_port in range(0, self.PORT_CNT):
            result = self.warp17_call('GetPortCfg',
                                      PortArg(pa_eth_port=eth_port))
            # BUGFIX: used to re-check the stale `error` from the loop
            # above; check the error embedded in the GetPortCfg result
            # (as the max-intf variant of this test already does).
            self.assertEqual(result.pcr_error.e_code, 0, 'GetPortCfg')
            self.assertEqual(len(result.pcr_cfg.pc_l3_intfs), 0, 'L3IntfCnt')
            self.assertTrue(result.pcr_cfg.pc_def_gw == Ip(ip_version=IPV4,
                                                           ip_v4=0),
                            'DefGw')

    def test_configure_port_valid_max_intf_no_gw(self):
        """Tests the ConfigurePort API with max interfaces and no default gw"""
        for eth_port in range(0, self.PORT_CNT):
            pcfg = b2b_configure_port(eth_port,
                                      def_gw=Ip(ip_version=IPV4, ip_v4=0),
                                      l3_intf_count=TPG_TEST_MAX_L3_INTF)
            error = self.warp17_call('ConfigurePort', pcfg)
            self.assertEqual(error.e_code, 0, 'ConfigurePort')
        for eth_port in range(0, self.PORT_CNT):
            result = self.warp17_call('GetPortCfg',
                                      PortArg(pa_eth_port=eth_port))
            self.assertEqual(result.pcr_error.e_code, 0, 'GetPortCfg')
            self.assertEqual(len(result.pcr_cfg.pc_l3_intfs),
                             TPG_TEST_MAX_L3_INTF,
                             'L3IntfCnt')
            self.assertTrue(result.pcr_cfg.pc_def_gw == Ip(ip_version=IPV4,
                                                           ip_v4=0),
                            'DefGw')
            # Every configured interface must round-trip unchanged.
            for i in range(0, TPG_TEST_MAX_L3_INTF):
                self.assertTrue(result.pcr_cfg.pc_l3_intfs[i] ==
                                L3Intf(l3i_ip=Ip(ip_version=IPV4,
                                                 ip_v4=b2b_ipv4(eth_port, i)),
                                       l3i_mask=Ip(ip_version=IPV4,
                                                   ip_v4=b2b_mask(eth_port, i)),
                                       l3i_count=b2b_count(eth_port, i)),
                                'L3Intf')

    @unittest.expectedFailure
    def test_configure_port_invalid_gt_max_intf(self):
        """Tests the ConfigurePort API with more than max interfaces"""
        # WARP17 validates the total interface count but doesn't return an
        # error yet, hence the expectedFailure marker.
        for eth_port in range(0, self.PORT_CNT):
            pcfg = b2b_configure_port(eth_port,
                                      def_gw=Ip(ip_version=IPV4,
                                                ip_v4=b2b_def_gw(eth_port)),
                                      l3_intf_count=TPG_TEST_MAX_L3_INTF + 1)
            error = self.warp17_call('ConfigurePort', pcfg)
            self.assertEqual(error.e_code, -errno.EINVAL, 'ConfigurePort')

    def test_configure_test_case_invalid_eth_port(self):
        """Tests the ConfigureTestCase API with invalid eth_port"""
        eth_port = self.PORT_CNT + 1
        l4cfg = L4Server(l4s_proto=UDP,
                         l4s_tcp_udp=TcpUdpServer(
                             tus_ports=L4PortRange(l4pr_start=1,
                                                   l4pr_end=1)))
        appcfg = AppServer(as_app_proto=RAW,
                           as_raw=RawServer(rs_req_plen=0, rs_resp_plen=0))
        scfg = Server(srv_ips=IpRange(ipr_start=Ip(ip_version=IPV4,
                                                   ip_v4=b2b_ipv4(eth_port, 0)),
                                      ipr_end=Ip(ip_version=IPV4,
                                                 ip_v4=b2b_ipv4(eth_port, 0) + 1)),
                      srv_l4=l4cfg,
                      srv_app=appcfg)
        critcfg = TestCriteria(tc_crit_type=SRV_UP, tc_srv_up=1)
        tccfg = TestCase(tc_type=SERVER, tc_eth_port=eth_port,
                         tc_id=0,
                         tc_server=scfg,
                         tc_criteria=critcfg,
                         tc_async=False)
        error = self.warp17_call('ConfigureTestCase', tccfg)
        self.assertEqual(error.e_code, -errno.EINVAL, 'ConfigureTestCase')
        tcdel = TestCaseArg(tca_eth_port=0,
                            tca_test_case_id=TPG_TEST_MAX_ENTRIES + 1)
        error = self.warp17_call('DelTestCase', tcdel)
        self.assertEqual(error.e_code, -errno.EINVAL, 'DelTestCase')

    def test_configure_test_case_invalid_tcid(self):
        """Tests the ConfigureTestCase API with invalid tcid"""
        l4cfg = L4Server(l4s_proto=UDP,
                         l4s_tcp_udp=TcpUdpServer(
                             tus_ports=L4PortRange(l4pr_start=1,
                                                   l4pr_end=1)))
        appcfg = AppServer(as_app_proto=RAW,
                           as_raw=RawServer(rs_req_plen=0, rs_resp_plen=0))
        scfg = Server(srv_ips=IpRange(ipr_start=Ip(ip_version=IPV4,
                                                   ip_v4=b2b_ipv4(0, 0)),
                                      ipr_end=Ip(ip_version=IPV4,
                                                 ip_v4=b2b_ipv4(0, 0) + 1)),
                      srv_l4=l4cfg,
                      srv_app=appcfg)
        critcfg = TestCriteria(tc_crit_type=SRV_UP, tc_srv_up=1)
        tccfg = TestCase(tc_type=SERVER, tc_eth_port=0,
                         tc_id=TPG_TEST_MAX_ENTRIES + 1,
                         tc_server=scfg,
                         tc_criteria=critcfg,
                         tc_async=False)
        error = self.warp17_call('ConfigureTestCase', tccfg)
        self.assertEqual(error.e_code, -errno.EINVAL, 'ConfigureTestCase')
        tcdel = TestCaseArg(tca_eth_port=0,
                            tca_test_case_id=TPG_TEST_MAX_ENTRIES + 1)
        error = self.warp17_call('DelTestCase', tcdel)
        self.assertEqual(error.e_code, -errno.EINVAL, 'DelTestCase')

    def _configure_client_test_cases(self, ip_count, l4_proto, l4_port_count,
                                     req_plen, resp_plen, criteria, is_async,
                                     expected_err):
        """Configure a client test on every (port, test-case-id) pair.

        Expects ``expected_err`` from ConfigureTestCase; on success also
        verifies the config round-trips via GetTestCase and deletes it.
        """
        l4cfg = L4Client(l4c_proto=l4_proto,
                         l4c_tcp_udp=TcpUdpClient(
                             tuc_sports=b2b_ports(l4_port_count),
                             tuc_dports=b2b_ports(l4_port_count)))
        rate_cfg = RateClient(rc_open_rate=Rate(r_value=42),
                              rc_close_rate=Rate(r_value=42),
                              rc_send_rate=Rate(r_value=42))
        delay_cfg = DelayClient(dc_init_delay=Delay(d_value=42),
                                dc_uptime=Delay(d_value=42),
                                dc_downtime=Delay(d_value=42))
        app_cfg = AppClient(ac_app_proto=RAW,
                            ac_raw=RawClient(rc_req_plen=req_plen,
                                             rc_resp_plen=resp_plen))
        tcs = [
            TestCase(tc_type=CLIENT, tc_eth_port=eth_port,
                     tc_id=tcid,
                     tc_client=Client(cl_src_ips=b2b_sips(eth_port, ip_count),
                                      cl_dst_ips=b2b_dips(eth_port, ip_count),
                                      cl_l4=l4cfg,
                                      cl_rates=rate_cfg,
                                      cl_delays=delay_cfg,
                                      cl_app=app_cfg),
                     tc_criteria=criteria,
                     tc_async=is_async)
            for eth_port in range(0, self.PORT_CNT)
            for tcid in range(0, TPG_TEST_MAX_ENTRIES)
        ]
        for tccfg in tcs:
            error = self.warp17_call('ConfigureTestCase', tccfg)
            self.assertEqual(error.e_code, expected_err, 'ConfigureTestCase')
        if expected_err != 0:
            return
        for tccfg in tcs:
            tcget = TestCaseArg(tca_eth_port=tccfg.tc_eth_port,
                                tca_test_case_id=tccfg.tc_id)
            tcget_result = self.warp17_call('GetTestCase', tcget)
            self.assertEqual(tcget_result.tcr_error.e_code, 0, 'GetTestCase')
            self.assertTrue(tcget_result.tcr_cfg == tccfg, 'GetTestCaseCfg')
            tcdel = TestCaseArg(tca_eth_port=tccfg.tc_eth_port,
                                tca_test_case_id=tccfg.tc_id)
            error = self.warp17_call('DelTestCase', tcdel)
            self.assertEqual(error.e_code, 0, 'DelTestCase')

    def test_configure_test_case_tcp_udp_client(self):
        """Tests the ConfigureTestCase API with TCP & UDP client testcases"""
        # Every supported pass/fail criteria type must be accepted.
        pass_criterias = [
            TestCriteria(tc_crit_type=RUN_TIME, tc_run_time_s=3600),
            TestCriteria(tc_crit_type=SRV_UP, tc_srv_up=100),
            TestCriteria(tc_crit_type=CL_UP, tc_cl_up=100),
            TestCriteria(tc_crit_type=CL_ESTAB, tc_cl_estab=100),
            TestCriteria(tc_crit_type=DATAMB_SENT, tc_data_mb_sent=100),
        ]
        for l4_proto in [TCP, UDP]:
            for is_async in [True, False]:
                # Positive test cases
                for criteria in pass_criterias:
                    self._configure_client_test_cases(1, l4_proto, 1, 10, 10,
                                                      criteria, is_async, 0)
                # Negative test cases: zero IPs / zero L4 ports rejected.
                run_time = TestCriteria(tc_crit_type=RUN_TIME,
                                        tc_run_time_s=3600)
                self._configure_client_test_cases(0, l4_proto, 1, 10, 10,
                                                  run_time, is_async,
                                                  -errno.EINVAL)
                self._configure_client_test_cases(1, l4_proto, 0, 10, 10,
                                                  run_time, is_async,
                                                  -errno.EINVAL)

    def _configure_server_test_cases(self, ip_count, l4_proto, l4_port_count,
                                     req_plen, resp_plen,
                                     criteria, is_async, expected_err):
        """Server-side analogue of _configure_client_test_cases."""
        l4cfg = L4Server(l4s_proto=l4_proto,
                         l4s_tcp_udp=TcpUdpServer(
                             tus_ports=b2b_ports(l4_port_count)))
        app_cfg = AppServer(as_app_proto=RAW,
                            as_raw=RawServer(rs_req_plen=req_plen,
                                             rs_resp_plen=resp_plen))
        tcs = [
            TestCase(tc_type=SERVER, tc_eth_port=eth_port,
                     tc_id=tcid,
                     tc_server=Server(srv_ips=b2b_sips(eth_port, ip_count),
                                      srv_l4=l4cfg,
                                      srv_app=app_cfg),
                     tc_criteria=criteria,
                     tc_async=is_async)
            for eth_port in range(0, self.PORT_CNT)
            for tcid in range(0, TPG_TEST_MAX_ENTRIES)
        ]
        for tccfg in tcs:
            error = self.warp17_call('ConfigureTestCase', tccfg)
            self.assertEqual(error.e_code, expected_err, 'ConfigureTestCase')
        if expected_err != 0:
            return
        for tccfg in tcs:
            tcget = TestCaseArg(tca_eth_port=tccfg.tc_eth_port,
                                tca_test_case_id=tccfg.tc_id)
            tcget_result = self.warp17_call('GetTestCase', tcget)
            self.assertEqual(tcget_result.tcr_error.e_code, 0, 'GetTestCase')
            self.assertTrue(tcget_result.tcr_cfg == tccfg, 'GetTestCaseCfg')
            tcdel = TestCaseArg(tca_eth_port=tccfg.tc_eth_port,
                                tca_test_case_id=tccfg.tc_id)
            error = self.warp17_call('DelTestCase', tcdel)
            self.assertEqual(error.e_code, 0, 'DelTestCase')

    def test_configure_test_case_tcp_udp_server(self):
        """Tests the ConfigureTestCase API with TCP & UDP server testcases"""
        pass_criterias = [
            TestCriteria(tc_crit_type=RUN_TIME, tc_run_time_s=3600),
            TestCriteria(tc_crit_type=SRV_UP, tc_srv_up=100),
            TestCriteria(tc_crit_type=CL_UP, tc_cl_up=100),
            TestCriteria(tc_crit_type=CL_ESTAB, tc_cl_estab=100),
            TestCriteria(tc_crit_type=DATAMB_SENT, tc_data_mb_sent=100),
        ]
        for l4_proto in [TCP, UDP]:
            for is_async in [True, False]:
                # Positive test cases
                for criteria in pass_criterias:
                    self._configure_server_test_cases(1, l4_proto, 1, 10, 10,
                                                      criteria, is_async, 0)
                # Negative test cases: zero IPs / zero L4 ports rejected.
                run_time = TestCriteria(tc_crit_type=RUN_TIME,
                                        tc_run_time_s=3600)
                self._configure_server_test_cases(0, l4_proto, 1, 10, 10,
                                                  run_time, is_async,
                                                  -errno.EINVAL)
                self._configure_server_test_cases(1, l4_proto, 0, 10, 10,
                                                  run_time, is_async,
                                                  -errno.EINVAL)

    def test_port_start_negative(self):
        """Try to start a non-existent test"""
        self.assertEqual(self.warp17_call('PortStart',
                                          PortArg(pa_eth_port=0)).e_code,
                         -errno.ENOENT,
                         'PortStart')

    def test_port_stop_negative(self):
        """Try to stop a non-existent test"""
        self.assertEqual(self.warp17_call('PortStop',
                                          PortArg(pa_eth_port=0)).e_code,
                         -errno.ENOENT,
                         'PortStop')

    def test_single_session(self):
        """Setup a single UDP/TCP session and check that the test passed.

        Port 0 is the client, Port 1 is the server.
        """
        # No def gw
        no_def_gw = Ip(ip_version=IPV4, ip_v4=0)
        # Setup interfaces on port 0
        pcfg = b2b_port_add(0, def_gw=no_def_gw)
        b2b_port_add_intfs(pcfg, [(Ip(ip_version=IPV4, ip_v4=b2b_ipv4(0, 0)),
                                   Ip(ip_version=IPV4, ip_v4=b2b_mask(0, 0)),
                                   b2b_count(0, 0))])
        self.assertEqual(self.warp17_call('ConfigurePort', pcfg).e_code, 0,
                         'ConfigurePort')
        # Setup interfaces on port 1
        pcfg = b2b_port_add(1, def_gw=no_def_gw)
        b2b_port_add_intfs(pcfg, [(Ip(ip_version=IPV4, ip_v4=b2b_ipv4(1, 0)),
                                   Ip(ip_version=IPV4, ip_v4=b2b_mask(1, 0)),
                                   b2b_count(1, 0))])
        self.assertEqual(self.warp17_call('ConfigurePort', pcfg).e_code, 0,
                         'ConfigurePort')
        rate_ccfg = RateClient(rc_open_rate=Rate(),
                               rc_close_rate=Rate(),
                               rc_send_rate=Rate())
        delay_ccfg = DelayClient(dc_init_delay=Delay(d_value=0),
                                 dc_uptime=Delay(),
                                 dc_downtime=Delay())
        app_ccfg = AppClient(ac_app_proto=RAW,
                             ac_raw=RawClient(rc_req_plen=10,
                                              rc_resp_plen=10))
        app_scfg = AppServer(as_app_proto=RAW,
                             as_raw=RawServer(rs_req_plen=10,
                                              rs_resp_plen=10))
        for l4_proto in [TCP, UDP]:
            l4_ccfg = L4Client(l4c_proto=l4_proto,
                               l4c_tcp_udp=TcpUdpClient(tuc_sports=b2b_ports(1),
                                                        tuc_dports=b2b_ports(1)))
            ccfg = TestCase(tc_type=CLIENT, tc_eth_port=0,
                            tc_id=0,
                            tc_client=Client(cl_src_ips=b2b_sips(0, 1),
                                             cl_dst_ips=b2b_dips(0, 1),
                                             cl_l4=l4_ccfg,
                                             cl_rates=rate_ccfg,
                                             cl_delays=delay_ccfg,
                                             cl_app=app_ccfg),
                            tc_criteria=TestCriteria(tc_crit_type=RUN_TIME,
                                                     tc_run_time_s=1),
                            tc_async=False)
            self.assertEqual(self.warp17_call('ConfigureTestCase', ccfg).e_code,
                             0,
                             'ConfigureTestCase')
            l4_scfg = L4Server(l4s_proto=l4_proto,
                               l4s_tcp_udp=TcpUdpServer(tus_ports=b2b_ports(1)))
            scfg = TestCase(tc_type=SERVER, tc_eth_port=1, tc_id=0,
                            tc_server=Server(srv_ips=b2b_sips(1, 1),
                                             srv_l4=l4_scfg,
                                             srv_app=app_scfg),
                            tc_criteria=TestCriteria(tc_crit_type=SRV_UP,
                                                     tc_srv_up=1),
                            tc_async=False)
            self.assertEqual(self.warp17_call('ConfigureTestCase', scfg).e_code,
                             0,
                             'ConfigureTestCase')
            # Start server test
            self.assertEqual(self.warp17_call('PortStart',
                                              PortArg(pa_eth_port=1)).e_code,
                             0,
                             'PortStart')
            # Start client test
            self.assertEqual(self.warp17_call('PortStart',
                                              PortArg(pa_eth_port=0)).e_code,
                             0,
                             'PortStart')
            # should be done in way less than 5 seconds!
            time.sleep(5)
            # Check client test to be passed
            client_result = self.warp17_call('GetTestStatus',
                                             TestCaseArg(tca_eth_port=0,
                                                         tca_test_case_id=0))
            self.assertEqual(client_result.tsr_error.e_code, 0, 'GetTestStatus')
            self.assertEqual(client_result.tsr_state, PASSED,
                             'PortStatus PASSED')
            self.assertEqual(client_result.tsr_type, CLIENT,
                             'PortStatus CLIENT')
            self.assertEqual(client_result.tsr_l4_proto, l4_proto,
                             'PortStatus L4')
            # UDP has no established-connection notion, so only check TCP.
            if l4_proto == TCP:
                self.assertEqual(client_result.tsr_stats.tcs_client.tccs_estab,
                                 1,
                                 'PortStatus ESTAB')
            # Check server test to be passed
            server_result = self.warp17_call('GetTestStatus',
                                             TestCaseArg(tca_eth_port=1,
                                                         tca_test_case_id=0))
            self.assertEqual(server_result.tsr_error.e_code, 0, 'GetTestStatus')
            self.assertEqual(server_result.tsr_state, PASSED,
                             'PortStatus PASSED')
            self.assertEqual(server_result.tsr_type, SERVER,
                             'PortStatus SERVER')
            self.assertEqual(server_result.tsr_l4_proto, l4_proto,
                             'PortStatus L4')
            self.assertEqual(server_result.tsr_stats.tcs_server.tcss_estab,
                             1,
                             'PortStatus ESTAB')
            # Stop server test
            self.assertEqual(self.warp17_call('PortStop',
                                              PortArg(pa_eth_port=1)).e_code,
                             0,
                             'PortStop')
            # Fail to stop client test (already passed)
            # NOTE(review): the comment says "client test" (port 0) but this
            # stops port 1 again; both return -ENOENT here, but confirm
            # whether pa_eth_port=0 was intended.
            self.assertEqual(self.warp17_call('PortStop',
                                              PortArg(pa_eth_port=1)).e_code,
                             -errno.ENOENT,
                             'PortStop')
            # Delete client test
            self.assertEqual(self.warp17_call('DelTestCase',
                                              TestCaseArg(tca_eth_port=0,
                                                          tca_test_case_id=0)).e_code,
                             0,
                             'DelTestCase')
            # Delete server test
            self.assertEqual(self.warp17_call('DelTestCase',
                                              TestCaseArg(tca_eth_port=1,
                                                          tca_test_case_id=0)).e_code,
                             0,
                             'DelTestCase')
##############################################################################
# Partial Get/Update APIs.
##############################################################################
class TestPartialPortApi(Warp17UnitTestCase):
"""Tests the functionality of the partial update/get port config APIs."""
"""Assumes a B2B setup with even two ports."""
"""Port 0 <-> Port 1"""
PORT_CNT = 2
    def _get_server_test(self, eth_port, tc_id):
        """Build a canonical TCP RAW server TestCase for (eth_port, tc_id).

        Serves on one L4 port, exchanges 42-byte RAW payloads and passes
        once a single server is up.
        """
        l4_scfg = L4Server(l4s_proto=TCP,
                           l4s_tcp_udp=TcpUdpServer(tus_ports=b2b_ports(1)))
        app_scfg = AppServer(as_app_proto=RAW,
                             as_raw=RawServer(rs_req_plen=42,
                                              rs_resp_plen=42))
        # NOTE(review): srv_ips always uses port 1's address range and
        # ignores eth_port — confirm this is intentional.
        return TestCase(tc_type=SERVER, tc_eth_port=eth_port, tc_id=tc_id,
                        tc_server=Server(srv_ips=b2b_sips(1, 1),
                                         srv_l4=l4_scfg,
                                         srv_app=app_scfg),
                        tc_criteria=TestCriteria(tc_crit_type=SRV_UP,
                                                 tc_srv_up=1),
                        tc_async=False)
def _get_client_test(self, eth_port, tc_id):
l4cfg = L4Client(l4c_proto=TCP,
l4c_tcp_udp=TcpUdpClient(tuc_sports=b2b_ports(1),
tuc_dports=b2b_ports(1)))
rate_cfg = RateClient(rc_open_rate=Rate(r_value=42),
rc_close_rate=Rate(r_value=42),
rc_send_rate=Rate(r_value=42))
delay_cfg = DelayClient(dc_init_delay=Delay(d_value=42),
dc_uptime=Delay(d_value=42),
dc_downtime=Delay(d_value=42))
app_cfg = AppClient(ac_app_proto=RAW,
ac_raw=RawClient(rc_req_plen=1,
rc_resp_plen=1))
return TestCase(tc_type=CLIENT, tc_eth_port=eth_port,
tc_id=tc_id,
tc_client=Client(cl_src_ips=b2b_sips(eth_port, 1),
cl_dst_ips=b2b_dips(eth_port, 1),
cl_l4=l4cfg,
cl_rates=rate_cfg,
cl_delays=delay_cfg,
cl_app=app_cfg),
tc_criteria=TestCriteria(tc_crit_type=RUN_TIME,
tc_run_time_s=42),
tc_async=False)
def setUp(self):
self._pcfg = b2b_configure_port(eth_port=0,
def_gw=Ip(ip_version=IPV4, ip_v4=42),
l3_intf_count=TPG_TEST_MAX_L3_INTF)
def tearDown(self):
clean_pcfg = b2b_configure_port(eth_port=0,
def_gw=Ip(ip_version=IPV4, ip_v4=0),
l3_intf_count=0)
self.warp17_call('ConfigurePort', clean_pcfg)
def test_configure_l3_intf_valid(self):
"""Tests the ConfigureL3Intf API with valid config"""
l3_arg = L3IntfArg(lia_eth_port=0,
lia_l3_intf=self._pcfg.pc_l3_intfs[0])
self.assertEqual(self.warp17_call('ConfigureL3Intf', l3_arg).e_code, 0,
'Connfigure Single L3 Intf')
result = self.warp17_call('GetPortCfg', PortArg(pa_eth_port=0))
self.assertEqual(result.pcr_error.e_code, 0, 'GetPortCfg Single')
self.assertEqual(len(result.pcr_cfg.pc_l3_intfs), 1, 'L3Intf Single Cnt')
self.assertTrue(self._pcfg.pc_l3_intfs[0] == result.pcr_cfg.pc_l3_intfs[0],
'L3Intf Single Eq')
def test_configure_l3_intf_invalid_port(self):
"""Tests the ConfigureL3Intf API with invalid port"""
l3_arg = L3IntfArg(lia_eth_port=self.PORT_CNT + 1,
lia_l3_intf=self._pcfg.pc_l3_intfs[0])
self.assertEqual(self.warp17_call('ConfigureL3Intf', l3_arg).e_code,
-errno.EINVAL,
'L3Intf invalid port')
def test_configure_l3_intf_duplicate(self):
"""Tests the ConfigureL3Intf API when trying to add a duplicate"""
l3_arg = L3IntfArg(lia_eth_port=0,
lia_l3_intf=self._pcfg.pc_l3_intfs[0])
self.assertEqual(self.warp17_call('ConfigureL3Intf', l3_arg).e_code, 0,
'Configure First L3 Intf')
self.assertEqual(self.warp17_call('ConfigureL3Intf', l3_arg).e_code,
-errno.EEXIST,
'Configure Duplicate L3 Intf')
def test_configure_l3_intf_test_running(self):
"""Tests the ConfigureL3Intf API when tests are already running"""
self.assertEqual(self.warp17_call('ConfigureTestCase',
self._get_server_test(0, 0)).e_code,
0,
'Configure Test Case')
self.assertEqual(self.warp17_call('PortStart', PortArg(pa_eth_port=0)).e_code, 0,
'Port Start')
l3_arg = L3IntfArg(lia_eth_port=0,
lia_l3_intf=self._pcfg.pc_l3_intfs[0])
self.assertEqual(self.warp17_call('ConfigureL3Intf', l3_arg).e_code,
-errno.EALREADY)
# Test already running cleanup
self.warp17_call('PortStop', PortArg(pa_eth_port=0))
self.warp17_call('DelTestCase', TestCaseArg(tca_eth_port=0, tca_test_case_id=0))
def test_configure_l3_intf_max_reached(self):
"""Tests the ConfigureL3Intf API when trying to add more than max"""
error = self.warp17_call('ConfigurePort', self._pcfg)
self.assertEqual(error.e_code, 0, 'ConfigurePort')
l3_arg = L3IntfArg(lia_eth_port=0,
lia_l3_intf=L3Intf(l3i_ip=Ip(ip_version=IPV4, ip_v4=b2b_ipv4(1, 0)),
l3i_mask=Ip(ip_version=IPV4, ip_v4=b2b_mask(1, 0)),
l3i_count=1))
self.assertEqual(self.warp17_call('ConfigureL3Intf', l3_arg).e_code,
-errno.ENOMEM,
'Configure MAX L3 Intf')
def test_configure_l3_gw_valid(self):
"""Tests the ConfigureL3Gw API with valid config"""
l3_gw_arg = L3GwArg(lga_eth_port=0, lga_gw=self._pcfg.pc_def_gw)
self.assertEqual(self.warp17_call('ConfigureL3Gw', l3_gw_arg).e_code, 0,
'Connfigure Valid Gw')
result = self.warp17_call('GetPortCfg', PortArg(pa_eth_port=0))
self.assertEqual(result.pcr_error.e_code, 0, 'GetPortCfg Single')
self.assertTrue(self._pcfg.pc_def_gw == result.pcr_cfg.pc_def_gw,
'Gw Eq')
def test_configure_l3_gw_invalid_port(self):
"""Tests the ConfigureL3Gw API with invalid port"""
l3_gw_arg = L3GwArg(lga_eth_port=4242, lga_gw=self._pcfg.pc_def_gw)
self.assertEqual(self.warp17_call('ConfigureL3Gw', l3_gw_arg).e_code,
-errno.EINVAL,
'Connfigure Gw invalid port')
def test_configure_l3_gw_test_running(self):
"""Tests the ConfigureL3Gw API when tests are already running"""
self.assertEqual(self.warp17_call('ConfigureTestCase',
self._get_server_test(0, 0)).e_code,
0,
'Configure Test Case')
self.assertEqual(self.warp17_call('PortStart', PortArg(pa_eth_port=0)).e_code, 0,
'Port Start')
l3_gw_arg = L3GwArg(lga_eth_port=0, lga_gw=self._pcfg.pc_def_gw)
self.assertEqual(self.warp17_call('ConfigureL3Gw', l3_gw_arg).e_code,
-errno.EALREADY)
# Test already running cleanup
self.warp17_call('PortStop', PortArg(pa_eth_port=0))
self.warp17_call('DelTestCase', TestCaseArg(tca_eth_port=0, tca_test_case_id=0))
class TestPartialApi(Warp17NoTrafficTestCase, Warp17UnitTestCase):
    """Tests the functionality of the partial update config APIs."""

    def get_updates(self):
        """Yield (client_update, server_update) pairs of valid partial updates."""
        tca = TestCaseArg(tca_eth_port=0, tca_test_case_id=0)
        # First client updates:
        yield (UpdateArg(ua_tc_arg=tca,
                         ua_rate_open=Rate(r_value=84),
                         ua_rate_send=Rate(r_value=84),
                         ua_rate_close=Rate(r_value=84)), None)
        yield (UpdateArg(ua_tc_arg=tca,
                         ua_init_delay=Delay(d_value=84),
                         ua_uptime=Delay(d_value=84),
                         ua_downtime=Delay(d_value=84)), None)
        yield (UpdateArg(ua_tc_arg=tca,
                         ua_criteria=TestCriteria(tc_crit_type=RUN_TIME,
                                                  tc_run_time_s=84)), None)
        # Now server updates:
        yield (None, UpdateArg(ua_tc_arg=tca,
                               ua_criteria=TestCriteria(tc_crit_type=SRV_UP,
                                                        tc_srv_up=42)))
        # Now common updates:
        # NOTE: the loop variable must not be named `async` -- that is a
        # reserved keyword starting with Python 3.7 and was a syntax error.
        for is_async in [True, False]:
            yield (UpdateArg(ua_tc_arg=tca, ua_async=is_async),
                   UpdateArg(ua_tc_arg=tca, ua_async=is_async))

    def get_invalid_updates(self):
        """There are no invalid partial updates for this API; yield nothing."""
        for _ in []: yield ()

    def _update(self, tc_arg, update_arg, expected_err=0):
        """Apply `update_arg` to the test case named by `tc_arg` and check
        that UpdateTestCase returns `expected_err`. A None `update_arg` is a
        no-op so client-only/server-only updates can share one code path."""
        if update_arg is None:
            return
        update_arg.ua_tc_arg.tca_eth_port = tc_arg.tca_eth_port
        update_arg.ua_tc_arg.tca_test_case_id = tc_arg.tca_test_case_id
        err = self.warp17_call('UpdateTestCase', update_arg)
        self.assertEqual(err.e_code, expected_err)

    def update_client(self, tc_arg, update_arg, expected_err=0):
        self._update(tc_arg, update_arg, expected_err)

    def update_server(self, tc_arg, update_arg, expected_err=0):
        self._update(tc_arg, update_arg, expected_err)
class TestPartialAppApi(Warp17NoTrafficTestCase, Warp17UnitTestCase):
    """Tests the functionality of the partial update app config APIs."""

    # Shared fixtures: a test-case handle plus one valid client and one valid
    # server RAW app config.
    tca = TestCaseArg(tca_eth_port=0, tca_test_case_id=0)
    cl_app = AppClient(ac_app_proto=RAW,
                       ac_raw=RawClient(rc_req_plen=84, rc_resp_plen=84))
    srv_app = AppServer(as_app_proto=RAW, as_raw=RawServer(rs_req_plen=42,
                                                           rs_resp_plen=42))

    def get_updates(self):
        """Yield valid (client_update, server_update) pairs."""
        yield (UpdClientArg(uca_tc_arg=self.tca, uca_cl_app=self.cl_app),
               UpdServerArg(usa_tc_arg=self.tca, usa_srv_app=self.srv_app))

    def get_invalid_updates(self):
        """Yield type-swapped pairs: a server arg where a client arg is
        expected and vice versa."""
        yield (UpdServerArg(usa_tc_arg=self.tca, usa_srv_app=self.srv_app),
               UpdClientArg(uca_tc_arg=self.tca, uca_cl_app=self.cl_app))

    def _update(self, tc_arg, update_arg):
        """Dispatch `update_arg` to the matching RPC based on its type and
        return the RPC error result.

        Raises:
            TypeError: if `update_arg` is neither an UpdClientArg nor an
                UpdServerArg (previously this fell through and raised an
                UnboundLocalError on `err`).
        """
        if update_arg.__class__.__name__ == 'UpdClientArg':
            update_arg.uca_tc_arg.tca_eth_port = tc_arg.tca_eth_port
            update_arg.uca_tc_arg.tca_test_case_id = tc_arg.tca_test_case_id
            err = self.warp17_call('UpdateTestCaseAppClient', update_arg)
        elif update_arg.__class__.__name__ == 'UpdServerArg':
            update_arg.usa_tc_arg.tca_eth_port = tc_arg.tca_eth_port
            update_arg.usa_tc_arg.tca_test_case_id = tc_arg.tca_test_case_id
            err = self.warp17_call('UpdateTestCaseAppServer', update_arg)
        else:
            raise TypeError('Unsupported update argument type: %r' %
                            update_arg.__class__.__name__)
        return err

    def update_client(self, tc_arg, update_arg, expected_err=0):
        err = self._update(tc_arg, update_arg)
        self.assertEqual(err.e_code, expected_err)
        if expected_err == 0:
            # On success the stored config must reflect the update.
            cl_result = self.warp17_call('GetTestCaseAppClient',
                                         update_arg.uca_tc_arg)
            self.assertEqual(cl_result.tccr_error.e_code, 0)
            self.assertTrue(cl_result.tccr_cl_app == update_arg.uca_cl_app)

    def update_server(self, tc_arg, update_arg, expected_err=0):
        err = self._update(tc_arg, update_arg)
        self.assertEqual(err.e_code, expected_err)
        if expected_err == 0:
            # On success the stored config must reflect the update.
            srv_result = self.warp17_call('GetTestCaseAppServer',
                                          update_arg.usa_tc_arg)
            self.assertEqual(srv_result.tcsr_error.e_code, 0)
            self.assertTrue(srv_result.tcsr_srv_app == update_arg.usa_srv_app)
|
{
"content_hash": "77ec4a2269e673cac30e68b0964ffb86",
"timestamp": "",
"source": "github",
"line_count": 776,
"max_line_length": 97,
"avg_line_length": 47.63015463917526,
"alnum_prop": 0.46513893022374936,
"repo_name": "jlijian3/warp17",
"id": "4d2e4ef9782c62b920e4aa7bda7ea9a97b5c6eca",
"size": "39028",
"binary": false,
"copies": "1",
"ref": "refs/heads/1.4",
"path": "ut/test_api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1376553"
},
{
"name": "C++",
"bytes": "13457"
},
{
"name": "Gnuplot",
"bytes": "5996"
},
{
"name": "Makefile",
"bytes": "3078"
},
{
"name": "Perl 6",
"bytes": "3168"
},
{
"name": "Python",
"bytes": "163267"
},
{
"name": "Shell",
"bytes": "5699"
}
],
"symlink_target": ""
}
|
'''
Unpickle a small test knowledge base and do a little demo inference
------------------------------------------------------------------------------
SketchNet
Iulius Curt @ 2013
------------------------------------------------------------------------------
'''
from knowledge_repr import *
from svghelpers import *
import cPickle as pickle
def mount(action, entities):
    """Mount each entity into its corresponding slot of *action*'s sketch.

    Args:
        action: knowledge-base action whose `components` list defines the
            available slots.
        entities: sequence of entities, one per component slot.

    Raises:
        ValueError: if the number of entities does not match the number of
            component slots. (Was a bare `assert`, which is stripped when
            Python runs with -O.)
    """
    if len(action.components) != len(entities):
        raise ValueError('expected %d entities for %d components, got %d'
                         % (len(action.components), len(action.components),
                            len(entities)))
    # TODO: mount each entity in its spot on the action sketch
# Demo driver (Python 2): load the pickled knowledge base, register two
# example sketches against each other and write the combined SVG.
if __name__ == '__main__':
    # UnPickle the knowledge base
    with open('testkb.pkl', 'rb') as pf:
        kb = pickle.load(pf)
    print kb
    # Load the two demo sketches by name (SVG helpers resolve the files).
    fig1 = load_sketch('flying-thing')
    fig2 = load_sketch('mpt2')
    # Align/overlay the two sketches into a single registered figure.
    regfig = register_sketches('flying-thing', fig1, 'mpt2', fig2)
    # Save generated SVG file
    regfig.save("fig_final.svg")
|
{
"content_hash": "302be20f1c3f2e8145e11d5ed13d1bf8",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 78,
"avg_line_length": 24.057142857142857,
"alnum_prop": 0.5380047505938242,
"repo_name": "iuliux/SketchNet",
"id": "240e182ec39d3b9ae683802b9cf0b554a631aef3",
"size": "842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testinfere.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9592"
}
],
"symlink_target": ""
}
|
from supybot.test import *
import os
import unittest
import supybot.conf as conf
import supybot.world as world
import supybot.ircdb as ircdb
import supybot.ircutils as ircutils
class IrcdbTestCase(SupyTestCase):
    """Base fixture for ircdb tests: ircdb behaves differently while
    world.testing is set, so each test runs with the flag cleared and has it
    restored afterwards."""
    def setUp(self):
        # Clear the flag *before* the base setUp so ircdb sees "production"
        # mode for the whole test.
        world.testing = False
        SupyTestCase.setUp(self)
    def tearDown(self):
        world.testing = True
        SupyTestCase.tearDown(self)
class FunctionsTestCase(IrcdbTestCase):
    """Tests for the module-level capability helper functions.

    Uses assertTrue/assertFalse instead of the deprecated failUnless/failIf
    aliases (removed in Python 3.12).
    """

    def testIsAntiCapability(self):
        self.assertFalse(ircdb.isAntiCapability('foo'))
        self.assertFalse(ircdb.isAntiCapability('#foo,bar'))
        self.assertTrue(ircdb.isAntiCapability('-foo'))
        self.assertTrue(ircdb.isAntiCapability('#foo,-bar'))
        self.assertTrue(ircdb.isAntiCapability('#foo.bar,-baz'))

    def testIsChannelCapability(self):
        self.assertFalse(ircdb.isChannelCapability('foo'))
        self.assertTrue(ircdb.isChannelCapability('#foo,bar'))
        self.assertTrue(ircdb.isChannelCapability('#foo.bar,baz'))
        self.assertTrue(ircdb.isChannelCapability('#foo,bar.baz'))

    def testMakeAntiCapability(self):
        self.assertEqual(ircdb.makeAntiCapability('foo'), '-foo')
        self.assertEqual(ircdb.makeAntiCapability('#foo,bar'), '#foo,-bar')

    def testMakeChannelCapability(self):
        self.assertEqual(ircdb.makeChannelCapability('#f', 'b'), '#f,b')
        self.assertEqual(ircdb.makeChannelCapability('#f', '-b'), '#f,-b')

    def testFromChannelCapability(self):
        self.assertEqual(ircdb.fromChannelCapability('#foo,bar'),
                         ['#foo', 'bar'])
        self.assertEqual(ircdb.fromChannelCapability('#foo.bar,baz'),
                         ['#foo.bar', 'baz'])
        self.assertEqual(ircdb.fromChannelCapability('#foo,bar.baz'),
                         ['#foo', 'bar.baz'])

    def testUnAntiCapability(self):
        self.assertEqual(ircdb.unAntiCapability('-bar'), 'bar')
        self.assertEqual(ircdb.unAntiCapability('#foo,-bar'), '#foo,bar')
        self.assertEqual(ircdb.unAntiCapability('#foo.bar,-baz'),
                         '#foo.bar,baz')

    def testInvertCapability(self):
        self.assertEqual(ircdb.invertCapability('bar'), '-bar')
        self.assertEqual(ircdb.invertCapability('-bar'), 'bar')
        self.assertEqual(ircdb.invertCapability('#foo,bar'), '#foo,-bar')
        self.assertEqual(ircdb.invertCapability('#foo,-bar'), '#foo,bar')
class CapabilitySetTestCase(SupyTestCase):
    """Tests for ircdb.CapabilitySet (add/remove/check/contains semantics).

    Uses assertTrue/assertFalse instead of the deprecated failUnless/failIf
    aliases (removed in Python 3.12).
    """

    def testGeneral(self):
        d = ircdb.CapabilitySet()
        # Checking an unknown capability raises KeyError.
        self.assertRaises(KeyError, d.check, 'foo')
        d = ircdb.CapabilitySet(('foo',))
        self.assertTrue(d.check('foo'))
        self.assertFalse(d.check('-foo'))
        d.add('bar')
        self.assertTrue(d.check('bar'))
        self.assertFalse(d.check('-bar'))
        d.add('-baz')
        self.assertFalse(d.check('baz'))
        self.assertTrue(d.check('-baz'))
        # Adding the anti-capability flips an existing positive one.
        d.add('-bar')
        self.assertFalse(d.check('bar'))
        self.assertTrue(d.check('-bar'))
        d.remove('-bar')
        self.assertRaises(KeyError, d.check, '-bar')
        self.assertRaises(KeyError, d.check, 'bar')

    def testReprEval(self):
        # repr() must round-trip through eval() at every state.
        s = ircdb.UserCapabilitySet()
        self.assertEqual(s, eval(repr(s), ircdb.__dict__, ircdb.__dict__))
        s.add('foo')
        self.assertEqual(s, eval(repr(s), ircdb.__dict__, ircdb.__dict__))
        s.add('bar')
        self.assertEqual(s, eval(repr(s), ircdb.__dict__, ircdb.__dict__))

    def testContains(self):
        # Membership is symmetric: a set "contains" a capability iff either
        # the capability or its anti-form has been added.
        s = ircdb.CapabilitySet()
        self.assertFalse('foo' in s)
        self.assertFalse('-foo' in s)
        s.add('foo')
        self.assertTrue('foo' in s)
        self.assertTrue('-foo' in s)
        s.remove('foo')
        self.assertFalse('foo' in s)
        self.assertFalse('-foo' in s)
        s.add('-foo')
        self.assertTrue('foo' in s)
        self.assertTrue('-foo' in s)

    def testCheck(self):
        s = ircdb.CapabilitySet()
        self.assertRaises(KeyError, s.check, 'foo')
        self.assertRaises(KeyError, s.check, '-foo')
        s.add('foo')
        self.assertTrue(s.check('foo'))
        self.assertFalse(s.check('-foo'))
        s.remove('foo')
        self.assertRaises(KeyError, s.check, 'foo')
        self.assertRaises(KeyError, s.check, '-foo')
        s.add('-foo')
        self.assertFalse(s.check('foo'))
        self.assertTrue(s.check('-foo'))
        s.remove('-foo')
        self.assertRaises(KeyError, s.check, 'foo')
        self.assertRaises(KeyError, s.check, '-foo')

    def testAdd(self):
        # The most recently added of a capability / its anti-form wins.
        s = ircdb.CapabilitySet()
        s.add('foo')
        s.add('-foo')
        self.assertFalse(s.check('foo'))
        self.assertTrue(s.check('-foo'))
        s.add('foo')
        self.assertTrue(s.check('foo'))
        self.assertFalse(s.check('-foo'))
class UserCapabilitySetTestCase(SupyTestCase):
    """Tests for ircdb.UserCapabilitySet, notably the special 'owner'
    capability which implies every other capability.

    Uses assertTrue/assertFalse instead of the deprecated failUnless/failIf
    aliases (removed in Python 3.12); the long-commented-out reload test was
    dropped as dead code.
    """

    def testOwnerHasAll(self):
        d = ircdb.UserCapabilitySet(('owner',))
        self.assertFalse(d.check('-foo'))
        self.assertTrue(d.check('foo'))

    def testOwnerIsAlwaysPresent(self):
        # 'owner' is always *present* in the set, but only checks True once
        # it has actually been granted.
        d = ircdb.UserCapabilitySet()
        self.assertTrue('owner' in d)
        self.assertTrue('-owner' in d)
        self.assertFalse(d.check('owner'))
        d.add('owner')
        self.assertTrue(d.check('owner'))

    def testReprEval(self):
        s = ircdb.UserCapabilitySet()
        self.assertEqual(s, eval(repr(s), ircdb.__dict__, ircdb.__dict__))
        s.add('foo')
        self.assertEqual(s, eval(repr(s), ircdb.__dict__, ircdb.__dict__))
        s.add('bar')
        self.assertEqual(s, eval(repr(s), ircdb.__dict__, ircdb.__dict__))

    def testOwner(self):
        s = ircdb.UserCapabilitySet()
        s.add('owner')
        self.assertTrue('foo' in s)
        self.assertTrue('-foo' in s)
        self.assertTrue(s.check('owner'))
        self.assertFalse(s.check('-owner'))
        self.assertFalse(s.check('-foo'))
        self.assertTrue(s.check('foo'))
class IrcUserTestCase(IrcdbTestCase):
    """Tests for ircdb.IrcUser: capabilities, hostmasks, passwords and auth.

    Uses assertTrue/assertFalse instead of the deprecated failUnless/failIf
    aliases (removed in Python 3.12).
    """

    def testCapabilities(self):
        u = ircdb.IrcUser()
        u.addCapability('foo')
        self.assertTrue(u._checkCapability('foo'))
        self.assertFalse(u._checkCapability('-foo'))
        u.addCapability('-bar')
        self.assertTrue(u._checkCapability('-bar'))
        self.assertFalse(u._checkCapability('bar'))
        u.removeCapability('foo')
        u.removeCapability('-bar')
        self.assertRaises(KeyError, u._checkCapability, 'foo')
        self.assertRaises(KeyError, u._checkCapability, '-bar')

    def testAddhostmask(self):
        # The catch-all hostmask is rejected.
        u = ircdb.IrcUser()
        self.assertRaises(ValueError, u.addHostmask, '*!*@*')

    def testRemoveHostmask(self):
        # A hostmask added twice is fully gone after a single removal.
        u = ircdb.IrcUser()
        u.addHostmask('foo!bar@baz')
        self.assertTrue(u.checkHostmask('foo!bar@baz'))
        u.addHostmask('foo!bar@baz')
        u.removeHostmask('foo!bar@baz')
        self.assertFalse(u.checkHostmask('foo!bar@baz'))

    def testOwner(self):
        u = ircdb.IrcUser()
        u.addCapability('owner')
        self.assertTrue(u._checkCapability('foo'))
        self.assertFalse(u._checkCapability('-foo'))

    def testInitCapabilities(self):
        u = ircdb.IrcUser(capabilities=['foo'])
        self.assertTrue(u._checkCapability('foo'))

    def testPassword(self):
        u = ircdb.IrcUser()
        u.setPassword('foobar')
        self.assertTrue(u.checkPassword('foobar'))
        self.assertFalse(u.checkPassword('somethingelse'))

    def testTimeoutAuth(self):
        # Auth entries must expire after the configured identification
        # timeout (2s here; the sleep makes this test slow by design).
        orig = conf.supybot.databases.users.timeoutIdentification()
        try:
            conf.supybot.databases.users.timeoutIdentification.setValue(2)
            u = ircdb.IrcUser()
            u.addAuth('foo!bar@baz')
            self.assertTrue(u.checkHostmask('foo!bar@baz'))
            time.sleep(2.1)
            self.assertFalse(u.checkHostmask('foo!bar@baz'))
        finally:
            conf.supybot.databases.users.timeoutIdentification.setValue(orig)

    def testMultipleAuth(self):
        # Re-adding the same prefix must not duplicate the auth entry, and
        # all entries expire together after the timeout.
        orig = conf.supybot.databases.users.timeoutIdentification()
        try:
            conf.supybot.databases.users.timeoutIdentification.setValue(2)
            u = ircdb.IrcUser()
            u.addAuth('foo!bar@baz')
            self.assertTrue(u.checkHostmask('foo!bar@baz'))
            u.addAuth('foo!bar@baz')
            self.assertTrue(u.checkHostmask('foo!bar@baz'))
            self.assertTrue(len(u.auth) == 1)
            u.addAuth('boo!far@fizz')
            self.assertTrue(u.checkHostmask('boo!far@fizz'))
            time.sleep(2.1)
            self.assertFalse(u.checkHostmask('foo!bar@baz'))
            self.assertFalse(u.checkHostmask('boo!far@fizz'))
        finally:
            conf.supybot.databases.users.timeoutIdentification.setValue(orig)

    def testHashedPassword(self):
        u = ircdb.IrcUser()
        u.setPassword('foobar', hashed=True)
        self.assertTrue(u.checkPassword('foobar'))
        self.assertFalse(u.checkPassword('somethingelse'))
        # The stored password must not be the plaintext.
        self.assertNotEqual(u.password, 'foobar')

    def testHostmasks(self):
        prefix = 'foo12341234!bar@baz.domain.tld'
        hostmasks = ['*!bar@baz.domain.tld', 'foo12341234!*@*']
        u = ircdb.IrcUser()
        self.assertFalse(u.checkHostmask(prefix))
        for hostmask in hostmasks:
            u.addHostmask(hostmask)
        self.assertTrue(u.checkHostmask(prefix))

    def testAuth(self):
        prefix = 'foo!bar@baz'
        u = ircdb.IrcUser()
        u.addAuth(prefix)
        self.assertTrue(u.auth)
        u.clearAuth()
        self.assertFalse(u.auth)

    def testIgnore(self):
        # An ignored user has no capabilities and all anti-capabilities.
        u = ircdb.IrcUser(ignore=True)
        self.assertFalse(u._checkCapability('foo'))
        self.assertTrue(u._checkCapability('-foo'))

    def testRemoveCapability(self):
        u = ircdb.IrcUser(capabilities=('foo',))
        self.assertRaises(KeyError, u.removeCapability, 'bar')
class IrcChannelTestCase(IrcdbTestCase):
    """Tests for ircdb.IrcChannel: capabilities, lobotomy, ignores and bans.

    Uses assertTrue/assertFalse instead of the deprecated failUnless/failIf
    aliases (removed in Python 3.12).
    """

    def testInit(self):
        # A fresh channel grants none of the standard mode capabilities.
        c = ircdb.IrcChannel()
        self.assertFalse(c._checkCapability('op'))
        self.assertFalse(c._checkCapability('voice'))
        self.assertFalse(c._checkCapability('halfop'))
        self.assertFalse(c._checkCapability('protected'))

    def testCapabilities(self):
        c = ircdb.IrcChannel(defaultAllow=False)
        self.assertFalse(c._checkCapability('foo'))
        c.addCapability('foo')
        self.assertTrue(c._checkCapability('foo'))
        c.removeCapability('foo')
        self.assertFalse(c._checkCapability('foo'))

    def testDefaultCapability(self):
        # The default answer for unknown capabilities follows the channel's
        # defaultAllow flag (and its inverse for anti-capabilities).
        c = ircdb.IrcChannel()
        c.setDefaultCapability(False)
        self.assertFalse(c._checkCapability('foo'))
        self.assertTrue(c._checkCapability('-foo'))
        c.setDefaultCapability(True)
        self.assertTrue(c._checkCapability('foo'))
        self.assertFalse(c._checkCapability('-foo'))

    def testLobotomized(self):
        # A lobotomized channel ignores everyone.
        c = ircdb.IrcChannel(lobotomized=True)
        self.assertTrue(c.checkIgnored('foo!bar@baz'))

    def testIgnored(self):
        # Both explicit ignores and bans cause checkIgnored to be true.
        prefix = 'foo!bar@baz'
        banmask = ircutils.banmask(prefix)
        c = ircdb.IrcChannel()
        self.assertFalse(c.checkIgnored(prefix))
        c.addIgnore(banmask)
        self.assertTrue(c.checkIgnored(prefix))
        c.removeIgnore(banmask)
        self.assertFalse(c.checkIgnored(prefix))
        c.addBan(banmask)
        self.assertTrue(c.checkIgnored(prefix))
        c.removeBan(banmask)
        self.assertFalse(c.checkIgnored(prefix))
class UsersDictionaryTestCase(IrcdbTestCase):
    """Tests for ircdb.UsersDictionary CRUD and lookup behavior."""

    filename = os.path.join(conf.supybot.directories.conf(),
                            'UsersDictionaryTestCase.conf')

    def setUp(self):
        # Remove any leftover DB file from a previous run; narrowed from a
        # bare `except:` so only file-removal errors are ignored.
        try:
            os.remove(self.filename)
        except OSError:
            pass
        self.users = ircdb.UsersDictionary()
        IrcdbTestCase.setUp(self)

    def testIterAndNumUsers(self):
        self.assertEqual(self.users.numUsers(), 0)
        u = self.users.newUser()
        hostmask = 'foo!xyzzy@baz.domain.com'
        banmask = ircutils.banmask(hostmask)
        u.addHostmask(banmask)
        u.name = 'foo'
        self.users.setUser(u)
        self.assertEqual(self.users.numUsers(), 1)
        u = self.users.newUser()
        hostmask = 'biff!fladksfj@blakjdsf'
        banmask = ircutils.banmask(hostmask)
        u.addHostmask(banmask)
        u.name = 'biff'
        self.users.setUser(u)
        self.assertEqual(self.users.numUsers(), 2)
        self.users.delUser(2)
        self.assertEqual(self.users.numUsers(), 1)
        self.users.delUser(1)
        self.assertEqual(self.users.numUsers(), 0)

    def testGetSetDelUser(self):
        self.assertRaises(KeyError, self.users.getUser, 'foo')
        self.assertRaises(KeyError,
                          self.users.getUser, 'foo!xyzzy@baz.domain.com')
        u = self.users.newUser()
        hostmask = 'foo!xyzzy@baz.domain.com'
        banmask = ircutils.banmask(hostmask)
        u.addHostmask(banmask)
        u.addHostmask(hostmask)
        u.name = 'foo'
        self.users.setUser(u)
        # Lookup works by name (case-insensitively), exact hostmask and
        # banmask alike.
        self.assertEqual(self.users.getUser('foo'), u)
        self.assertEqual(self.users.getUser('FOO'), u)
        self.assertEqual(self.users.getUser(hostmask), u)
        self.assertEqual(self.users.getUser(banmask), u)
        # The UsersDictionary shouldn't allow users to be added whose hostmasks
        # match another user's already in the database.
        u2 = self.users.newUser()
        u2.addHostmask('*!xyzzy@baz.domain.c?m')
        self.assertRaises(ValueError, self.users.setUser, u2)
class CheckCapabilityTestCase(IrcdbTestCase):
    """Tests ircdb.checkCapability against a matrix of users (owner, empty,
    foo, -foo, channel-scoped variants, secure) and channel configs.

    Fixes folded in: deprecated failUnless/failIf aliases replaced with
    assertTrue/assertFalse, bare `except:` narrowed to OSError, the builtin
    `id` is no longer shadowed, and testSecurefoo reads the config value
    *before* its try/finally (previously a failure in the try body caused a
    NameError in the finally clause).
    """

    filename = os.path.join(conf.supybot.directories.conf(),
                            'CheckCapabilityTestCase.conf')
    # Prefixes for each fixture user.
    owner = 'owner!owner@owner'
    nothing = 'nothing!nothing@nothing'
    justfoo = 'justfoo!justfoo@justfoo'
    antifoo = 'antifoo!antifoo@antifoo'
    justchanfoo = 'justchanfoo!justchanfoo@justchanfoo'
    antichanfoo = 'antichanfoo!antichanfoo@antichanfoo'
    securefoo = 'securefoo!securefoo@securefoo'
    channel = '#channel'
    # The capability under test plus its derived forms.
    cap = 'foo'
    anticap = ircdb.makeAntiCapability(cap)
    chancap = ircdb.makeChannelCapability(channel, cap)
    antichancap = ircdb.makeAntiCapability(chancap)
    chanop = ircdb.makeChannelCapability(channel, 'op')
    # Channel fixtures: empty, granting cap, granting anti-cap.
    channelnothing = ircdb.IrcChannel()
    channelcap = ircdb.IrcChannel()
    channelcap.addCapability(cap)
    channelanticap = ircdb.IrcChannel()
    channelanticap.addCapability(anticap)

    def setUp(self):
        IrcdbTestCase.setUp(self)
        try:
            os.remove(self.filename)
        except OSError:
            pass
        self.users = ircdb.UsersDictionary()
        #self.users.open(self.filename)
        self.channels = ircdb.ChannelsDictionary()
        #self.channels.open(self.filename)
        owner = self.users.newUser()
        owner.name = 'owner'
        owner.addCapability('owner')
        owner.addHostmask(self.owner)
        self.users.setUser(owner)
        nothing = self.users.newUser()
        nothing.name = 'nothing'
        nothing.addHostmask(self.nothing)
        self.users.setUser(nothing)
        justfoo = self.users.newUser()
        justfoo.name = 'justfoo'
        justfoo.addCapability(self.cap)
        justfoo.addHostmask(self.justfoo)
        self.users.setUser(justfoo)
        antifoo = self.users.newUser()
        antifoo.name = 'antifoo'
        antifoo.addCapability(self.anticap)
        antifoo.addHostmask(self.antifoo)
        self.users.setUser(antifoo)
        justchanfoo = self.users.newUser()
        justchanfoo.name = 'justchanfoo'
        justchanfoo.addCapability(self.chancap)
        justchanfoo.addHostmask(self.justchanfoo)
        self.users.setUser(justchanfoo)
        antichanfoo = self.users.newUser()
        antichanfoo.name = 'antichanfoo'
        antichanfoo.addCapability(self.antichancap)
        antichanfoo.addHostmask(self.antichanfoo)
        self.users.setUser(antichanfoo)
        securefoo = self.users.newUser()
        securefoo.name = 'securefoo'
        securefoo.addCapability(self.cap)
        securefoo.secure = True
        securefoo.addHostmask(self.securefoo)
        self.users.setUser(securefoo)
        channel = ircdb.IrcChannel()
        self.channels.setChannel(self.channel, channel)

    def checkCapability(self, hostmask, capability):
        """Shorthand for ircdb.checkCapability against our fixture DBs."""
        return ircdb.checkCapability(hostmask, capability,
                                     self.users, self.channels)

    def testOwner(self):
        # Owner has everything, even against a channel granting the anti-cap.
        self.assertTrue(self.checkCapability(self.owner, self.cap))
        self.assertFalse(self.checkCapability(self.owner, self.anticap))
        self.assertTrue(self.checkCapability(self.owner, self.chancap))
        self.assertFalse(self.checkCapability(self.owner, self.antichancap))
        self.channels.setChannel(self.channel, self.channelanticap)
        self.assertTrue(self.checkCapability(self.owner, self.cap))
        self.assertFalse(self.checkCapability(self.owner, self.anticap))

    def testNothingAgainstChannel(self):
        # With no user capabilities, the channel's defaultAllow decides.
        self.channels.setChannel(self.channel, self.channelnothing)
        self.assertEqual(self.checkCapability(self.nothing, self.chancap),
                         self.channelnothing.defaultAllow)
        self.channelnothing.defaultAllow = not self.channelnothing.defaultAllow
        self.channels.setChannel(self.channel, self.channelnothing)
        self.assertEqual(self.checkCapability(self.nothing, self.chancap),
                         self.channelnothing.defaultAllow)
        self.channels.setChannel(self.channel, self.channelcap)
        self.assertTrue(self.checkCapability(self.nothing, self.chancap))
        self.assertFalse(self.checkCapability(self.nothing, self.antichancap))
        self.channels.setChannel(self.channel, self.channelanticap)
        self.assertFalse(self.checkCapability(self.nothing, self.chancap))
        self.assertTrue(self.checkCapability(self.nothing, self.antichancap))

    def testNothing(self):
        # With no channel involved, the global default decides.
        self.assertEqual(self.checkCapability(self.nothing, self.cap),
                         conf.supybot.capabilities.default())
        self.assertEqual(self.checkCapability(self.nothing, self.anticap),
                         not conf.supybot.capabilities.default())

    def testJustFoo(self):
        self.assertTrue(self.checkCapability(self.justfoo, self.cap))
        self.assertFalse(self.checkCapability(self.justfoo, self.anticap))

    def testAntiFoo(self):
        self.assertTrue(self.checkCapability(self.antifoo, self.anticap))
        self.assertFalse(self.checkCapability(self.antifoo, self.cap))

    def testJustChanFoo(self):
        # A user-level channel capability beats every channel config.
        self.channels.setChannel(self.channel, self.channelnothing)
        self.assertTrue(self.checkCapability(self.justchanfoo, self.chancap))
        self.assertFalse(self.checkCapability(self.justchanfoo, self.antichancap))
        self.channelnothing.defaultAllow = not self.channelnothing.defaultAllow
        self.assertTrue(self.checkCapability(self.justchanfoo, self.chancap))
        self.assertFalse(self.checkCapability(self.justchanfoo, self.antichancap))
        self.channels.setChannel(self.channel, self.channelanticap)
        self.assertTrue(self.checkCapability(self.justchanfoo, self.chancap))
        self.assertFalse(self.checkCapability(self.justchanfoo, self.antichancap))

    def testChanOpCountsAsEverything(self):
        # Channel op implies every capability in that channel.
        self.channels.setChannel(self.channel, self.channelanticap)
        uid = self.users.getUserId('nothing')
        u = self.users.getUser(uid)
        u.addCapability(self.chanop)
        self.users.setUser(u)
        self.assertTrue(self.checkCapability(self.nothing, self.chancap))
        self.channels.setChannel(self.channel, self.channelnothing)
        self.assertTrue(self.checkCapability(self.nothing, self.chancap))
        self.channelnothing.defaultAllow = not self.channelnothing.defaultAllow
        self.assertTrue(self.checkCapability(self.nothing, self.chancap))

    def testAntiChanFoo(self):
        self.channels.setChannel(self.channel, self.channelnothing)
        self.assertFalse(self.checkCapability(self.antichanfoo, self.chancap))
        self.assertTrue(self.checkCapability(self.antichanfoo,
                                             self.antichancap))

    def testSecurefoo(self):
        self.assertTrue(self.checkCapability(self.securefoo, self.cap))
        uid = self.users.getUserId(self.securefoo)
        u = self.users.getUser(uid)
        u.addAuth(self.securefoo)
        self.users.setUser(u)
        # Read the original value *before* entering try so the finally clause
        # can always restore it (previously this assignment was inside the
        # try block and a failure there caused a NameError in finally).
        originalConfDefaultAllow = conf.supybot.capabilities.default()
        try:
            conf.supybot.capabilities.default.set('False')
            # A secure user must not match from a different, unauthed prefix.
            self.assertFalse(self.checkCapability('a' + self.securefoo, self.cap))
        finally:
            conf.supybot.capabilities.default.set(str(originalConfDefaultAllow))
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
{
"content_hash": "ea17e6c8b4d504bc291962e5596b5f36",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 80,
"avg_line_length": 38.77695167286245,
"alnum_prop": 0.6355095388745087,
"repo_name": "ProgVal/Limnoria-test",
"id": "0a505cd22eb1cdeb83ff9008284bf96c14ac4111",
"size": "22451",
"binary": false,
"copies": "5",
"ref": "refs/heads/debug-pypy-sqlite",
"path": "test/test_ircdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "864"
},
{
"name": "Python",
"bytes": "2591313"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
}
|
import os,sys
import math, re
if len(sys.argv) < 4:
print 'Use: splitProposalsFile.py bboxesFile parts output'
sys.exit()
bboxes = [x for x in open(sys.argv[1])]
parts = int(sys.argv[2])
outFile = sys.argv[3] + '/part.'
##################################
# Organize boxes by source image
##################################
images = {}
r = re.compile(r"^([^\s]+)")
for box in bboxes:
name = r.match(box).group()
try:
images[ name ].append(box)
except:
images[ name ] = [box]
print 'File parsed'
##################################
# Write chuncks of files
##################################
imagesPerPart = math.ceil( float(len(images.keys())) / float(parts) )
print 'Preparing parts with',imagesPerPart,'images'
counter = 0
part = 0
currentFile = open( outFile+str(part), 'w' )
for i in images.keys():
for box in images[i]:
currentFile.write(box)
counter += 1
if counter == imagesPerPart:
currentFile.close()
part += 1
counter = 0
currentFile = open( outFile+str(part), 'w' )
print 'Total parts:',part
currentFile.close()
|
{
"content_hash": "ca97bd4e37ad93c6b4a970d983c6d5f9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 69,
"avg_line_length": 24.88372093023256,
"alnum_prop": 0.5710280373831775,
"repo_name": "jccaicedo/localization-agent",
"id": "ecc26989c694dffaf1e0b0867ecdba02e836eddf",
"size": "1070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/splitProposalsFile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "619"
},
{
"name": "Python",
"bytes": "391367"
},
{
"name": "Shell",
"bytes": "18874"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
# Auto-generated Django migration: adds the `num_tips` counter to Campaign.
class Migration(migrations.Migration):
    dependencies = [
        ('crowdcop_web', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='campaign',
            name='num_tips',
            # default=0 backfills existing rows; preserve_default=False
            # records that the model itself declares no default, so the
            # default is only used for this one migration.
            field=models.IntegerField(default=0),
            preserve_default=False,
        ),
    ]
|
{
"content_hash": "3d145f8d30696022e4556b0be23abf22",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 21.526315789473685,
"alnum_prop": 0.5843520782396088,
"repo_name": "bocaaust/CrowdCop",
"id": "7ef4cefc8c785d7f67ebb1df13849c3f37ced61c",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CrowdCop_test/crowdcop/crowdcop_web/migrations/0002_campaign_num_tips.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "28"
},
{
"name": "CSS",
"bytes": "104673"
},
{
"name": "HTML",
"bytes": "199204"
},
{
"name": "JavaScript",
"bytes": "282446"
},
{
"name": "PHP",
"bytes": "229374"
},
{
"name": "Python",
"bytes": "60610"
}
],
"symlink_target": ""
}
|
"""Python wrapper for prefetching_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from tensorflow.contrib.data.python.ops import contrib_op_loader # pylint: disable=unused-import
from tensorflow.contrib.data.python.ops import gen_dataset_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.eager import context
from tensorflow.python.framework import device as framework_device
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_dataset_ops as core_gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
def function_buffering_resource(string_arg,
                                target_device,
                                f,
                                buffer_size,
                                output_types,
                                container="",
                                shared_name=None,
                                name=None):
  """Creates a FunctionBufferingResource.
  The resource fills an internal buffer by repeatedly invoking `f` on
  `target_device`; `f` must accept exactly one string argument.
  Args:
    string_arg: The single string argument passed to `f`.
    target_device: The device on which `f` is executed.
    f: The function used to produce buffered elements.
    buffer_size: Number of elements the buffer holds.
    output_types: The output types produced by `f`.
    container: (Optional) string. Defaults to "".
    shared_name: (Optional) string; `None` is treated as "".
    name: (Optional) string to name the op.
  Returns:
    Handle to a FunctionBufferingResource.
  """
  return gen_dataset_ops.function_buffering_resource(
      string_arg=string_arg,
      target_device=target_device,
      f=f,
      buffer_size=buffer_size,
      output_types=output_types,
      container=container,
      shared_name="" if shared_name is None else shared_name,
      name=name)
def function_buffering_resource_get_next(function_buffer_resource,
                                         output_types,
                                         name=None):
  """Pulls the next batch of elements out of a FunctionBufferingResource."""
  return gen_dataset_ops.function_buffering_resource_get_next(
      name=name,
      output_types=output_types,
      function_buffer_resource=function_buffer_resource)
def function_buffering_resource_reset(function_buffer_resource, name=None):
  """Resets a FunctionBufferingResource, discarding any buffered elements."""
  return gen_dataset_ops.function_buffering_resource_reset(
      name=name, function_buffer_resource=function_buffer_resource)
# pylint: disable=protected-access
class _PrefetchToDeviceIterator(object):
  """A replacement for @{tf.data.Iterator} that prefetches to another device.
  Args:
    input_dataset: The input dataset
    one_shot: If true, we make a one shot iterator that's already initialized.
    device: A fully specified device string where we want to prefetch to
    buffer_size: Size of the prefetching buffer.
    shared_name: (Optional.) If non-empty, the returned iterator will be
      shared under the given name across multiple sessions that share the
      same devices (e.g. when using a remote server).
  Returns:
    An Iterator type object.
  """
  def __init__(self,
               input_dataset,
               one_shot,
               device,
               buffer_size,
               shared_name=None):
    self._input_dataset = input_dataset
    # Counts get_next() calls so we can warn about the graph-bloat
    # anti-pattern of calling get_next() many times (see get_next below).
    self._get_next_call_count = 0
    self._one_shot = one_shot
    if shared_name is None:
      shared_name = ""
    # A one-shot iterator is created pre-initialized; otherwise we build a
    # structure-based iterator and expose an explicit `initializer` op.
    if self._one_shot:
      self._input_iterator = input_dataset.make_one_shot_iterator()
    else:
      self._input_iterator = iterator_ops.Iterator.from_structure(
          self._input_dataset.output_types, self._input_dataset.output_shapes,
          shared_name, self._input_dataset.output_classes)
    input_iterator_handle = self._input_iterator.string_handle()
    @function.Defun(dtypes.string)
    def _prefetch_fn(handle):
      """Prefetches one element from `input_iterator`."""
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          handle, self._input_iterator.output_types,
          self._input_iterator.output_shapes,
          self._input_iterator.output_classes)
      ret = remote_iterator.get_next()
      # Sparse tensors can't cross the function boundary directly: they are
      # serialized here and deserialized again in get_next().
      return nest.flatten(sparse.serialize_sparse_tensors(ret))
    iterator_device = gen_dataset_ops.iterator_get_device(
        self._input_iterator._iterator_resource)
    # The buffering resource lives on the *target* device and repeatedly
    # invokes `_prefetch_fn` on the input iterator's device to fill itself.
    with ops.device(device):
      self._buffering_resource = function_buffering_resource(
          f=_prefetch_fn,
          target_device=iterator_device,
          string_arg=input_iterator_handle,
          buffer_size=buffer_size,
          shared_name=shared_name,
          output_types=nest.flatten(
              sparse.as_dense_types(self._input_dataset.output_types,
                                    self._input_dataset.output_classes)))
    if not self._one_shot:
      # Re-initializing the input iterator must also discard anything already
      # buffered, so the reset op is sequenced before the initializer.
      reset_op = function_buffering_resource_reset(self._buffering_resource)
      with ops.control_dependencies([reset_op]):
        self._initializer = self._input_iterator.make_initializer(
            self._input_dataset)
  def get_next(self, name=None):
    """See @{tf.data.Iterator.get_next}."""
    self._get_next_call_count += 1
    if self._get_next_call_count > iterator_ops.GET_NEXT_CALL_WARNING_THRESHOLD:
      warnings.warn(iterator_ops.GET_NEXT_CALL_WARNING_MESSAGE)
    flat_ret = gen_dataset_ops.function_buffering_resource_get_next(
        self._buffering_resource,
        output_types=nest.flatten(sparse.as_dense_types(
            self.output_types, self.output_classes)), name=name)
    # Undo the serialization performed by _prefetch_fn and restore the
    # original nested structure.
    ret = sparse.deserialize_sparse_tensors(
        nest.pack_sequence_as(self.output_types, flat_ret),
        self.output_types, self.output_shapes, self.output_classes)
    # Re-attach static shape information lost across the resource boundary.
    for tensor, shape in zip(
        nest.flatten(ret), nest.flatten(self.output_shapes)):
      if isinstance(tensor, ops.Tensor):
        tensor.set_shape(shape)
    return ret
  @property
  def initializer(self):
    # Only initializable (non-one-shot) iterators expose an initializer op.
    if self._one_shot:
      raise NotImplementedError("Can't initialize a one_shot_iterator")
    return self._initializer
  @property
  def output_classes(self):
    return self._input_dataset.output_classes
  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes
  @property
  def output_types(self):
    return self._input_dataset.output_types
class _PrefetchToDeviceEagerIterator(iterator_ops.EagerIterator):
  """A replacement for @{tf.data.Iterator} that prefetches to another device.
  Args:
    input_dataset: The input dataset
    device: A fully specified device string where we want to prefetch to
    buffer_size: Size of the prefetching buffer.
  Returns:
    An Iterator type object.
  """
  def __init__(self,
               input_dataset,
               device,
               buffer_size):
    # The underlying eager iterator (and its resource) is pinned to the host
    # CPU; only the buffering resource below is placed on the target device.
    with ops.device("/device:CPU:0"):
      super(_PrefetchToDeviceEagerIterator, self).__init__(input_dataset)
      input_iterator_handle = core_gen_dataset_ops.iterator_to_string_handle(
          self._resource)
    self._device = device
    @function.Defun(dtypes.string)
    def _prefetch_fn(handle):
      """Prefetches one element from `input_iterator`."""
      remote_iterator = iterator_ops.Iterator.from_string_handle(
          handle, self.output_types, self.output_shapes, self.output_classes)
      ret = remote_iterator.get_next()
      # Serialize sparse tensors so they can cross the function boundary.
      return nest.flatten(sparse.serialize_sparse_tensors(ret))
    # In eager mode the Defun must be registered explicitly before use.
    _prefetch_fn.add_to_graph(None)
    with ops.device(device):
      self._buffering_resource = function_buffering_resource(
          f=_prefetch_fn,
          output_types=self._flat_output_types,
          target_device=gen_dataset_ops.iterator_get_device(self._resource),
          string_arg=input_iterator_handle,
          buffer_size=buffer_size,
          shared_name=iterator_ops._generate_shared_name(
              "function_buffer_resource"))
  def _next_internal(self):
    """Returns a nested structure of `tf.Tensor`s containing the next element.
    """
    # This runs in sync mode as iterators use an error status to communicate
    # that there is no more data to iterate over.
    # TODO(b/77291417): Fix
    with context.execution_mode(context.SYNC):
      with ops.device(self._device):
        ret = gen_dataset_ops.function_buffering_resource_get_next(
            function_buffer_resource=self._buffering_resource,
            output_types=self._flat_output_types)
      # Restore nested structure and deserialize any sparse tensors.
      return sparse.deserialize_sparse_tensors(
          nest.pack_sequence_as(self._output_types, ret), self._output_types,
          self._output_shapes, self._output_classes)
# pylint: enable=protected-access
class _PrefetchToDeviceDataset(dataset_ops.Dataset):
  """A `Dataset` whose iterator prefetches elements to another device."""
  def __init__(self, input_dataset, device, buffer_size):
    self._input_dataset = input_dataset
    self._device = device
    # Default to a single-element buffer when no size was requested.
    self._buffer_size = 1 if buffer_size is None else buffer_size
  # The static analysis cannot tell that the eager iterator's superclass has
  # a `next()` method.
  # pylint: disable=non-iterator-returned
  def __iter__(self):
    """Returns an `Iterator` over this dataset's elements (eager mode only).
    Raises:
      RuntimeError: If eager execution is not enabled.
    """
    if not context.executing_eagerly():
      raise RuntimeError("dataset.__iter__() is only supported when eager "
                         "execution is enabled.")
    return _PrefetchToDeviceEagerIterator(self._input_dataset, self._device,
                                          self._buffer_size)
  # pylint: enable=non-iterator-returned
  def make_one_shot_iterator(self):
    """Builds an already-initialized iterator for eager or graph mode."""
    if context.executing_eagerly():
      return _PrefetchToDeviceEagerIterator(self._input_dataset, self._device,
                                            self._buffer_size)
    return _PrefetchToDeviceIterator(
        self._input_dataset,
        one_shot=True,
        device=self._device,
        buffer_size=self._buffer_size)
  def make_initializable_iterator(self, shared_name=None):
    """Builds an iterator whose `initializer` op must be run before use."""
    return _PrefetchToDeviceIterator(
        self._input_dataset,
        one_shot=False,
        device=self._device,
        buffer_size=self._buffer_size,
        shared_name=shared_name)
  def _as_variant_tensor(self):
    # TODO(mrry): Raise this error earlier (e.g. when one of the Dataset
    # transformation methods is called.
    # TODO(mrry): Investigate support for chaining further transformations after
    # the prefetch, including GPU support.
    raise NotImplementedError("`prefetch_to_device()` must be the last "
                              "transformation in a dataset pipeline.")
  @property
  def output_types(self):
    return self._input_dataset.output_types
  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes
  @property
  def output_classes(self):
    return self._input_dataset.output_classes
def prefetch_to_device(device, buffer_size=None):
  """A transformation that prefetches dataset values to the given `device`.
  NOTE: Although the transformation creates a @{tf.data.Dataset}, the
  transformation must be the final `Dataset` in the input pipeline.
  Args:
    device: A string. The name of a device to which elements will be prefetched.
    buffer_size: (Optional.) The number of elements to buffer on `device`.
      Defaults to an automatically chosen value.
  Returns:
    A `Dataset` transformation function, which can be passed to
    @{tf.data.Dataset.apply}.
  """
  return lambda dataset: _PrefetchToDeviceDataset(dataset, device, buffer_size)
def copy_to_device(target_device, source_device="/cpu:0"):
  """A transformation that copies dataset elements to the given `target_device`.
  Args:
    target_device: The name of a device to which elements will be copied.
    source_device: The original device on which `input_dataset` will be placed.
  Returns:
    A `Dataset` transformation function, which can be passed to
    @{tf.data.Dataset.apply}.
  """
  return lambda dataset: _CopyToDeviceDataset(
      dataset, target_device=target_device, source_device=source_device)
# TODO(rohanj): Use the _input_hostmem attr on the RemoteCall ops to indicate
# all inputs to the Op are in host memory, thereby avoiding some unnecessary
# Sends and Recvs.
class _CopyToDeviceDataset(dataset_ops.Dataset):
  """A `Dataset` that copies elements to another device."""
  def __init__(self, input_dataset, target_device, source_device="/cpu:0"):
    """Constructs a _CopyToDeviceDataset.
    Args:
      input_dataset: `Dataset` to be copied
      target_device: The name of the device to which elements would be copied.
      source_device: Device where input_dataset would be placed.
    """
    self._input_dataset = input_dataset
    self._target_device = target_device
    spec = framework_device.DeviceSpec().from_string(self._target_device)
    # Used by make_one_shot_iterator below, which is unsupported on GPU.
    self._is_gpu_target = (spec.device_type == "GPU")
    self._source_device_string = source_device
    self._source_device = ops.convert_to_tensor(source_device)
    self._flat_output_shapes = nest.flatten(
        sparse.as_dense_shapes(self._input_dataset.output_shapes,
                               self._input_dataset.output_classes))
    self._flat_output_types = nest.flatten(
        sparse.as_dense_types(self._input_dataset.output_types,
                              self._input_dataset.output_classes))
    # The generator dataset built in _as_variant_tensor drives the copy via
    # three functions (init/next/finalize); each is wrapped in a remote_call
    # so it executes on the *source* device.
    @function.Defun()
    def _init_func():
      """Creates an iterator for the input dataset.
      Returns:
        A `string` tensor that encapsulates the iterator created.
      """
      # pylint: disable=protected-access
      ds_variant = self._input_dataset._as_variant_tensor()
      resource = core_gen_dataset_ops.anonymous_iterator(
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
      with ops.control_dependencies(
          [core_gen_dataset_ops.make_iterator(ds_variant, resource)]):
        return core_gen_dataset_ops.iterator_to_string_handle(resource)
    @function.Defun()
    def _remote_init_func():
      return functional_ops.remote_call(
          target=self._source_device,
          args=_init_func.captured_inputs,
          Tout=[dtypes.string],
          f=_init_func)
    self._init_func = _remote_init_func
    self._init_captured_args = _remote_init_func.captured_inputs
    @function.Defun(dtypes.string)
    def _next_func(string_handle):
      """Calls get_next for created iterator.
      Args:
        string_handle: An iterator string handle created by _init_func
      Returns:
        The elements generated from `input_dataset`
      """
      with ops.device(self._source_device_string):
        iterator = iterator_ops.Iterator.from_string_handle(
            string_handle, self.output_types, self.output_shapes,
            self.output_classes)
      ret = iterator.get_next()
      # Sparse tensors are serialized to cross the function boundary.
      return nest.flatten(sparse.serialize_sparse_tensors(ret))
    @function.Defun(dtypes.string)
    def _remote_next_func(string_handle):
      return functional_ops.remote_call(
          target=self._source_device,
          args=[string_handle] + _next_func.captured_inputs,
          Tout=self._flat_output_types,
          f=_next_func)
    self._next_func = _remote_next_func
    self._next_captured_args = _remote_next_func.captured_inputs
    @function.Defun(dtypes.string)
    def _finalize_func(string_handle):
      """Destroys the iterator resource created.
      Args:
        string_handle: An iterator string handle created by _init_func
      Returns:
        Tensor constant 0
      """
      iterator_resource = core_gen_dataset_ops.iterator_from_string_handle_v2(
          string_handle,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
      with ops.control_dependencies([
          resource_variable_ops.destroy_resource_op(
              iterator_resource, ignore_lookup_error=True)]):
        return array_ops.constant(0, dtypes.int64)
    @function.Defun(dtypes.string)
    def _remote_finalize_func(string_handle):
      return functional_ops.remote_call(
          target=self._source_device,
          args=[string_handle] + _finalize_func.captured_inputs,
          Tout=[dtypes.int64],
          f=_finalize_func)
    self._finalize_func = _remote_finalize_func
    self._finalize_captured_args = _remote_finalize_func.captured_inputs
    # Register the three remote functions with the default graph so they are
    # available when the generator dataset op runs.
    g = ops.get_default_graph()
    _remote_init_func.add_to_graph(g)
    _remote_next_func.add_to_graph(g)
    _remote_finalize_func.add_to_graph(g)
    # pylint: enable=protected-access
  # The one_shot_iterator implementation needs a 0 arg _make_dataset function
  # that thereby captures all the inputs required to create the dataset. Since
  # there are strings that are inputs to the GeneratorDataset which can't be
  # placed on a GPU, this fails for the GPU case. Therefore, disabling it for
  # GPU
  def make_one_shot_iterator(self):
    if self._is_gpu_target:
      raise ValueError("Cannot create a one shot iterator when using "
                       "`tf.contrib.data.copy_to_device()` on GPU. Please use "
                       "`Dataset.make_initializable_iterator()` instead.")
    else:
      return super(_CopyToDeviceDataset, self).make_one_shot_iterator()
  def _as_variant_tensor(self):
    # The resulting dataset is a GeneratorDataset placed on the target
    # device, driven by the remote init/next/finalize functions above.
    with ops.device(self._target_device):
      return core_gen_dataset_ops.generator_dataset(
          self._init_captured_args,
          self._next_captured_args,
          self._finalize_captured_args,
          init_func=self._init_func,
          next_func=self._next_func,
          finalize_func=self._finalize_func,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
  @property
  def output_types(self):
    return self._input_dataset.output_types
  @property
  def output_shapes(self):
    return self._input_dataset.output_shapes
  @property
  def output_classes(self):
    return self._input_dataset.output_classes
class _PerDeviceGenerator(dataset_ops.Dataset):
  """A `dummy` generator dataset.
  Yields the elements of shard `shard_num` of a multi-device iterator by
  issuing remote calls back to `source_device`, so the data can be consumed
  on `target_device`.
  """
  def __init__(self, shard_num, multi_device_iterator_resource, incarnation_id,
               source_device, target_device, output_shapes, output_types,
               output_classes):
    self._target_device = target_device
    self._output_types = output_types
    self._output_shapes = output_shapes
    self._output_classes = output_classes
    self._flat_output_shapes = nest.flatten(
        sparse.as_dense_shapes(self._output_shapes, self._output_classes))
    self._flat_output_types = nest.flatten(
        sparse.as_dense_types(self._output_types, self._output_classes))
    multi_device_iterator_string_handle = (
        gen_dataset_ops.multi_device_iterator_to_string_handle(
            multi_device_iterator_resource))
    # The init function simply passes the shared iterator's handle through.
    @function.Defun()
    def _init_func():
      return multi_device_iterator_string_handle
    @function.Defun()
    def _remote_init_func():
      return functional_ops.remote_call(
          target=source_device,
          args=_init_func.captured_inputs,
          Tout=[dtypes.string],
          f=_init_func)
    self._init_func = _remote_init_func
    self._init_captured_args = _remote_init_func.captured_inputs
    # The next function pulls this shard's next element from the shared
    # multi-device iterator, running on the source device via remote_call.
    @function.Defun(dtypes.string)
    def _next_func(string_handle):
      multi_device_iterator = (
          gen_dataset_ops.multi_device_iterator_from_string_handle(
              string_handle=string_handle,
              output_types=self._flat_output_types,
              output_shapes=self._flat_output_shapes))
      return gen_dataset_ops.multi_device_iterator_get_next_from_shard(
          multi_device_iterator=multi_device_iterator,
          shard_num=shard_num,
          incarnation_id=incarnation_id,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
    @function.Defun(dtypes.string)
    def _remote_next_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + _next_func.captured_inputs,
          Tout=self._flat_output_types,
          f=_next_func)
    self._next_func = _remote_next_func
    self._next_captured_args = _remote_next_func.captured_inputs
    # No per-shard resources are owned here, so finalize is a no-op that
    # just returns a dummy int64.
    @function.Defun(dtypes.string)
    def _finalize_func(unused_string_handle):
      return array_ops.constant(0, dtypes.int64)
    @function.Defun(dtypes.string)
    def _remote_finalize_func(string_handle):
      return functional_ops.remote_call(
          target=source_device,
          args=[string_handle] + _finalize_func.captured_inputs,
          Tout=[dtypes.int64],
          f=_finalize_func)
    self._finalize_func = _remote_finalize_func
    self._finalize_captured_args = _remote_finalize_func.captured_inputs
  def _as_variant_tensor(self):
    with ops.device(self._target_device):
      return core_gen_dataset_ops.generator_dataset(
          self._init_captured_args,
          self._next_captured_args,
          self._finalize_captured_args,
          init_func=self._init_func,
          next_func=self._next_func,
          finalize_func=self._finalize_func,
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
  @property
  def output_types(self):
    return self._output_types
  @property
  def output_shapes(self):
    return self._output_shapes
  @property
  def output_classes(self):
    return self._output_classes
class MultiDeviceIterator(object):
  """An iterator over multiple devices."""
  def __init__(self,
               dataset,
               devices,
               prefetch_buffer_size=1,
               source_device="/cpu:0"):
    """Constructs a MultiDeviceIterator.
    Args:
      dataset: The input dataset to be iterated over, one shard per device.
      devices: A list of device strings to fetch data to.
      prefetch_buffer_size: Size of the per-device prefetch buffer.
      source_device: The device on which the underlying multi-device
        iterator resource is placed.
    """
    self._dataset = dataset
    self._devices = devices
    self._source_device = source_device
    self._source_device_tensor = ops.convert_to_tensor(source_device)
    self._flat_output_shapes = nest.flatten(
        sparse.as_dense_shapes(self._dataset.output_shapes,
                               self._dataset.output_classes))
    self._flat_output_types = nest.flatten(
        sparse.as_dense_types(self._dataset.output_types,
                              self._dataset.output_classes))
    # Create the MultiDeviceIterator.
    with ops.device(self._source_device):
      self._multi_device_iterator_resource = (
          gen_dataset_ops.multi_device_iterator(
              devices=self._devices,
              shared_name="",
              container="",
              output_types=self._flat_output_types,
              output_shapes=self._flat_output_shapes))
      # The incarnation ID is used to ensure consistency between the per-device
      # iterators and the multi-device iterator.
      self._incarnation_id = gen_dataset_ops.multi_device_iterator_init(
          self._dataset._as_variant_tensor(),  # pylint: disable=protected-access
          self._multi_device_iterator_resource)
    # TODO(rohanj): Explore the possibility of the MultiDeviceIterator to
    # initialize the device side of the pipeline. This would allow the
    # MultiDeviceIterator to choose, for example, to move some transformations
    # into the device side from its input. It might be useful in rewriting.
    # Create the per device iterators: shard i of the multi-device iterator
    # feeds devices[i], with a prefetch buffer on that device.
    self._device_iterators = []
    for shard_num, device in enumerate(self._devices):
      ds = _PerDeviceGenerator(
          shard_num, self._multi_device_iterator_resource, self._incarnation_id,
          self._source_device_tensor, device, self._dataset.output_shapes,
          self._dataset.output_types, self._dataset.output_classes)
      ds = ds.prefetch(prefetch_buffer_size)
      with ops.device(device):
        self._device_iterators.append(ds.make_initializable_iterator())
    device_iterator_initializers = [
        iterator.initializer for iterator in self._device_iterators
    ]
    self._initializer = control_flow_ops.group(*device_iterator_initializers)
  def get_next(self):
    """Returns a list with one next-element tensor structure per device."""
    result = []
    for device, iterator in zip(self._devices, self._device_iterators):
      with ops.device(device):
        result.append(iterator.get_next())
    return result
  @property
  def initializer(self):
    """A single op that initializes all per-device iterators."""
    return self._initializer
|
{
"content_hash": "cc91715f6c30d9dce839b595a3b93906",
"timestamp": "",
"source": "github",
"line_count": 683,
"max_line_length": 97,
"avg_line_length": 36.92972181551976,
"alnum_prop": 0.6630852793085675,
"repo_name": "aselle/tensorflow",
"id": "0edd7c9fe974784f199c272a649b302e72d8c218",
"size": "25912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/data/python/ops/prefetching_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "321697"
},
{
"name": "C#",
"bytes": "7259"
},
{
"name": "C++",
"bytes": "46003590"
},
{
"name": "CMake",
"bytes": "207738"
},
{
"name": "Dockerfile",
"bytes": "6905"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "829230"
},
{
"name": "Jupyter Notebook",
"bytes": "2578736"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52243"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99265"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "39898642"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "447009"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
}
|
"""Support for OASA Telematics from telematics.oasa.gr."""
from __future__ import annotations
from datetime import timedelta
import logging
from operator import itemgetter
import oasatelematics
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorDeviceClass,
SensorEntity,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
# Keys used in the sensor's extra state attributes.
ATTR_STOP_ID = "stop_id"
ATTR_STOP_NAME = "stop_name"
ATTR_ROUTE_ID = "route_id"
ATTR_ROUTE_NAME = "route_name"
ATTR_NEXT_ARRIVAL = "next_arrival"
ATTR_SECOND_NEXT_ARRIVAL = "second_next_arrival"
ATTR_NEXT_DEPARTURE = "next_departure"
ATTRIBUTION = "Data retrieved from telematics.oasa.gr"
# YAML configuration option names.
CONF_STOP_ID = "stop_id"
CONF_ROUTE_ID = "route_id"
DEFAULT_NAME = "OASA Telematics"
ICON = "mdi:bus"
# Poll the OASA API once per minute.
SCAN_INTERVAL = timedelta(seconds=60)
# Both stop and route IDs are mandatory; the display name is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_STOP_ID): cv.string,
        vol.Required(CONF_ROUTE_ID): cv.string,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the OASA Telematics sensor."""
    name = config[CONF_NAME]
    stop_id = config[CONF_STOP_ID]
    # CONF_ROUTE_ID is vol.Required in PLATFORM_SCHEMA, so index it directly
    # for consistency with the other required keys above.
    route_id = config[CONF_ROUTE_ID]
    data = OASATelematicsData(stop_id, route_id)
    # Pass update_before_add=True so the first state is fetched immediately.
    add_entities([OASATelematicsSensor(data, stop_id, route_id, name)], True)
class OASATelematicsSensor(SensorEntity):
    """Implementation of the OASA Telematics sensor."""
    def __init__(self, data, stop_id, route_id, name):
        """Initialize the sensor."""
        # data: OASATelematicsData instance that fetches arrivals on update().
        self.data = data
        self._name = name
        self._stop_id = stop_id
        self._route_id = route_id
        # All three start unset until the first update() populates them.
        self._name_data = self._times = self._state = None
    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name
    @property
    def device_class(self):
        """Return the class of this sensor."""
        return SensorDeviceClass.TIMESTAMP
    @property
    def native_value(self):
        """Return the state of the sensor."""
        # A datetime of the next arrival (TIMESTAMP device class), or None.
        return self._state
    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        params = {}
        if self._times is not None:
            next_arrival_data = self._times[0]
            if ATTR_NEXT_ARRIVAL in next_arrival_data:
                next_arrival = next_arrival_data[ATTR_NEXT_ARRIVAL]
                params.update({ATTR_NEXT_ARRIVAL: next_arrival.isoformat()})
            # Expose the second upcoming arrival as well, when available.
            if len(self._times) > 1:
                second_next_arrival_time = self._times[1][ATTR_NEXT_ARRIVAL]
                if second_next_arrival_time is not None:
                    second_arrival = second_next_arrival_time
                    params.update(
                        {ATTR_SECOND_NEXT_ARRIVAL: second_arrival.isoformat()}
                    )
            params.update(
                {
                    ATTR_ROUTE_ID: self._times[0][ATTR_ROUTE_ID],
                    ATTR_STOP_ID: self._stop_id,
                    ATTR_ATTRIBUTION: ATTRIBUTION,
                }
            )
            params.update(
                {
                    ATTR_ROUTE_NAME: self._name_data[ATTR_ROUTE_NAME],
                    ATTR_STOP_NAME: self._name_data[ATTR_STOP_NAME],
                }
            )
        # Drop falsy values so empty/None attributes are not shown.
        return {k: v for k, v in params.items() if v}
    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return ICON
    def update(self) -> None:
        """Get the latest data from OASA API and update the states."""
        self.data.update()
        self._times = self.data.info
        self._name_data = self.data.name_data
        next_arrival_data = self._times[0]
        # When no arrivals were found, info is the empty result (no
        # ATTR_NEXT_ARRIVAL key) and the state is left unchanged.
        if ATTR_NEXT_ARRIVAL in next_arrival_data:
            self._state = next_arrival_data[ATTR_NEXT_ARRIVAL]
class OASATelematicsData:
    """The class for handling data retrieval."""
    def __init__(self, stop_id, route_id):
        """Initialize the data object."""
        self.stop_id = stop_id
        self.route_id = route_id
        # Arrival info list; starts as the empty result placeholder.
        self.info = self.empty_result()
        self.oasa_api = oasatelematics
        # Human-readable names are fetched once at construction time.
        self.name_data = {
            ATTR_ROUTE_NAME: self.get_route_name(),
            ATTR_STOP_NAME: self.get_stop_name(),
        }
    def empty_result(self):
        """Object returned when no arrivals are found."""
        return [{ATTR_ROUTE_ID: self.route_id}]
    def get_route_name(self):
        """Get the route name from the API."""
        try:
            route = self.oasa_api.getRouteName(self.route_id)
            if route:
                return route[0].get("route_departure_eng")
        except TypeError:
            _LOGGER.error("Cannot get route name from OASA API")
        return None
    def get_stop_name(self):
        """Get the stop name from the API."""
        try:
            name_data = self.oasa_api.getStopNameAndXY(self.stop_id)
            if name_data:
                return name_data[0].get("stop_descr_matrix_eng")
        except TypeError:
            _LOGGER.error("Cannot get stop name from OASA API")
        return None
    def update(self):
        """Get the latest arrival data from telematics.oasa.gr API."""
        self.info = []
        results = self.oasa_api.getStopArrivals(self.stop_id)
        if not results:
            self.info = self.empty_result()
            return
        # Parse results
        # NOTE(review): `in` performs a substring test on the route_id
        # string, so route_code "12" would also match route_id "123";
        # equality may have been intended — confirm against the API's
        # route_code format.
        results = [r for r in results if r.get("route_code") in self.route_id]
        current_time = dt_util.utcnow()
        for result in results:
            # "btime2" is minutes-until-arrival; skip entries without it.
            if (btime2 := result.get("btime2")) is not None:
                arrival_min = int(btime2)
                timestamp = current_time + timedelta(minutes=arrival_min)
                arrival_data = {
                    ATTR_NEXT_ARRIVAL: timestamp,
                    ATTR_ROUTE_ID: self.route_id,
                }
                self.info.append(arrival_data)
        if not self.info:
            _LOGGER.debug("No arrivals with given parameters")
            self.info = self.empty_result()
            return
        # Sort the data by time
        sort = sorted(self.info, key=itemgetter(ATTR_NEXT_ARRIVAL))
        self.info = sort
|
{
"content_hash": "bd73fe7b84530a95d8453cbd2ebbe1b4",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 78,
"avg_line_length": 31.86602870813397,
"alnum_prop": 0.598048048048048,
"repo_name": "nkgilley/home-assistant",
"id": "3cb624190e77cdc8709fe5aee155a653c6498abb",
"size": "6660",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/oasa_telematics/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import os.path
import djcelery
# Register django-celery's task loader with Celery.
djcelery.setup_loader()
# Repository root: two directories above this settings module.
PROJECT_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
TESTING = False
ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Kiev'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
# NOTE(review): this secret is committed to source control; override it in
# local_settings for any non-development deployment.
SECRET_KEY = '(g!ga4o4v4+3y0!*iyc&ybi)i3&2jm-=$7amkaxn%0n#p*(id('
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
# UpdateCacheMiddleware is first and FetchFromCacheMiddleware is last, as
# Django's per-site cache requires.
MIDDLEWARE_CLASSES = (
    'django.middleware.cache.UpdateCacheMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.cache.FetchFromCacheMiddleware',
)
ROOT_URLCONF = 'gitality.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'gitality.wsgi.application'
TEMPLATE_DIRS = (
    os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'django.contrib.admindocs',
    'core',
    'commits',
    'achievements',
    'progresses',
    'projects',
    # Third-party
    'djcelery',
    'social_auth',
    'south',
)
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# Celery settings
CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
CELERY_RESULT_BACKEND = 'redis://{0}:{1}'.format(REDIS_HOST, REDIS_PORT)
# Broker and result backend share the same Redis instance.
BROKER_URL = CELERY_RESULT_BACKEND
AUTHENTICATION_BACKENDS = (
    'social_auth.backends.contrib.github.GithubBackend',
    'django.contrib.auth.backends.ModelBackend',
)
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGIN_ERROR_URL = '/error/'
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'achievements': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False
        }
    }
}
# Provide it in local_settings
GITHUB_APP_ID = ''
GITHUB_API_SECRET = ''
GITHUB_BOT_NAME = ''
GITHUB_BOT_PASSWORD = ''
# Disabling cache by default
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
    }
}
# Loading local settings
try:
    from local_settings import *
except ImportError:
    pass
# Remove the helper module name so it is not mistaken for a setting.
del os
|
{
"content_hash": "f84959d0862a025bbda7378d81df886c",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 29.052380952380954,
"alnum_prop": 0.6913620717915095,
"repo_name": "dmrz/gitality",
"id": "8e8df474090ad1dad68929b576cbc017ea5e1573",
"size": "6141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitality/gitality/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1353"
},
{
"name": "JavaScript",
"bytes": "306"
},
{
"name": "Python",
"bytes": "30427"
},
{
"name": "Shell",
"bytes": "318"
}
],
"symlink_target": ""
}
|
"""Cloud Spanner API package."""
import pkg_resources
__version__ = pkg_resources.get_distribution('google-cloud-spanner').version
from google.cloud.spanner.client import Client
from google.cloud.spanner.keyset import KeyRange
from google.cloud.spanner.keyset import KeySet
from google.cloud.spanner.pool import AbstractSessionPool
from google.cloud.spanner.pool import BurstyPool
from google.cloud.spanner.pool import FixedSizePool
__all__ = ['__version__', 'AbstractSessionPool', 'BurstyPool', 'Client',
'FixedSizePool', 'KeyRange', 'KeySet']
|
{
"content_hash": "4fbcd6a5c18c39179256969a1892c7f1",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 29.68421052631579,
"alnum_prop": 0.7641843971631206,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "31913d8b1202fbe786c481502811c38bd5e7265c",
"size": "1161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/google/cloud/spanner/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""
Test for the SmartThings switch platform.
The only mocking required is of the underlying SmartThings API object so
real HTTP calls are not initiated during testing.
"""
from pysmartthings import Attribute, Capability
from homeassistant.components.smartthings.const import DOMAIN, SIGNAL_SMARTTHINGS_UPDATE
from homeassistant.components.switch import (
ATTR_CURRENT_POWER_W,
ATTR_TODAY_ENERGY_KWH,
DOMAIN as SWITCH_DOMAIN,
)
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import STATE_UNAVAILABLE
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.dispatcher import async_dispatcher_send
from .conftest import setup_platform
async def test_entity_and_device_attributes(hass, device_factory):
    """Verify the switch entity and its device registry entry are populated."""
    device = device_factory("Switch_1", [Capability.switch], {Attribute.switch: "on"})
    entity_registry = er.async_get(hass)
    device_registry = dr.async_get(hass)

    await setup_platform(hass, SWITCH_DOMAIN, devices=[device])

    # Entity registry entry should be keyed by the SmartThings device id.
    entity_entry = entity_registry.async_get("switch.switch_1")
    assert entity_entry
    assert entity_entry.unique_id == device.device_id

    # Device registry entry should mirror the device's metadata.
    device_entry = device_registry.async_get_device({(DOMAIN, device.device_id)})
    assert device_entry
    assert device_entry.name == device.label
    assert device_entry.model == device.device_type_name
    assert device_entry.manufacturer == "Unavailable"
async def test_turn_off(hass, device_factory):
    """Verify the switch.turn_off service switches the entity off."""
    device = device_factory("Switch_1", [Capability.switch], {Attribute.switch: "on"})
    await setup_platform(hass, SWITCH_DOMAIN, devices=[device])

    await hass.services.async_call(
        "switch", "turn_off", {"entity_id": "switch.switch_1"}, blocking=True
    )

    switch_state = hass.states.get("switch.switch_1")
    assert switch_state is not None
    assert switch_state.state == "off"
async def test_turn_on(hass, device_factory):
    """Verify the switch.turn_on service switches the entity on."""
    device = device_factory(
        "Switch_1",
        [Capability.switch, Capability.power_meter, Capability.energy_meter],
        {Attribute.switch: "off", Attribute.power: 355, Attribute.energy: 11.422},
    )
    await setup_platform(hass, SWITCH_DOMAIN, devices=[device])

    await hass.services.async_call(
        "switch", "turn_on", {"entity_id": "switch.switch_1"}, blocking=True
    )

    switch_state = hass.states.get("switch.switch_1")
    assert switch_state is not None
    assert switch_state.state == "on"
    # Power/energy meter readings are exposed as state attributes.
    assert switch_state.attributes[ATTR_CURRENT_POWER_W] == 355
    assert switch_state.attributes[ATTR_TODAY_ENERGY_KWH] == 11.422
async def test_update_from_signal(hass, device_factory):
    """Verify the entity state refreshes when the update signal is dispatched."""
    device = device_factory("Switch_1", [Capability.switch], {Attribute.switch: "off"})
    await setup_platform(hass, SWITCH_DOMAIN, devices=[device])
    await device.switch_on(True)

    async_dispatcher_send(hass, SIGNAL_SMARTTHINGS_UPDATE, [device.device_id])
    await hass.async_block_till_done()

    switch_state = hass.states.get("switch.switch_1")
    assert switch_state is not None
    assert switch_state.state == "on"
async def test_unload_config_entry(hass, device_factory):
    """Verify the switch becomes unavailable when the config entry unloads."""
    device = device_factory("Switch 1", [Capability.switch], {Attribute.switch: "on"})
    config_entry = await setup_platform(hass, SWITCH_DOMAIN, devices=[device])
    config_entry.state = ConfigEntryState.LOADED

    await hass.config_entries.async_forward_entry_unload(config_entry, "switch")

    assert hass.states.get("switch.switch_1").state == STATE_UNAVAILABLE
|
{
"content_hash": "ab46efd9d9e4a6c579ec0ff29d94f8cb",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 88,
"avg_line_length": 37.41747572815534,
"alnum_prop": 0.703425012973534,
"repo_name": "kennedyshead/home-assistant",
"id": "7c202fad12ee6c352959c99c98224b089a07e7de",
"size": "3854",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/components/smartthings/test_switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
import unittest, StringIO, robotparser
from test import test_support
class RobotTestCase(unittest.TestCase):
def __init__(self, index, parser, url, good, agent):
unittest.TestCase.__init__(self)
if good:
self.str = "RobotTest(%d, good, %s)" % (index, url)
else:
self.str = "RobotTest(%d, bad, %s)" % (index, url)
self.parser = parser
self.url = url
self.good = good
self.agent = agent
def runTest(self):
if isinstance(self.url, tuple):
agent, url = self.url
else:
url = self.url
agent = self.agent
if self.good:
self.assertTrue(self.parser.can_fetch(agent, url))
else:
self.assertFalse(self.parser.can_fetch(agent, url))
def __str__(self):
return self.str
tests = unittest.TestSuite()

def RobotTest(index, robots_txt, good_urls, bad_urls,
              agent="test_robotparser"):
    """Parse *robots_txt* and queue one test case per URL onto the suite."""
    lines = StringIO.StringIO(robots_txt).readlines()
    parser = robotparser.RobotFileParser()
    parser.parse(lines)
    # good URLs expect can_fetch() == True (1), bad URLs expect False (0)
    for verdict, urls in ((1, good_urls), (0, bad_urls)):
        for url in urls:
            tests.addTest(RobotTestCase(index, parser, url, verdict, agent))
# Examples from http://www.robotstxt.org/wc/norobots.html (fetched 2002)
# 1.
doc = """
User-agent: *
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
Disallow: /tmp/ # these will soon disappear
Disallow: /foo.html
"""
good = ['/','/test.html']
bad = ['/cyberworld/map/index.html','/tmp/xxx','/foo.html']
RobotTest(1, doc, good, bad)
# 2.
doc = """
# robots.txt for http://www.example.com/
User-agent: *
Disallow: /cyberworld/map/ # This is an infinite virtual URL space
# Cybermapper knows where to go.
User-agent: cybermapper
Disallow:
"""
good = ['/','/test.html',('cybermapper','/cyberworld/map/index.html')]
bad = ['/cyberworld/map/index.html']
RobotTest(2, doc, good, bad)
# 3.
doc = """
# go away
User-agent: *
Disallow: /
"""
good = []
bad = ['/cyberworld/map/index.html','/','/tmp/']
RobotTest(3, doc, good, bad)
# Examples from http://www.robotstxt.org/wc/norobots-rfc.html (fetched 2002)
# 4.
doc = """
User-agent: figtree
Disallow: /tmp
Disallow: /a%3cd.html
Disallow: /a%2fb.html
Disallow: /%7ejoe/index.html
"""
# XFAIL notes mark URLs the parser is known to judge incorrectly.
good = [] # XFAIL '/a/b.html'
bad = ['/tmp','/tmp.html','/tmp/a.html',
'/a%3cd.html','/a%3Cd.html','/a%2fb.html',
'/~joe/index.html'
]
# Tests 4 and 5 share the same rules; 5 checks substring agent matching.
RobotTest(4, doc, good, bad, 'figtree')
RobotTest(5, doc, good, bad, 'FigTree Robot libwww-perl/5.04')
# 6.
doc = """
User-agent: *
Disallow: /tmp/
Disallow: /a%3Cd.html
Disallow: /a/b.html
Disallow: /%7ejoe/index.html
"""
good = ['/tmp',] # XFAIL: '/a%2fb.html'
bad = ['/tmp/','/tmp/a.html',
'/a%3cd.html','/a%3Cd.html',"/a/b.html",
'/%7Ejoe/index.html']
RobotTest(6, doc, good, bad)
# From bug report #523041
# 7.
doc = """
User-Agent: *
Disallow: /.
"""
good = ['/foo.html']
bad = [] # Bug report says "/" should be denied, but that is not in the RFC
RobotTest(7, doc, good, bad)
# From Google: http://www.google.com/support/webmasters/bin/answer.py?hl=en&answer=40364
# 8.
doc = """
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
"""
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
RobotTest(8, doc, good, bad, agent="Googlebot")
# 9. This file is incorrect because "Googlebot" is a substring of
# "Googlebot-Mobile", so test 10 works just like test 9.
doc = """
User-agent: Googlebot
Disallow: /
User-agent: Googlebot-Mobile
Allow: /
"""
good = []
bad = ['/something.jpg']
RobotTest(9, doc, good, bad, agent="Googlebot")
good = []
bad = ['/something.jpg']
RobotTest(10, doc, good, bad, agent="Googlebot-Mobile")
# 11. Get the order correct.
doc = """
User-agent: Googlebot-Mobile
Allow: /
User-agent: Googlebot
Disallow: /
"""
good = []
bad = ['/something.jpg']
RobotTest(11, doc, good, bad, agent="Googlebot")
good = ['/something.jpg']
bad = []
RobotTest(12, doc, good, bad, agent="Googlebot-Mobile")
# 13. Google also got the order wrong in #8. You need to specify the
# URLs from more specific to more general.
doc = """
User-agent: Googlebot
Allow: /folder1/myfile.html
Disallow: /folder1/
"""
good = ['/folder1/myfile.html']
bad = ['/folder1/anotherfile.html']
RobotTest(13, doc, good, bad, agent="googlebot")
class NetworkTestCase(unittest.TestCase):
    """Tests that fetch real robots.txt files over the network."""

    def testPasswordProtectedSite(self):
        test_support.requires('network')
        # XXX it depends on an external resource which could be unavailable
        url = 'http://mueblesmoraleda.com'
        rp = robotparser.RobotFileParser(url)
        try:
            rp.read()
        except IOError:
            self.skipTest('%s is unavailable' % url)
        self.assertEqual(rp.can_fetch("*", url + "/robots.txt"), False)

    def testPythonOrg(self):
        test_support.requires('network')
        rp = robotparser.RobotFileParser()
        rp.set_url("http://www.python.org/robots.txt")
        rp.read()
        self.assertTrue(
            rp.can_fetch("*", "http://www.python.org/robots.txt"))
def test_main():
    """Run the scripted suite first, then the network-dependent tests."""
    for suite in (tests, NetworkTestCase):
        test_support.run_unittest(suite)

if __name__ == '__main__':
    test_support.verbose = 1
    test_main()
|
{
"content_hash": "3219aea4863dae94cc615b7f0c9f3ddd",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 88,
"avg_line_length": 23.04255319148936,
"alnum_prop": 0.6232686980609419,
"repo_name": "fkolacek/FIT-VUT",
"id": "405d517d2e04867e4f43cc918f00454aa5f6005f",
"size": "5415",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bp-revok/python/lib/python2.7/test/test_robotparser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455326"
},
{
"name": "Awk",
"bytes": "8724"
},
{
"name": "Batchfile",
"bytes": "201"
},
{
"name": "Brainfuck",
"bytes": "83"
},
{
"name": "C",
"bytes": "5006938"
},
{
"name": "C++",
"bytes": "1835332"
},
{
"name": "CSS",
"bytes": "301045"
},
{
"name": "CoffeeScript",
"bytes": "46327"
},
{
"name": "Groff",
"bytes": "46766"
},
{
"name": "HTML",
"bytes": "937735"
},
{
"name": "Java",
"bytes": "552132"
},
{
"name": "JavaScript",
"bytes": "1742225"
},
{
"name": "Lua",
"bytes": "39700"
},
{
"name": "Makefile",
"bytes": "381793"
},
{
"name": "Objective-C",
"bytes": "4618"
},
{
"name": "PHP",
"bytes": "108701"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Perl",
"bytes": "60353"
},
{
"name": "Python",
"bytes": "22084026"
},
{
"name": "QMake",
"bytes": "2660"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ragel in Ruby Host",
"bytes": "17993"
},
{
"name": "Ruby",
"bytes": "21607145"
},
{
"name": "Shell",
"bytes": "611321"
},
{
"name": "Tcl",
"bytes": "4920"
},
{
"name": "TeX",
"bytes": "561423"
},
{
"name": "VHDL",
"bytes": "49180"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "154638"
},
{
"name": "Yacc",
"bytes": "32788"
}
],
"symlink_target": ""
}
|
"""
Testing signals emitted on changing m2m relations.
"""
from django.db import models
from django.test import TestCase
from .models import Car, Part, Person, SportsCar
class ManyToManySignalsTest(TestCase):
    """Record every m2m_changed payload via a signal receiver and compare the
    recorded messages against hand-written expectations."""
    @classmethod
    def setUpTestData(cls):
        """Create the shared Car/Part/Person fixtures once per test class."""
        cls.vw = Car.objects.create(name='VW')
        cls.bmw = Car.objects.create(name='BMW')
        cls.toyota = Car.objects.create(name='Toyota')
        cls.wheelset = Part.objects.create(name='Wheelset')
        cls.doors = Part.objects.create(name='Doors')
        cls.engine = Part.objects.create(name='Engine')
        cls.airbag = Part.objects.create(name='Airbag')
        cls.sunroof = Part.objects.create(name='Sunroof')
        cls.alice = Person.objects.create(name='Alice')
        cls.bob = Person.objects.create(name='Bob')
        cls.chuck = Person.objects.create(name='Chuck')
        cls.daisy = Person.objects.create(name='Daisy')
    def setUp(self):
        # Per-test log of received m2m_changed payloads.
        self.m2m_changed_messages = []
    def m2m_changed_signal_receiver(self, signal, sender, **kwargs):
        """Record the interesting fields of one m2m_changed dispatch."""
        message = {
            'instance': kwargs['instance'],
            'action': kwargs['action'],
            'reverse': kwargs['reverse'],
            'model': kwargs['model'],
        }
        # pk_set is None for (pre|post)_clear, so 'objects' is omitted then.
        if kwargs['pk_set']:
            message['objects'] = list(
                kwargs['model'].objects.filter(pk__in=kwargs['pk_set'])
            )
        self.m2m_changed_messages.append(message)
    def tearDown(self):
        # disconnect all signal handlers
        models.signals.m2m_changed.disconnect(
            self.m2m_changed_signal_receiver, Car.default_parts.through
        )
        models.signals.m2m_changed.disconnect(
            self.m2m_changed_signal_receiver, Car.optional_parts.through
        )
        models.signals.m2m_changed.disconnect(
            self.m2m_changed_signal_receiver, Person.fans.through
        )
        models.signals.m2m_changed.disconnect(
            self.m2m_changed_signal_receiver, Person.friends.through
        )
    def _initialize_signal_car(self, add_default_parts_before_set_signal=False):
        """ Install a listener on the two m2m relations. """
        models.signals.m2m_changed.connect(
            self.m2m_changed_signal_receiver, Car.optional_parts.through
        )
        if add_default_parts_before_set_signal:
            # adding a default part to our car - no signal listener installed
            self.vw.default_parts.add(self.sunroof)
        models.signals.m2m_changed.connect(
            self.m2m_changed_signal_receiver, Car.default_parts.through
        )
    def test_m2m_relations_add_remove_clear(self):
        """Forward and reverse add() each emit pre_add/post_add pairs."""
        expected_messages = []
        self._initialize_signal_car(add_default_parts_before_set_signal=True)
        self.vw.default_parts.add(self.wheelset, self.doors, self.engine)
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors, self.engine, self.wheelset],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors, self.engine, self.wheelset],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
        # give the BMW and Toyota some doors as well
        self.doors.car_set.add(self.bmw, self.toyota)
        expected_messages.append({
            'instance': self.doors,
            'action': 'pre_add',
            'reverse': True,
            'model': Car,
            'objects': [self.bmw, self.toyota],
        })
        expected_messages.append({
            'instance': self.doors,
            'action': 'post_add',
            'reverse': True,
            'model': Car,
            'objects': [self.bmw, self.toyota],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
    def test_m2m_relations_signals_remove_relation(self):
        """remove() reports all requested pks, even ones not actually related."""
        self._initialize_signal_car()
        # remove the engine from the self.vw and the airbag (which is not set
        # but is returned)
        self.vw.default_parts.remove(self.engine, self.airbag)
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.vw,
                'action': 'pre_remove',
                'reverse': False,
                'model': Part,
                'objects': [self.airbag, self.engine],
            }, {
                'instance': self.vw,
                'action': 'post_remove',
                'reverse': False,
                'model': Part,
                'objects': [self.airbag, self.engine],
            }
        ])
    def test_m2m_relations_signals_give_the_self_vw_some_optional_parts(self):
        """A second m2m relation to the same model fires its own signals."""
        expected_messages = []
        self._initialize_signal_car()
        # give the self.vw some optional parts (second relation to same model)
        self.vw.optional_parts.add(self.airbag, self.sunroof)
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_add',
            'reverse': False,
            'model': Part,
            'objects': [self.airbag, self.sunroof],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_add',
            'reverse': False,
            'model': Part,
            'objects': [self.airbag, self.sunroof],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
        # add airbag to all the cars (even though the self.vw already has one)
        self.airbag.cars_optional.add(self.vw, self.bmw, self.toyota)
        expected_messages.append({
            'instance': self.airbag,
            'action': 'pre_add',
            'reverse': True,
            'model': Car,
            'objects': [self.bmw, self.toyota],
        })
        expected_messages.append({
            'instance': self.airbag,
            'action': 'post_add',
            'reverse': True,
            'model': Car,
            'objects': [self.bmw, self.toyota],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
    def test_m2m_relations_signals_reverse_relation_with_custom_related_name(self):
        """Reverse remove() via a custom related_name reports reverse=True."""
        self._initialize_signal_car()
        # remove airbag from the self.vw (reverse relation with custom
        # related_name)
        self.airbag.cars_optional.remove(self.vw)
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.airbag,
                'action': 'pre_remove',
                'reverse': True,
                'model': Car,
                'objects': [self.vw],
            }, {
                'instance': self.airbag,
                'action': 'post_remove',
                'reverse': True,
                'model': Car,
                'objects': [self.vw],
            }
        ])
    def test_m2m_relations_signals_clear_all_parts_of_the_self_vw(self):
        """clear() emits pre_clear/post_clear without an 'objects' entry."""
        self._initialize_signal_car()
        # clear all parts of the self.vw
        self.vw.default_parts.clear()
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.vw,
                'action': 'pre_clear',
                'reverse': False,
                'model': Part,
            }, {
                'instance': self.vw,
                'action': 'post_clear',
                'reverse': False,
                'model': Part,
            }
        ])
    def test_m2m_relations_signals_all_the_doors_off_of_cars(self):
        """Reverse clear() via the default related manager reports reverse=True."""
        self._initialize_signal_car()
        # take all the doors off of cars
        self.doors.car_set.clear()
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.doors,
                'action': 'pre_clear',
                'reverse': True,
                'model': Car,
            }, {
                'instance': self.doors,
                'action': 'post_clear',
                'reverse': True,
                'model': Car,
            }
        ])
    def test_m2m_relations_signals_reverse_relation(self):
        """Reverse clear() via a custom related_name reports reverse=True."""
        self._initialize_signal_car()
        # take all the airbags off of cars (clear reverse relation with custom
        # related_name)
        self.airbag.cars_optional.clear()
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.airbag,
                'action': 'pre_clear',
                'reverse': True,
                'model': Car,
            }, {
                'instance': self.airbag,
                'action': 'post_clear',
                'reverse': True,
                'model': Car,
            }
        ])
    def test_m2m_relations_signals_alternative_ways(self):
        """create() and set() also fire add/remove signals."""
        expected_messages = []
        self._initialize_signal_car()
        # alternative ways of setting relation:
        self.vw.default_parts.create(name='Windows')
        p6 = Part.objects.get(name='Windows')
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_add',
            'reverse': False,
            'model': Part,
            'objects': [p6],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_add',
            'reverse': False,
            'model': Part,
            'objects': [p6],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
        # direct assignment clears the set first, then adds
        self.vw.default_parts.set([self.wheelset, self.doors, self.engine])
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_remove',
            'reverse': False,
            'model': Part,
            'objects': [p6],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_remove',
            'reverse': False,
            'model': Part,
            'objects': [p6],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors, self.engine, self.wheelset],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors, self.engine, self.wheelset],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
    def test_m2m_relations_signals_clearing_removing(self):
        """set(clear=True) clears then adds; set(clear=False) removes the diff."""
        expected_messages = []
        self._initialize_signal_car(add_default_parts_before_set_signal=True)
        # set by clearing.
        self.vw.default_parts.set([self.wheelset, self.doors, self.engine], clear=True)
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_clear',
            'reverse': False,
            'model': Part,
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_clear',
            'reverse': False,
            'model': Part,
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors, self.engine, self.wheelset],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors, self.engine, self.wheelset],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
        # set by only removing what's necessary.
        self.vw.default_parts.set([self.wheelset, self.doors], clear=False)
        expected_messages.append({
            'instance': self.vw,
            'action': 'pre_remove',
            'reverse': False,
            'model': Part,
            'objects': [self.engine],
        })
        expected_messages.append({
            'instance': self.vw,
            'action': 'post_remove',
            'reverse': False,
            'model': Part,
            'objects': [self.engine],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
    def test_m2m_relations_signals_when_inheritance(self):
        """Signals fire for child-model instances and their parent rows."""
        expected_messages = []
        self._initialize_signal_car(add_default_parts_before_set_signal=True)
        # Signals still work when model inheritance is involved
        c4 = SportsCar.objects.create(name='Bugatti', price='1000000')
        c4b = Car.objects.get(name='Bugatti')
        c4.default_parts.set([self.doors])
        expected_messages.append({
            'instance': c4,
            'action': 'pre_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors],
        })
        expected_messages.append({
            'instance': c4,
            'action': 'post_add',
            'reverse': False,
            'model': Part,
            'objects': [self.doors],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
        self.engine.car_set.add(c4)
        expected_messages.append({
            'instance': self.engine,
            'action': 'pre_add',
            'reverse': True,
            'model': Car,
            'objects': [c4b],
        })
        expected_messages.append({
            'instance': self.engine,
            'action': 'post_add',
            'reverse': True,
            'model': Car,
            'objects': [c4b],
        })
        self.assertEqual(self.m2m_changed_messages, expected_messages)
    def _initialize_signal_person(self):
        # Install a listener on the two m2m relations.
        models.signals.m2m_changed.connect(
            self.m2m_changed_signal_receiver, Person.fans.through
        )
        models.signals.m2m_changed.connect(
            self.m2m_changed_signal_receiver, Person.friends.through
        )
    def test_m2m_relations_with_self_add_friends(self):
        """Symmetrical self-referential set() reports reverse=False."""
        self._initialize_signal_person()
        self.alice.friends.set([self.bob, self.chuck])
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.alice,
                'action': 'pre_add',
                'reverse': False,
                'model': Person,
                'objects': [self.bob, self.chuck],
            }, {
                'instance': self.alice,
                'action': 'post_add',
                'reverse': False,
                'model': Person,
                'objects': [self.bob, self.chuck],
            }
        ])
    def test_m2m_relations_with_self_add_fan(self):
        """Forward side of an asymmetrical self-referential relation."""
        self._initialize_signal_person()
        self.alice.fans.set([self.daisy])
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.alice,
                'action': 'pre_add',
                'reverse': False,
                'model': Person,
                'objects': [self.daisy],
            }, {
                'instance': self.alice,
                'action': 'post_add',
                'reverse': False,
                'model': Person,
                'objects': [self.daisy],
            }
        ])
    def test_m2m_relations_with_self_add_idols(self):
        """Reverse side of an asymmetrical self-referential relation."""
        self._initialize_signal_person()
        self.chuck.idols.set([self.alice, self.bob])
        self.assertEqual(self.m2m_changed_messages, [
            {
                'instance': self.chuck,
                'action': 'pre_add',
                'reverse': True,
                'model': Person,
                'objects': [self.alice, self.bob],
            }, {
                'instance': self.chuck,
                'action': 'post_add',
                'reverse': True,
                'model': Person,
                'objects': [self.alice, self.bob],
            }
        ])
|
{
"content_hash": "3b4896ab5844b00eff21a9c9ab7e27aa",
"timestamp": "",
"source": "github",
"line_count": 465,
"max_line_length": 87,
"avg_line_length": 34.4494623655914,
"alnum_prop": 0.5178225856795056,
"repo_name": "fenginx/django",
"id": "1e063e8a562ea5511016c8e380a94f299ca2f4f2",
"size": "16019",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "tests/m2m_signals/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "48399"
},
{
"name": "HTML",
"bytes": "175296"
},
{
"name": "JavaScript",
"bytes": "238848"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11137863"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
class GroupsRequest(object):
    """Bean-style request object carrying groups, request, and filename fields.

    The Java-style getter/setter API is kept as-is — presumably it is required
    by the dynamicserialize framework this class lives in (verify before
    changing).
    """

    def __init__(self):
        # All fields start unset and are populated later via the setters.
        for attr in ('groups', 'request', 'filename'):
            setattr(self, attr, None)

    def getGroups(self):
        return self.groups

    def setGroups(self, groups):
        self.groups = groups

    def getRequest(self):
        return self.request

    def setRequest(self, request):
        self.request = request

    def getFilename(self):
        return self.filename

    def setFilename(self, filename):
        self.filename = filename
|
{
"content_hash": "7ac66b41dd60cdbe2137edb4c6fcffef",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 36,
"avg_line_length": 20.2,
"alnum_prop": 0.6099009900990099,
"repo_name": "mjames-upc/python-awips",
"id": "f57e491bd67008ecd3be625728fb52193327b53b",
"size": "582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamicserialize/dstypes/com/raytheon/uf/common/pypies/request/GroupsRequest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "27192"
},
{
"name": "Python",
"bytes": "714011"
}
],
"symlink_target": ""
}
|
from ceilometer.network.statistics import port_v2
from ceilometer import sample
from ceilometer.tests.unit.network import statistics
class TestPortPollsters(statistics._PollsterTestBase):
    """One check per port pollster: meter name, sample type, and unit."""

    def test_port_pollster(self):
        self._test_pollster(port_v2.PortPollster, 'port', sample.TYPE_GAUGE, 'port')

    def test_port_pollster_uptime(self):
        self._test_pollster(port_v2.PortPollsterUptime, 'port.uptime', sample.TYPE_GAUGE, 's')

    def test_port_pollster_receive_packets(self):
        self._test_pollster(port_v2.PortPollsterReceivePackets, 'port.receive.packets', sample.TYPE_CUMULATIVE, 'packet')

    def test_port_pollster_transmit_packets(self):
        self._test_pollster(port_v2.PortPollsterTransmitPackets, 'port.transmit.packets', sample.TYPE_CUMULATIVE, 'packet')

    def test_port_pollster_receive_bytes(self):
        self._test_pollster(port_v2.PortPollsterReceiveBytes, 'port.receive.bytes', sample.TYPE_CUMULATIVE, 'B')

    def test_port_pollster_transmit_bytes(self):
        self._test_pollster(port_v2.PortPollsterTransmitBytes, 'port.transmit.bytes', sample.TYPE_CUMULATIVE, 'B')

    def test_port_pollster_receive_drops(self):
        self._test_pollster(port_v2.PortPollsterReceiveDrops, 'port.receive.drops', sample.TYPE_CUMULATIVE, 'packet')

    def test_port_pollster_receive_errors(self):
        self._test_pollster(port_v2.PortPollsterReceiveErrors, 'port.receive.errors', sample.TYPE_CUMULATIVE, 'packet')
|
{
"content_hash": "ca6d6591aee678d970fec2e06c2237a8",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 54,
"avg_line_length": 29.870967741935484,
"alnum_prop": 0.5869330453563715,
"repo_name": "openstack/ceilometer",
"id": "684247113b012905282975c11055a0fbfb585896",
"size": "2472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceilometer/tests/unit/network/statistics/test_port_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1333367"
},
{
"name": "Shell",
"bytes": "18703"
}
],
"symlink_target": ""
}
|
import os
import re
import logging
from ctypes import c_int32
from collections import defaultdict
from devlib.utils.csvutil import create_writer, csvwriter
from wa.utils.trace_cmd import TraceCmdParser, trace_has_marker, TRACE_MARKER_START, TRACE_MARKER_STOP
logger = logging.getLogger('cpustates')
# Matches lines of the form "CPU <n> FREQUENCY: <f> kHZ".
INIT_CPU_FREQ_REGEX = re.compile(r'CPU (?P<cpu>\d+) FREQUENCY: (?P<freq>\d+) kHZ')
# Matches "cpu_frequency" or "cpu_frequency_devlib" events carrying
# "state=<freq> cpu_id=<cpu>".
DEVLIB_CPU_FREQ_REGEX = re.compile(r'cpu_frequency(?:_devlib):\s+state=(?P<freq>\d+)\s+cpu_id=(?P<cpu>\d+)')
class CorePowerTransitionEvent(object):
    """A single frequency change or idle-state change for one CPU."""

    kind = 'transition'
    __slots__ = ['timestamp', 'cpu_id', 'frequency', 'idle_state']

    def __init__(self, timestamp, cpu_id, frequency=None, idle_state=None):
        has_frequency = frequency is not None
        has_idle_state = idle_state is not None
        # Exactly one of the two must be supplied.
        if has_frequency == has_idle_state:
            raise ValueError('Power transition must specify a frequency or an idle_state, but not both.')
        self.timestamp = timestamp
        self.cpu_id = cpu_id
        self.frequency = frequency
        self.idle_state = idle_state

    def __str__(self):
        return 'cpu {} @ {} -> freq: {} idle: {}'.format(self.cpu_id, self.timestamp,
                                                         self.frequency, self.idle_state)

    def __repr__(self):
        return 'CPTE(c:{} t:{} f:{} i:{})'.format(self.cpu_id, self.timestamp,
                                                  self.frequency, self.idle_state)
class CorePowerDroppedEvents(object):
    """Marks a point in the event stream where events were lost for a CPU."""

    kind = 'dropped_events'
    __slots__ = ['cpu_id']

    def __init__(self, cpu_id):
        self.cpu_id = cpu_id

    def __str__(self):
        return 'DROPPED EVENTS on CPU{}'.format(self.cpu_id)

    __repr__ = __str__
class TraceMarkerEvent(object):
    """A named marker (e.g. START/STOP) seen in the event stream."""

    kind = 'marker'
    __slots__ = ['name']

    def __init__(self, name):
        self.name = name

    def __str__(self):
        return 'MARKER: {}'.format(self.name)
class CpuPowerState(object):
    """Power state (frequency plus idle state) of a single CPU."""

    __slots__ = ['frequency', 'idle_state']

    @property
    def is_idling(self):
        # idle_state >= 0 indexes a real idle state; None means unknown.
        state = self.idle_state
        return state is not None and state >= 0

    @property
    def is_active(self):
        # -1 is the sentinel for "running, not in any idle state".
        return self.idle_state == -1

    def __init__(self, frequency=None, idle_state=None):
        self.frequency = frequency
        self.idle_state = idle_state

    def __str__(self):
        return 'CP(f:{} i:{})'.format(self.frequency, self.idle_state)

    __repr__ = __str__
class SystemPowerState(object):
    """Timestamped snapshot of the power state of every CPU in the system."""

    __slots__ = ['timestamp', 'cpus']

    @property
    def num_cores(self):
        return len(self.cpus)

    def __init__(self, num_cores, no_idle=False):
        self.timestamp = None
        # With no_idle, every core starts out marked active (-1) rather
        # than in an unknown idle state (None).
        initial_idle = -1 if no_idle else None
        self.cpus = [CpuPowerState(idle_state=initial_idle)
                     for _ in range(num_cores)]

    def copy(self):
        clone = SystemPowerState(self.num_cores)
        clone.timestamp = self.timestamp
        for ours, theirs in zip(self.cpus, clone.cpus):
            theirs.frequency = ours.frequency
            theirs.idle_state = ours.idle_state
        return clone

    def __str__(self):
        return 'SP(t:{} Cs:{})'.format(self.timestamp, self.cpus)

    __repr__ = __str__
class PowerStateProcessor(object):
    """
    This takes a stream of power transition events and yields a timeline stream
    of system power states.

    Each consumed event produces a *copy* of the current ``SystemPowerState``
    (see ``update_power_state``), so consumers receive immutable snapshots.
    """

    @property
    def cpu_states(self):
        # Convenience accessor for the per-cpu CpuPowerState list.
        return self.power_state.cpus

    @property
    def current_time(self):
        return self.power_state.timestamp

    @current_time.setter
    def current_time(self, value):
        self.power_state.timestamp = value

    def __init__(self, cpus, wait_for_marker=True, no_idle=None):
        # wait_for_marker: if True, output is suppressed until the START
        #                  marker is seen (earlier events are still processed
        #                  to establish the initial power state).
        # no_idle: True when cpuidle is unavailable; if None, guessed from
        #          whether the first CPU reports any idle states.
        if no_idle is None:
            no_idle = not (cpus[0].cpuidle and cpus[0].cpuidle.states)
        self.power_state = SystemPowerState(len(cpus), no_idle=no_idle)
        self.requested_states = {}  # cpu_id -> requested idle state
        self.wait_for_marker = wait_for_marker
        self._saw_start_marker = False
        self._saw_stop_marker = False
        # Per-event errors are collected here rather than raised (see process()).
        self.exceptions = []
        self.idle_related_cpus = build_idle_state_map(cpus)

    def process(self, event_stream):
        """Yield a SystemPowerState snapshot per event in ``event_stream``."""
        for event in event_stream:
            try:
                next_state = self.update_power_state(event)
                # States observed before the START marker only serve to
                # establish the initial system state; they are not emitted.
                if self._saw_start_marker or not self.wait_for_marker:
                    yield next_state
                if self._saw_stop_marker:
                    break
            except Exception as e:  # pylint: disable=broad-except
                # Keep processing on a bad event; errors are reported in bulk.
                self.exceptions.append(e)
        else:
            # for-else: runs only if we never hit `break` (no STOP marker seen).
            if self.wait_for_marker:
                logger.warning("Did not see a STOP marker in the trace")

    def update_power_state(self, event):
        """
        Update the tracked power state based on the specified event and
        return updated power state.
        """
        if event.kind == 'transition':
            self._process_transition(event)
        elif event.kind == 'dropped_events':
            self._process_dropped_events(event)
        elif event.kind == 'marker':
            if event.name == 'START':
                self._saw_start_marker = True
            elif event.name == 'STOP':
                self._saw_stop_marker = True
        else:
            raise ValueError('Unexpected event type: {}'.format(event.kind))
        # Return a copy so later updates do not mutate emitted snapshots.
        return self.power_state.copy()

    def _process_transition(self, event):
        # A transition event carries either a frequency (P-state change) or
        # an idle_state (C-state change) -- see stream_cpu_power_transitions().
        self.current_time = event.timestamp
        if event.idle_state is None:
            self.cpu_states[event.cpu_id].frequency = event.frequency
        else:
            if event.idle_state == -1:
                self._process_idle_exit(event)
            else:
                self._process_idle_entry(event)

    def _process_dropped_events(self, event):
        # The tracer lost events for this cpu, so its state is now unknown;
        # any cpus that shared its (former) idle state become unknown too.
        self.cpu_states[event.cpu_id].frequency = None
        old_idle_state = self.cpu_states[event.cpu_id].idle_state
        self.cpu_states[event.cpu_id].idle_state = None
        related_ids = self.idle_related_cpus[(event.cpu_id, old_idle_state)]
        for rid in related_ids:
            self.cpu_states[rid].idle_state = None

    def _process_idle_entry(self, event):
        if self.cpu_states[event.cpu_id].is_idling:
            raise ValueError('Got idle state entry event for an idling core: {}'.format(event))
        # Record the request; the state actually entered may be shallower if
        # related cpus are not ready for a shared (cluster) state.
        self.requested_states[event.cpu_id] = event.idle_state
        self._try_transition_to_idle_state(event.cpu_id, event.idle_state)

    def _process_idle_exit(self, event):
        if self.cpu_states[event.cpu_id].is_active:
            raise ValueError('Got idle state exit event for an active core: {}'.format(event))
        self.requested_states.pop(event.cpu_id, None)  # remove outstanding request if there is one
        old_state = self.cpu_states[event.cpu_id].idle_state
        self.cpu_states[event.cpu_id].idle_state = -1  # -1 == running
        related_ids = self.idle_related_cpus[(event.cpu_id, old_state)]
        if old_state is not None:
            # This cpu leaving a shared state demotes its siblings: they can
            # now be in at most the next-shallower state.
            new_state = old_state - 1
            for rid in related_ids:
                if self.cpu_states[rid].idle_state > new_state:
                    self._try_transition_to_idle_state(rid, new_state)

    def _try_transition_to_idle_state(self, cpu_id, idle_state):
        related_ids = self.idle_related_cpus[(cpu_id, idle_state)]
        # Tristate: True - can transition, False - can't transition,
        # None - unknown idle state on at least one related cpu
        transition_check = self._can_enter_state(related_ids, idle_state)

        if transition_check is None:
            # Unknown state on a related cpu means we're not sure whether we're
            # entering requested state or a shallower one
            self.cpu_states[cpu_id].idle_state = None
            return

        # Keep trying shallower states until all related cpus allow one to be
        # entered together. (An unknown (None) result at a shallower state is
        # treated as "cannot enter" by this loop.)
        while not self._can_enter_state(related_ids, idle_state):
            idle_state -= 1
            related_ids = self.idle_related_cpus[(cpu_id, idle_state)]

        # The entered state applies to this cpu and all cpus sharing it.
        self.cpu_states[cpu_id].idle_state = idle_state
        for rid in related_ids:
            self.cpu_states[rid].idle_state = idle_state

    def _can_enter_state(self, related_ids, state):
        """
        This is a tri-state check. Returns ``True`` if related cpu states allow transition
        into this state, ``False`` if related cpu states don't allow transition into this
        state, and ``None`` if at least one of the related cpus is in an unknown state
        (so the decision of whether a transition is possible cannot be made).
        """
        for rid in related_ids:
            rid_requested_state = self.requested_states.get(rid, None)
            rid_current_state = self.cpu_states[rid].idle_state
            if rid_current_state is None:
                return None
            if rid_current_state < state:
                # A shallower sibling blocks the transition unless it has
                # itself requested this state (or a deeper one).
                if rid_requested_state is None or rid_requested_state < state:
                    return False
        return True
def stream_cpu_power_transitions(events):
    """
    Filter a raw trace event stream down to power-related events, translating
    each into a CorePowerTransitionEvent, CorePowerDroppedEvents or
    TraceMarkerEvent as appropriate. Non-power events are silently dropped.
    """
    for ev in events:
        name = ev.name
        if name == 'cpu_idle':
            # The idle state arrives as an unsigned value; reinterpret it as
            # signed 32-bit so "exit idle" shows up as -1.
            yield CorePowerTransitionEvent(ev.timestamp, ev.cpu_id,
                                           idle_state=c_int32(ev.state).value)
        elif name == 'cpu_frequency':
            yield CorePowerTransitionEvent(ev.timestamp, ev.cpu_id,
                                           frequency=ev.state)
        elif name == 'DROPPED EVENTS DETECTED':
            yield CorePowerDroppedEvents(ev.cpu_id)
        elif name == 'print':
            text = ev.text
            if TRACE_MARKER_START in text:
                yield TraceMarkerEvent('START')
            elif TRACE_MARKER_STOP in text:
                yield TraceMarkerEvent('STOP')
            else:
                # Frequencies may also be reported via print'ed text
                # (devlib-injected or initial-state lines).
                regex = (DEVLIB_CPU_FREQ_REGEX if 'cpu_frequency' in text
                         else INIT_CPU_FREQ_REGEX)
                match = regex.search(text)
                if match:
                    yield CorePowerTransitionEvent(ev.timestamp,
                                                   int(match.group('cpu')),
                                                   frequency=int(match.group('freq')))
def gather_core_states(system_state_stream, freq_dependent_idle_states=None):  # NOQA
    """
    Collapse a stream of system power-state snapshots into per-cpu
    ``(idle state, frequency)`` tuples, yielding ``(timestamp, [tuples])``.

    Frequency is kept only when it is meaningful for power: while running
    (idle state -1), or in one of the ``freq_dependent_idle_states``.
    """
    if freq_dependent_idle_states is None:
        freq_dependent_idle_states = []
    for snapshot in system_state_stream:
        states = []
        for cpu in snapshot.cpus:
            idle = cpu.idle_state
            if idle == -1:
                # Running: the frequency matters.
                states.append((-1, cpu.frequency))
            elif idle in freq_dependent_idle_states:
                # e.g. WFI-style states: keep frequency, but only if known.
                states.append((idle, cpu.frequency)
                              if cpu.frequency is not None else (None, None))
            else:
                # Deeper idle (or unknown): frequency is irrelevant.
                states.append((idle, None))
        yield (snapshot.timestamp, states)
def record_state_transitions(reporter, stream):
    """
    Pass-through generator: re-yields every event unchanged, notifying
    ``reporter`` of each 'transition' event as it flows by.
    """
    for item in stream:
        if item.kind == 'transition':
            reporter.record_transition(item)
        yield item
class PowerStateTransitions(object):
    """Reporter that logs every raw power state transition to a CSV timeline."""

    name = 'transitions-timeline'

    def __init__(self, output_directory):
        self.filepath = os.path.join(output_directory,
                                     'state-transitions-timeline.csv')
        self.writer, self._wfh = create_writer(self.filepath)
        self.writer.writerow(['timestamp', 'cpu_id', 'frequency', 'idle_state'])

    def update(self, timestamp, core_states):  # NOQA
        # This reporter consumes raw transition events via record_transition();
        # the collapsed core states are deliberately ignored.
        pass

    def record_transition(self, transition):
        self.writer.writerow([transition.timestamp,
                              transition.cpu_id,
                              transition.frequency,
                              transition.idle_state])

    def report(self):
        # The CSV itself is the report; returning self lets write() run.
        return self

    def write(self):
        self._wfh.close()
class PowerStateTimeline(object):
    """Reporter that writes a human-readable CSV timeline of CPU power states."""

    name = 'state-timeline'

    def __init__(self, output_directory, cpus):
        self.filepath = os.path.join(output_directory, 'power-state-timeline.csv')
        self.idle_state_names = {cpu.id: [s.name for s in cpu.cpuidle.states]
                                 for cpu in cpus}
        self.writer, self._wfh = create_writer(self.filepath)
        headers = ['ts'] + ['{} CPU{}'.format(cpu.name, cpu.id) for cpu in cpus]
        self.writer.writerow(headers)

    def update(self, timestamp, core_states):  # NOQA
        row = [timestamp]
        for cpu_idx, (idle_state, frequency) in enumerate(core_states):
            row.append(self._label(cpu_idx, idle_state, frequency))
        self.writer.writerow(row)

    def _label(self, cpu_idx, idle_state, frequency):
        # Human-readable cell for one CPU at one point in time.
        if idle_state is None:
            return 'unknown'
        if idle_state == -1:  # running
            return 'Running (unknown kHz)' if frequency is None else frequency
        names = self.idle_state_names[cpu_idx]
        if frequency is None:
            # Fall back to a numeric label if no state names are available.
            return names[idle_state] if names else 'idle[{}]'.format(idle_state)
        return '{} ({})'.format(names[idle_state], frequency)

    def report(self):
        return self

    def write(self):
        self._wfh.close()
class ParallelStats(object):
    """
    Accumulates, per frequency-domain cluster (and for the system as a whole),
    how long each possible number of cores was simultaneously active.
    """

    def __init__(self, output_directory, cpus, use_ratios=False):
        self.filepath = os.path.join(output_directory, 'parallel-stats.csv')
        self.clusters = defaultdict(set)
        self.use_ratios = use_ratios

        # Derive clusters from each CPU's cpufreq related_cpus list,
        # de-duplicated while preserving first-seen order.
        seen = []
        for cpu in cpus:
            if cpu.cpufreq.related_cpus not in seen:
                seen.append(cpu.cpufreq.related_cpus)
        for idx, members in enumerate(seen):
            self.clusters[str(idx)] = set(members)
        self.clusters['all'] = {cpu.id for cpu in cpus}

        self.first_timestamp = None
        self.last_timestamp = None
        self.previous_states = None
        self.parallel_times = defaultdict(lambda: defaultdict(int))
        self.running_times = defaultdict(int)

    def update(self, timestamp, core_states):
        if self.last_timestamp is None:
            # First sample only establishes the starting point.
            self.first_timestamp = timestamp
        else:
            # Attribute the elapsed interval to the *previous* sample's states.
            delta = timestamp - self.last_timestamp
            active = [idx for idx, state in enumerate(self.previous_states)
                      if state and state[0] == -1]
            for cluster_name, members in self.clusters.items():
                active_in_cluster = len(members.intersection(active))
                self.parallel_times[cluster_name][active_in_cluster] += delta
                if active_in_cluster:
                    self.running_times[cluster_name] += delta
        self.last_timestamp = timestamp
        self.previous_states = core_states

    def report(self):  # NOQA
        """Build a ParallelReport from the accumulated times (None if no data)."""
        if self.last_timestamp is None:
            return None
        result = ParallelReport(self.filepath)
        total_time = self.last_timestamp - self.first_timestamp
        precision = 3 if self.use_ratios else 1
        fmt = '{{:.{}f}}'.format(precision)
        scale = 1 if self.use_ratios else 100
        for cluster in sorted(self.parallel_times):
            running_time = self.running_times[cluster]
            for n in range(len(self.clusters[cluster]) + 1):
                time = self.parallel_times[cluster][n]
                time_pc = time / total_time * scale
                # %running_time only makes sense for n > 0 with non-zero
                # running time; otherwise report 0.
                if n and running_time:
                    running_time_pc = time / running_time * scale
                else:
                    running_time_pc = 0
                result.add([cluster, n,
                            fmt.format(time),
                            fmt.format(time_pc),
                            fmt.format(running_time_pc),
                            ])
        return result
class ParallelReport(object):
    """Holds parallel-execution stat rows and persists them as CSV."""

    name = 'parallel-stats'

    def __init__(self, filepath):
        self.filepath = filepath
        self.values = []  # one list per CSV row, in insertion order

    def add(self, value):
        self.values.append(value)

    def write(self):
        with csvwriter(self.filepath) as writer:
            writer.writerow(['cluster', 'number_of_cores', 'total_time',
                             '%time', '%running_time'])
            writer.writerows(self.values)
class PowerStateStats(object):
    """Accumulates per-CPU residency time for each observed power state."""

    def __init__(self, output_directory, cpus, use_ratios=False):
        self.filepath = os.path.join(output_directory, 'power-state-stats.csv')
        self.core_names = [cpu.name for cpu in cpus]
        self.idle_state_names = {cpu.id: [s.name for s in cpu.cpuidle.states]
                                 for cpu in cpus}
        self.use_ratios = use_ratios
        self.first_timestamp = None
        self.last_timestamp = None
        self.previous_states = None
        # cpu index -> {state label -> accumulated time}
        self.cpu_states = defaultdict(lambda: defaultdict(int))

    def update(self, timestamp, core_states):  # NOQA
        if self.last_timestamp is None:
            # First sample only establishes the starting point.
            self.first_timestamp = timestamp
        else:
            # Attribute the elapsed interval to the previous sample's states.
            delta = timestamp - self.last_timestamp
            for cpu, (idle, freq) in enumerate(self.previous_states):
                self.cpu_states[cpu][self._state_label(cpu, idle, freq)] += delta
        self.last_timestamp = timestamp
        self.previous_states = core_states

    def _state_label(self, cpu, idle, freq):
        # Human-readable name for a (C-state, frequency) pair.
        if idle == -1:  # running
            if freq is not None:
                return '{:07}KHz'.format(freq)
            return 'Running (unknown KHz)'
        if freq:
            return '{}-{:07}KHz'.format(self.idle_state_names[cpu][idle], freq)
        if idle is not None and self.idle_state_names[cpu]:
            return self.idle_state_names[cpu][idle]
        return 'unknown'

    def report(self):
        """Build a PowerStateStatsReport of residency fractions (None if no data)."""
        if self.last_timestamp is None:
            return None
        total_time = self.last_timestamp - self.first_timestamp
        state_stats = defaultdict(lambda: [None] * len(self.core_names))
        scale = 1 if self.use_ratios else 100
        for cpu, states in self.cpu_states.items():
            for state, time in states.items():
                state_stats[state][cpu] = time / total_time * scale
        precision = 3 if self.use_ratios else 1
        return PowerStateStatsReport(self.filepath, state_stats,
                                     self.core_names, precision)
class PowerStateStatsReport(object):
    """CSV writer for per-CPU power state residency percentages/ratios."""

    name = 'power-state-stats'

    def __init__(self, filepath, state_stats, core_names, precision=2):
        self.filepath = filepath
        self.state_stats = state_stats  # state label -> per-cpu values (None == 0)
        self.core_names = core_names
        self.precision = precision

    def write(self):
        fmt = '{{:.{}f}}'.format(self.precision)
        with csvwriter(self.filepath) as writer:
            writer.writerow(['state'] + ['{} CPU{}'.format(name, idx)
                                         for idx, name in enumerate(self.core_names)])
            for state in sorted(self.state_stats):
                cells = [fmt.format(value if value is not None else 0)
                         for value in self.state_stats[state]]
                writer.writerow([state] + cells)
class CpuUtilizationTimeline(object):
    """Reporter that writes a CSV timeline of per-CPU utilization.

    Utilization is approximated as the CPU's current frequency divided by its
    maximum available frequency (typically in ``(0, 1]``); the cell is left
    empty when either frequency is unknown.
    """

    name = 'utilization-timeline'

    def __init__(self, output_directory, cpus):
        self.filepath = os.path.join(output_directory, 'utilization-timeline.csv')
        self.writer, self._wfh = create_writer(self.filepath)
        headers = ['ts'] + ['{} CPU{}'.format(cpu.name, cpu.id) for cpu in cpus]
        self.writer.writerow(headers)
        # One entry per CPU, index-aligned with core_states in update();
        # None marks CPUs whose available frequencies are unknown.
        # (Previously, CPUs without frequencies were filtered out -- which
        # misaligned indices -- and membership was tested with
        # `core in self._max_freq_list`, comparing a CPU *index* against
        # *frequency values*, so utilization was effectively never emitted.)
        self._max_freq_list = [
            cpu.cpufreq.available_frequencies[-1]
            if cpu.cpufreq.available_frequencies else None
            for cpu in cpus
        ]

    def update(self, timestamp, core_states):  # NOQA
        row = [timestamp]
        for core, (_, frequency) in enumerate(core_states):
            max_freq = (self._max_freq_list[core]
                        if core < len(self._max_freq_list) else None)
            if frequency is not None and max_freq is not None:
                # Normalize to a fraction of the CPU's maximum frequency.
                row.append(frequency / float(max_freq))
            else:
                # Frequency or max frequency unknown -- leave the cell blank.
                row.append(None)
        self.writer.writerow(row)

    def report(self):
        return self

    def write(self):
        self._wfh.close()
def build_idle_state_map(cpus):
    """
    Map ``(cpu index, idle state index)`` to the list of *other* CPUs affected
    when that CPU enters that state.

    States below ``num_states - 1`` (the first cluster state) are core-local
    and affect nobody else; the cluster state(s) affect the CPU's
    frequency-domain siblings (its ``related_cpus`` minus itself).
    """
    idle_state_map = defaultdict(list)
    for cpu_idx, cpu in enumerate(cpus):
        siblings = set(cpu.cpufreq.related_cpus) - {cpu_idx}
        first_cluster_state = cpu.cpuidle.num_states - 1
        for state_idx in range(len(cpu.cpuidle.states)):
            affected = [] if state_idx < first_cluster_state else list(siblings)
            idle_state_map[(cpu_idx, state_idx)] = affected
    return idle_state_map
def report_power_stats(trace_file, cpus, output_basedir, use_ratios=False, no_idle=None,  # pylint: disable=too-many-locals
                       split_wfi_states=False):
    """
    Process trace-cmd output to generate timelines and statistics of CPU power
    state (a.k.a P- and C-state) transitions in the trace.

    The results will be written into a subdirectory called "power-states" under
    the specified ``output_basedir``.

    :param trace_file: trace-cmd's text trace to process.
    :param cpus: A list of ``CpuInfo`` objects describing a target's CPUs.
                 These are typically reported as part of ``TargetInfo`` in
                 WA output.
    :param output_basedir: Base location for the output. This directory must
                           exist and must not contain a directory or file
                           named ``"power-states"``.
    :param use_ratios: By default, stats will be reported as percentages. Set
                       this to ``True`` to report stats as decimals in the
                       ``0 <= value <= 1`` instead.
    :param no_idle: ``False`` if cpuidle and at least one idle state per CPU are
                    enabled, should be ``True`` otherwise. This influences the
                    assumptions about CPU's initial states. If not explicitly
                    set, the value for this will be guessed based on whether
                    cpuidle states are present in the first ``CpuInfo``.
    :param split_wfi_states: if ``True``, the shallowest idle state (WFI) is
                             treated as frequency-dependent, so its residency
                             is reported per-frequency.

    The output directory will contain the following files:

    power-state-stats.csv
        Power state residency statistics for each CPU. Shows the percentage of
        time a CPU has spent in each of its available power states.

    parallel-stats.csv
        Parallel execution stats for each CPU cluster, and combined stats for
        the whole system.

    power-state-timeline.csv
        Timeline of CPU power states. Shows which power state each CPU is in at
        a point in time.

    state-transitions-timeline.csv
        Timeline of CPU power state transitions. Each entry shows a CPU's
        transition from one power state to another.

    utilization-timeline.csv
        Timeline of CPU utilizations.

    .. note:: Timeline entries aren't at regular intervals, but at times of
              power transition events.

    Stats are generated by assembling a pipeline consisting of the following
    stages:

    1. Parse trace into trace events
    2. Filter trace events into power state transition events
    3. Record power state transitions
    4. Convert transitions into a power states.
    5. Collapse the power states into timestamped ``(C state, P state)``
       tuples for each cpu.
    6. Update reporters/stats generators with cpu states.

    """
    output_directory = os.path.join(output_basedir, 'power-states')
    if not os.path.isdir(output_directory):
        os.mkdir(output_directory)

    # With split_wfi_states, state 0 (WFI) residency is tracked per-frequency.
    freq_dependent_idle_states = []
    if split_wfi_states:
        freq_dependent_idle_states = [0]

    # init trace, processor, and reporters
    # note: filter_markers is False here, even though we *will* filter by them. The
    #       reason for this is that we want to observe events before the start
    #       marker in order to establish the initial power states.
    parser = TraceCmdParser(filter_markers=False,
                            events=['cpu_idle', 'cpu_frequency', 'print'])
    ps_processor = PowerStateProcessor(cpus, wait_for_marker=trace_has_marker(trace_file),
                                       no_idle=no_idle)
    transitions_reporter = PowerStateTransitions(output_directory)
    reporters = [
        ParallelStats(output_directory, cpus, use_ratios),
        PowerStateStats(output_directory, cpus, use_ratios),
        PowerStateTimeline(output_directory, cpus),
        CpuUtilizationTimeline(output_directory, cpus),
        transitions_reporter,
    ]

    # assemble the pipeline (all stages are lazy generators; nothing runs yet)
    event_stream = parser.parse(trace_file)
    transition_stream = stream_cpu_power_transitions(event_stream)
    recorded_trans_stream = record_state_transitions(transitions_reporter, transition_stream)
    power_state_stream = ps_processor.process(recorded_trans_stream)
    core_state_stream = gather_core_states(power_state_stream, freq_dependent_idle_states)

    # execute the pipeline
    for timestamp, states in core_state_stream:
        for reporter in reporters:
            reporter.update(timestamp, states)

    # report any issues encountered while executing the pipeline
    if ps_processor.exceptions:
        logger.warning('There were errors while processing trace:')
        for e in ps_processor.exceptions:
            logger.warning(str(e))

    # generate reports
    # NOTE(review): reporter.report() can return None when no states were seen
    #               (e.g. ParallelStats with no updates); that would crash on
    #               report.write() below -- confirm whether that is reachable.
    reports = {}
    for reporter in reporters:
        report = reporter.report()
        report.write()
        reports[report.name] = report
    return reports
|
{
"content_hash": "46caf81fd8a84be863bd46a8e08a3f89",
"timestamp": "",
"source": "github",
"line_count": 692,
"max_line_length": 123,
"avg_line_length": 37.660404624277454,
"alnum_prop": 0.5893864395073097,
"repo_name": "ARM-software/lisa",
"id": "60a16fb2f38a3ae8c54e0e5f07f1b1b2dd6d4b7b",
"size": "26648",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "external/workload-automation/wa/utils/cpustates.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "68635"
},
{
"name": "Jupyter Notebook",
"bytes": "60193313"
},
{
"name": "Makefile",
"bytes": "6176"
},
{
"name": "Perl",
"bytes": "6106"
},
{
"name": "Python",
"bytes": "2337042"
},
{
"name": "Shell",
"bytes": "108802"
}
],
"symlink_target": ""
}
|
import bob.db.verification.filelist
import bob.bio.base
# babel_wav_directory = "[YOUR_HUNSPEECH_WAV_DIRECTORY]"
# bob.bio verification-database configuration for the Babel speech data,
# backed by file lists on disk (see bob.db.verification.filelist for the
# expected list layout).
database = bob.bio.base.database.DatabaseBob(
    database = bob.db.verification.filelist.Database(
        # TODO Later replace this first argument by
        # pkg_resources.resource_filename(
        #    'bob.db.hunspeech', 'config/database/hunspeech')
        '/mnt/store/makrai/project/speech/babel/bob_list/',  # Contains the file lists
        original_directory = '/mnt/store/hlt/Speech/LangPack',  # root of the raw audio
        original_extension = ".wav",),
    name = "babel",
    protocol = '',  # There is only a single protocol
)
|
{
"content_hash": "e58cd32000c1a64baa695277fd693937",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 85,
"avg_line_length": 39.75,
"alnum_prop": 0.6776729559748428,
"repo_name": "juditacs/hunspeech",
"id": "8a981a9e28ec8ec23e67f0c2c9febb2267134876",
"size": "636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emLid/babel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23917"
}
],
"symlink_target": ""
}
|
import sst
# SST simulation configuration: a Miranda stream-generator CPU feeding an
# L1 cache (with stride prefetcher) backed by a goblinHMCSim memory
# controller, with accumulator statistics enabled throughout.

# Define SST core options
sst.setProgramOption("timebase", "1ps")
sst.setProgramOption("stopAtCycle", "0 ns")

# Define the simulation components
# CPU: Miranda base CPU driving a single sequential access stream.
comp_cpu = sst.Component("cpu", "miranda.BaseCPU")
comp_cpu.addParams({
    "verbose" : 0,
    "generator" : "miranda.SingleStreamGenerator",
    "generatorParams.verbose" : 0,
    "generatorParams.startat" : 3,
    "generatorParams.count" : 500000,       # number of requests to issue
    "generatorParams.max_address" : 512000, # address range ceiling
    "printStats" : 1,
})

# Tell SST what statistics handling we want
sst.setStatisticLoadLevel(4)

# Enable statistics outputs
comp_cpu.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# L1 cache: small (2KB) MESI cache with a Cassini stride prefetcher.
comp_l1cache = sst.Component("l1cache", "memHierarchy.Cache")
comp_l1cache.addParams({
    "access_latency_cycles" : "2",
    "cache_frequency" : "2 Ghz",
    "replacement_policy" : "lru",
    "coherence_protocol" : "MESI",
    "associativity" : "4",
    "cache_line_size" : "64",
    "prefetcher" : "cassini.StridePrefetcher",
    "debug" : "1",
    "L1" : "1",
    "cache_size" : "2KB"
})

# Enable statistics outputs
comp_l1cache.enableAllStatistics({"type":"sst.AccumulatorStatistic"})

# Memory controller using the goblinHMCSim backend, with HMC tracing on.
comp_memory = sst.Component("memory", "memHierarchy.MemController")
comp_memory.addParams({
    "coherence_protocol" : "MESI",
    "backend.access_time" : "1000 ns",
    "backend.mem_size" : "512",
    "clock" : "1GHz",
    "backend" : "memHierarchy.goblinHMCSim",
    "backend.device_count" : "1",
    "backend.link_count" : "4",
    "backend.vault_count" : "32",
    "backend.queue_depth" : "64",
    "backend.bank_count" : "16",
    "backend.dram_count" : "20",
    "backend.capacity_per_device" : "4",
    "backend.xbar_depth" : "128",
    "backend.max_req_size" : "128",
    "backend.trace-banks" : "1",
    "backend.trace-queue" : "1",
    "backend.trace-cmds" : "1",
    "backend.trace-latency" : "1",
    "backend.trace-stalls" : "1"
})

# Define the simulation links: cpu <-> l1cache <-> memory
link_cpu_cache_link = sst.Link("link_cpu_cache_link")
link_cpu_cache_link.connect( (comp_cpu, "cache_link", "1000ps"), (comp_l1cache, "high_network_0", "1000ps") )
link_cpu_cache_link.setNoCut()

link_mem_bus_link = sst.Link("link_mem_bus_link")
link_mem_bus_link.connect( (comp_l1cache, "low_network_0", "50ps"), (comp_memory, "direct_link", "50ps") )
|
{
"content_hash": "704a9b02e4c95698f7f098ccbb5f90da",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 109,
"avg_line_length": 32.013888888888886,
"alnum_prop": 0.6433839479392625,
"repo_name": "tactcomplabs/gc64-hmcsim",
"id": "31d27f09a9e1bf9a88221bba1677d5696ea23842",
"size": "2305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/sst/6.0.0/goblin_singlestream2-trace.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "894956"
},
{
"name": "C++",
"bytes": "34203"
},
{
"name": "Gnuplot",
"bytes": "443"
},
{
"name": "Lex",
"bytes": "2889"
},
{
"name": "Makefile",
"bytes": "39168"
},
{
"name": "Python",
"bytes": "153082"
},
{
"name": "Roff",
"bytes": "2856"
},
{
"name": "Shell",
"bytes": "47081"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create ``Default_Group``: a concrete (multi-table-inheritance) subclass
    of ``django.contrib.auth``'s ``Group``, whose only field is the implicit
    ``group_ptr`` one-to-one link back to ``auth.Group``."""

    dependencies = [
        ('auth', '0001_initial'),
        ('profiles', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Default_Group',
            fields=[
                # parent_link=True marks this as the MTI pointer to auth.Group.
                ('group_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='auth.Group')),
            ],
            options={
            },
            bases=('auth.group',),
        ),
    ]
|
{
"content_hash": "29fb21548b358c6baf0740e4916ac23f",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 141,
"avg_line_length": 25.043478260869566,
"alnum_prop": 0.5434027777777778,
"repo_name": "joelstanner/django-imager",
"id": "182def9b97f650cf46f11ddda2e6ba5d4cab86ba",
"size": "600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imager/profiles/migrations/0002_default_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "62046"
},
{
"name": "HTML",
"bytes": "10983"
},
{
"name": "JavaScript",
"bytes": "91780"
},
{
"name": "Python",
"bytes": "67166"
}
],
"symlink_target": ""
}
|
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter ``Language.iso639`` to a plain ``CharField(max_length=10)``."""

    dependencies = [
        ("pokemon_v2", "0003_auto_20160530_1132"),
    ]

    operations = [
        migrations.AlterField(
            model_name="language",
            name="iso639",
            field=models.CharField(max_length=10),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "787a1371a15a1ad6d0c899ffb9d34310",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 50,
"avg_line_length": 22.176470588235293,
"alnum_prop": 0.5702917771883289,
"repo_name": "PokeAPI/pokeapi",
"id": "fa62bed381b76aed3df9d34a516cb3afd8a76b63",
"size": "377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pokemon_v2/migrations/0004_iso639length_20191217.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "573"
},
{
"name": "Go",
"bytes": "2087"
},
{
"name": "JavaScript",
"bytes": "3004"
},
{
"name": "Makefile",
"bytes": "4888"
},
{
"name": "Python",
"bytes": "699636"
},
{
"name": "Shell",
"bytes": "13500"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class PacketCaptureResult(Model):
    """Information about packet capture session.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar name: Name of the packet capture session.
    :vartype name: str
    :ivar id: ID of the packet capture operation.
    :vartype id: str
    :param etag: Default value: "A unique read-only string that changes
     whenever the resource is updated." .
    :type etag: str
    :param target: The ID of the targeted resource, only VM is currently
     supported.
    :type target: str
    :param bytes_to_capture_per_packet: Number of bytes captured per packet,
     the remaining bytes are truncated. Default value: 0 .
    :type bytes_to_capture_per_packet: int
    :param total_bytes_per_session: Maximum size of the capture output.
     Default value: 1073741824 .
    :type total_bytes_per_session: int
    :param time_limit_in_seconds: Maximum duration of the capture session in
     seconds. Default value: 18000 .
    :type time_limit_in_seconds: int
    :param storage_location:
    :type storage_location: :class:`PacketCaptureStorageLocation
     <azure.mgmt.network.v2016_12_01.models.PacketCaptureStorageLocation>`
    :param filters:
    :type filters: list of :class:`PacketCaptureFilter
     <azure.mgmt.network.v2016_12_01.models.PacketCaptureFilter>`
    :param provisioning_state: The provisioning state of the packet capture
     session. Possible values include: 'Succeeded', 'Updating', 'Deleting',
     'Failed'
    :type provisioning_state: str or :class:`ProvisioningState
     <azure.mgmt.network.v2016_12_01.models.ProvisioningState>`
    """

    # name/id are server-populated (read-only); target and storage_location
    # must be supplied by the caller.
    _validation = {
        'name': {'readonly': True},
        'id': {'readonly': True},
        'target': {'required': True},
        'storage_location': {'required': True},
    }

    # Maps Python attribute names to their (possibly nested) JSON paths.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'id': {'key': 'id', 'type': 'str'},
        'etag': {'key': 'etag', 'type': 'str'},
        'target': {'key': 'properties.target', 'type': 'str'},
        'bytes_to_capture_per_packet': {'key': 'properties.bytesToCapturePerPacket', 'type': 'int'},
        'total_bytes_per_session': {'key': 'properties.totalBytesPerSession', 'type': 'int'},
        'time_limit_in_seconds': {'key': 'properties.timeLimitInSeconds', 'type': 'int'},
        'storage_location': {'key': 'properties.storageLocation', 'type': 'PacketCaptureStorageLocation'},
        'filters': {'key': 'properties.filters', 'type': '[PacketCaptureFilter]'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
    }

    # NOTE(review): this class is auto-generated (msrest/AutoRest); the default
    # for `etag` appears to be the field's *description* leaked into the default
    # value by the code generator -- confirm against the service spec before
    # changing.
    def __init__(self, target, storage_location, etag="A unique read-only string that changes whenever the resource is updated.", bytes_to_capture_per_packet=0, total_bytes_per_session=1073741824, time_limit_in_seconds=18000, filters=None, provisioning_state=None):
        self.name = None
        self.id = None
        self.etag = etag
        self.target = target
        self.bytes_to_capture_per_packet = bytes_to_capture_per_packet
        self.total_bytes_per_session = total_bytes_per_session
        self.time_limit_in_seconds = time_limit_in_seconds
        self.storage_location = storage_location
        self.filters = filters
        self.provisioning_state = provisioning_state
|
{
"content_hash": "eaaec40c7a4372c72d7e49048361b069",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 265,
"avg_line_length": 46.81944444444444,
"alnum_prop": 0.6659744882824088,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "78f6eb10241ebc5bab8b98d7945af9ba9ad35696",
"size": "3845",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/packet_capture_result.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
import csv,os
import serial_reader
#trydemo.myfn()
import time
# Poll the serial reader indefinitely, once every 5 seconds.
while True:
    serial_reader.myfn()
    time.sleep(5)
|
{
"content_hash": "a6caa898fa041dc851bc635a833f1175",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 24,
"avg_line_length": 17.142857142857142,
"alnum_prop": 0.7083333333333334,
"repo_name": "kartikpalani/Smart-Energy-Hackathon",
"id": "a0cafa12e6d9cf6783334a4bfc81591172855eeb",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Moto-Energy/User_Interface/serial_caller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "86481"
},
{
"name": "C",
"bytes": "18418"
},
{
"name": "C++",
"bytes": "196652"
},
{
"name": "CSS",
"bytes": "353160"
},
{
"name": "D",
"bytes": "4956"
},
{
"name": "Java",
"bytes": "105707"
},
{
"name": "JavaScript",
"bytes": "3874299"
},
{
"name": "M",
"bytes": "8537"
},
{
"name": "Matlab",
"bytes": "144144"
},
{
"name": "PHP",
"bytes": "1589600"
},
{
"name": "Python",
"bytes": "31497"
}
],
"symlink_target": ""
}
|
import logging
import os
from helper import file_helper
# NOTE(review): configuring the root logger at import time affects the whole
# process; consider moving basicConfig() to the application entry point.
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
filename = '../config/firewall_config.pkl'
# Resolve the config path relative to this module's own directory so it
# works regardless of the current working directory.
path = os.path.join(os.path.dirname(os.path.abspath(__file__)), filename)
def save(dict):
    """Persist the given firewall settings to the module's config file."""
    # NOTE(review): the parameter name shadows the builtin `dict`; renaming it
    # would be clearer but changes the keyword-argument interface for callers.
    return file_helper.save(path, dict)
def load():
    """Load and return the persisted firewall settings from the config file."""
    return file_helper.load(path)
|
{
"content_hash": "472a38ac24f2f94238b6a0eeac44452d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 76,
"avg_line_length": 21.11764705882353,
"alnum_prop": 0.7214484679665738,
"repo_name": "vcpe-io/vcpe-hub",
"id": "15e29e6edbc935cb93c83370aca7e203e7b46b39",
"size": "359",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "home_gateway/models/firewall_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "102864"
}
],
"symlink_target": ""
}
|
from .interface import TorchModel, TorchSequenceModel
from . import util, data, interface
|
{
"content_hash": "41417db20f5d39716a3a28a446ae1064",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 53,
"avg_line_length": 45,
"alnum_prop": 0.8222222222222222,
"repo_name": "mattHawthorn/sk-torch",
"id": "c06f150f99333597f39e4a1b65bd684f35d99746",
"size": "105",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sktorch/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91963"
}
],
"symlink_target": ""
}
|
from django.conf.urls import url
from data_ingestion import views
urlpatterns = [
    # ex: /data-ingestion-page/
    url(r'^$', views.index, name='indexData'),
    # ex: /data-ingestion-page/5/
    url(r'^(?P<collection_id>[0-9]+)/$', views.detail, name='detail'),
    # ex: /data-ingestion-page/5/editData/
    url(r'^(?P<collection_id>[0-9]+)/editData/$', views.editData, name='editData'),
    # ex: /data-ingestion-page/5/deleteData
    # NOTE(review): unlike the other routes, this pattern has no trailing
    # slash and captures the id as `id` rather than `collection_id`.
    url(r'^(?P<id>[0-9]+)/deleteData$', views.deleteData, name='deleteData'),
]
|
{
"content_hash": "d83a9a571bd6f11eac1cbb851d87ad2d",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 39,
"alnum_prop": 0.6324786324786325,
"repo_name": "SISTEMAsw/TAMP",
"id": "068cb8d921714c3187647516788982c3a32ddb12",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/data_ingestion/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "158211"
},
{
"name": "CSS",
"bytes": "28165"
},
{
"name": "HTML",
"bytes": "284694"
},
{
"name": "JavaScript",
"bytes": "9602"
},
{
"name": "Makefile",
"bytes": "772"
},
{
"name": "Python",
"bytes": "389072"
},
{
"name": "Shell",
"bytes": "4390"
}
],
"symlink_target": ""
}
|
import random
from googleimages import GoogleImagesSearch
words = ["ability","able","aboard","about","above","accept","accident","according",
"account","accurate","acres","across","act","action","active","activity",
"actual","actually","add","addition","additional","adjective","adult","adventure",
"advice","affect","afraid","after","afternoon","again","against","age",
"ago","agree","ahead","aid","air","airplane","alike","alive", "all","allow","almost","alone","along","aloud","alphabet","already",
"also","although","am","among","amount","ancient","angle","angry",
"animal","announced","another","answer","ants","any","anybody","anyone",
"anything","anyway","anywhere","apart","apartment","appearance","apple","applied",
"appropriate","are","area","arm","army","around","arrange","arrangement",
"arrive","arrow","art","article","as","aside","ask","asleep", "at","ate","atmosphere","atom","atomic","attached","attack","attempt",
"attention","audience","author","automobile","available","average","avoid","aware",
"away","baby","back","bad","badly","bag","balance","ball", "balloon","band","bank","bar","bare","bark","barn","base",
"baseball","basic","basis","basket","bat","battle","be","bean",
"bear","beat","beautiful","beauty","became","because","become","becoming",
"bee","been","before","began","beginning","begun","behavior","behind",
"being","believed","bell","belong","below","belt","bend","beneath",
"bent","beside","best","bet","better","between","beyond","bicycle",
"bigger","biggest","bill","birds","birth","birthday","bit","bite", "black","blank","blanket","blew","blind","block","blood","blow",
"blue","board","boat","body","bone","book","border","born", "both","bottle","bottom","bound","bow","bowl","box","boy",
"brain","branch","brass","brave","bread","break","breakfast","breath",
"breathe","breathing","breeze","brick","bridge","brief","bright","bring",
"broad","broke","broken","brother","brought","brown","brush","buffalo",
"build","building","built","buried","burn","burst","bus","bush", "business","busy","but","butter","buy","by","cabin","cage",
"cake","call","calm","came","camera","camp","can","canal", "cannot","cap","capital","captain","captured","car","carbon","card",
"care","careful","carefully","carried","carry","case","cast","castle", "cat","catch","cattle","caught","cause","cave","cell","cent",
"center","central","century","certain","certainly","chain","chair","chamber",
"chance","change","changing","chapter","character","characteristic","charge","chart",
"check","cheese","chemical","chest","chicken","chief","child","children",
"choice","choose","chose","chosen","church","circle","circus","citizen",
"city","class","classroom","claws","clay","clean","clear","clearly",
"climate","climb","clock","close","closely","closer","cloth","clothes",
"clothing","cloud","club","coach","coal","coast","coat","coffee",
"cold","collect","college","colony","color","column","combination","combine",
"come","comfortable","coming","command","common","community","company","compare",
"compass","complete","completely","complex","composed","composition","compound","concerned",
"condition","congress","connected","consider","consist","consonant","constantly","construction",
"contain","continent","continued","contrast","control","conversation","cook","cookies",
"cool","copper","copy","corn","corner","correct","correctly","cost",
"cotton","could","count","country","couple","courage","course","court",
"cover","cow","cowboy","crack","cream","create","creature","crew", "crop","cross","crowd","cry","cup","curious","current","curve",
"customs","cut","cutting","daily","damage","dance","danger","dangerous",
"dark","darkness","date","daughter","dawn","day","dead","deal",
"dear","death","decide","declared","deep","deeply","deer","definition",
"degree","depend","depth","describe","desert","design","desk","detail",
"determine","develop","development","diagram","diameter","did","die","differ",
"difference","different","difficult","difficulty","dig","dinner","direct","direction",
"directly","dirt","dirty","disappear","discover","discovery","discuss","discussion",
"disease","dish","distance","distant","divide","division","do","doctor",
"does","dog","doing","doll","dollar","done","donkey","door", "dot","double","doubt","down","dozen","draw","drawn","dream",
"dress","drew","dried","drink","drive","driven","driver","driving", "drop","dropped","drove","dry","duck","due","dug","dull",
"during","dust","duty","each","eager","ear","earlier","early", "earn","earth","easier","easily","east","easy","eat","eaten",
"edge","education","effect","effort","egg","eight","either","electric",
"electricity","element","elephant","eleven","else","empty","end","enemy",
"energy","engine","engineer","enjoy","enough","enter","entire","entirely",
"environment","equal","equally","equator","equipment","escape","especially","essential",
"establish","even","evening","event","eventually","ever","every","everybody",
"everyone","everything","everywhere","evidence","exact","exactly","examine","example",
"excellent","except","exchange","excited","excitement","exciting","exclaimed","exercise",
"exist","expect","experience","experiment","explain","explanation","explore","express",
"expression","extra","eye","face","facing","fact","factor","factory",
"failed","fair","fairly","fall","fallen","familiar","family","famous",
"far","farm","farmer","farther","fast","fastened","faster","fat",
"father","favorite","fear","feathers","feature","fed","feed","feel", "feet","fell","fellow","felt","fence","few","fewer","field",
"fierce","fifteen","fifth","fifty","fight","fighting","figure","fill",
"film","final","finally","find","fine","finest","finger","finish", "fire","fireplace","firm","first","fish","five","fix","flag",
"flame","flat","flew","flies","flight","floating","floor","flow", "flower","fly","fog","folks","follow","food","foot","football",
"for","force","foreign","forest","forget","forgot","forgotten","form",
"former","fort","forth","forty","forward","fought","found","four",
"fourth","fox","frame","free","freedom","frequently","fresh","friend",
"friendly","frighten","frog","from","front","frozen","fruit","fuel",
"full","fully","fun","function","funny","fur","furniture","further",
"future","gain","game","garage","garden","gas","gasoline","gate",
"gather","gave","general","generally","gentle","gently","get","getting",
"giant","gift","girl","give","given","giving","glad","glass", "globe","go","goes","gold","golden","gone","good","goose",
"got","government","grabbed","grade","gradually","grain","grandfather","grandmother",
"graph","grass","gravity","gray","great","greater","greatest","greatly",
"green","grew","ground","group","grow","grown","growth","guard", "guess","guide","gulf","gun","habit","had","hair","half",
"halfway","hall","hand","handle","handsome","hang","happen","happened",
"happily","happy","harbor","hard","harder","hardly","has","hat", "have","having","hay","he","headed","heading","health","heard",
"hearing","heart","heat","heavy","height","held","hello","help", "helpful","her","herd","here","herself","hidden","hide","high",
"higher","highest","highway","hill","him","himself","his","history", "hit","hold","hole","hollow","home","honor","hope","horn",
"horse","hospital","hot","hour","house","how","however","huge", "human","hundred","hung","hungry","hunt","hunter","hurried","hurry",
"hurt","husband","ice","idea","identity","if","ill","image",
"imagine","immediately","importance","important","impossible","improve","in","inch",
"include","including","income","increase","indeed","independent","indicate","individual",
"industrial","industry","influence","information","inside","instance","instant","instead",
"instrument","interest","interior","into","introduced","invented","involved","iron",
"is","island","it","its","itself","jack","jar","jet", "job","join","joined","journey","joy","judge","jump","jungle",
"just","keep","kept","key","kids","kill","kind","kitchen", "knew","knife","know","knowledge","known","label","labor","lack",
"lady","laid","lake","lamp","land","language","large","larger", "largest","last","late","later","laugh","law","lay","layers",
"lead","leader","leaf","learn","least","leather","leave","leaving", "led","left","leg","length","lesson","let","letter","level",
"library","lie","life","lift","light","like","likely","limited", "line","lion","lips","liquid","list","listen","little","live",
"living","load","local","locate","location","log","lonely","long", "longer","look","loose","lose","loss","lost","lot","loud",
"love","lovely","low","lower","luck","lucky","lunch","lungs", "lying","machine","machinery","mad","made","magic","magnet","mail",
"main","mainly","major","make","making","man","managed","manner",
"manufacturing","many","map","mark","market","married","mass","massage",
"master","material","mathematics","matter","may","maybe","me","meal",
"mean","means","meant","measure","meat","medicine","meet","melted",
"member","memory","men","mental","merely","met","metal","method", "mice","middle","might","mighty","mile","military","milk","mill",
"mind","mine","minerals","minute","mirror","missing","mission","mistake",
"mix","mixture","model","modern","molecular","moment","money","monkey",
"month","mood","moon","more","morning","most","mostly","mother",
"motion","motor","mountain","mouse","mouth","move","movement","movie",
"moving","mud","muscle","music","musical","must","my","myself",
"mysterious","nails","name","nation","national","native","natural","naturally",
"nature","near","nearby","nearer","nearest","nearly","necessary","neck",
"needed","needle","needs","negative","neighbor","neighborhood","nervous","nest",
"never","new","news","newspaper","next","nice","night","nine", "no","nobody","nodded","noise","none","noon","nor","north",
"nose","not","note","noted","nothing","notice","noun","now",
"number","numeral","nuts","object","observe","obtain","occasionally","occur",
"ocean","of","off","offer","office","officer","official","oil", "old","older","oldest","on","once","one","only","onto",
"open","operation","opinion","opportunity","opposite","or","orange","orbit",
"order","ordinary","organization","organized","origin","original","other","ought",
"our","ourselves","out","outer","outline","outside","over","own", "owner","oxygen","pack","package","page","paid","pain","paint",
"pair","palace","pale","pan","paper","paragraph","parallel","parent",
"park","part","particles","particular","particularly","partly","parts","party",
"pass","passage","past","path","pattern","pay","peace","pen",
"pencil","people","per","percent","perfect","perfectly","perhaps","period",
"person","personal","pet","phrase","physical","piano","pick","picture", "pictured","pie","piece","pig","pile","pilot","pine","pink",
"pipe","pitch","place","plain","plan","plane","planet","planned",
"planning","plant","plastic","plate","plates","play","pleasant","please",
"pleasure","plenty","plural","plus","pocket","poem","poet","poetry",
"point","pole","police","policeman","political","pond","pony","pool",
"poor","popular","population","porch","port","position","positive","possible",
"possibly","post","pot","potatoes","pound","pour","powder","power",
"powerful","practical","practice","prepare","present","president","press","pressure",
"pretty","prevent","previous","price","pride","primitive","principal","principle",
"printed","private","prize","probably","problem","process","produce","product",
"production","program","progress","promised","proper","properly","property","protection",
"proud","prove","provide","public","pull","pupil","pure","purple",
"purpose","push","put","putting","quarter","queen","question","quick",
"quickly","quiet","quietly","quite","rabbit","race","radio","railroad",
"rain","raise","ran","ranch","range","rapidly","rate","rather", "raw","rays","reach","read","reader","ready","real","realize",
"rear","reason","recall","receive","recent","recently","recognize","record",
"red","refer","refused","region","regular","related","relationship","religious",
"remain","remarkable","remember","remove","repeat","replace","replied","report",
"represent","require","research","respect","rest","result","return","review",
"rhyme","rhythm","rice","rich","ride","riding","right","ring", "rise","rising","river","road","roar","rock","rocket","rocky",
"rod","roll","roof","room","root","rope","rose","rough", "round","route","row","rubbed","rubber","rule","ruler","run",
"running","rush","sad","saddle","safe","safety","said","sail", "sale","salmon","salt","same","sand","sang","sat","satellites",
"satisfied","save","saved","saw","say","scale","scared","scene",
"school","science","scientific","scientist","score","screen","sea","search",
"season","seat","second","secret","section","see","seed","seeing",
"seems","seen","seldom","select","selection","sell","send","sense",
"sent","sentence","separate","series","serious","serve","service","sets",
"setting","settle","settlers","seven","several","shade","shadow","shake",
"shaking","shall","shallow","shape","share","sharp","she","sheep",
"sheet","shelf","shells","shelter","shine","shinning","ship","shirt",
"shoe","shoot","shop","shore","short","shorter","shot","should", "shoulder","shout","show","shown","shut","sick","sides","sight",
"sign","signal","silence","silent","silk","silly","silver","similar",
"simple","simplest","simply","since","sing","single","sink","sister", "sit","sitting","situation","six","size","skill","skin","sky",
"slabs","slave","sleep","slept","slide","slight","slightly","slip",
"slipped","slope","slow","slowly","small","smaller","smallest","smell",
"smile","smoke","smooth","snake","snow","so","soap","social", "society","soft","softly","soil","solar","sold","soldier","solid",
"solution","solve","some","somebody","somehow","someone","something","sometime",
"somewhere","son","song","soon","sort","sound","source","south",
"southern","space","speak","special","species","specific","speech","speed",
"spell","spend","spent","spider","spin","spirit","spite","split",
"spoken","sport","spread","spring","square","stage","stairs","stand",
"standard","star","stared","start","state","statement","station","stay",
"steady","steam","steel","steep","stems","step","stepped","stick",
"stiff","still","stock","stomach","stone","stood","stop","stopped",
"store","storm","story","stove","straight","strange","stranger","straw",
"stream","street","strength","stretch","strike","string","strip","strong",
"stronger","struck","structure","struggle","stuck","student","studied","studying",
"subject","substance","success","successful","such","sudden","suddenly","sugar",
"suggest","suit","sum","summer","sun","sunlight","supper","supply",
"support","suppose","sure","surface","surprise","surrounded","swam","sweet",
"swept","swim","swimming","swing","swung","syllable","symbol","system", "table","tail","take","taken","tales","talk","tall","tank",
"tape","task","taste","taught","tax","tea","teach","teacher",
"team","tears","teeth","telephone","television","tell","temperature","ten",
"tent","term","terrible","test","than","thank","that","thee",
"them","themselves","then","theory","there","therefore","these","they", "thick","thin","thing","think","third","thirty","this",
"those", "thou","though","thought","thousand","thread","three","threw","throat",
"through","throughout","throw","thrown","thumb","thus","thy","tide", "tie","tight","tightly","till","time","tin","tiny","tip",
"tired","title","to","tobacco","today","together","told","tomorrow", "tone","tongue","tonight","too","took","tool","top","topic",
"torn","total","touch","toward","tower","town","toy","trace",
"track","trade","traffic","trail","train","transportation","trap","travel",
"treated","tree","triangle","tribe","trick","tried","trip","troops",
"tropical","trouble","truck","trunk","truth","try","tube","tune", "turn","twelve","twenty","twice","two","type","typical","uncle",
"under","underline","understanding","unhappy","union","unit","universe","unknown",
"unless","until","unusual","up","upon","upper","upward","us", "use","useful","using","usual","usually","valley","valuable","value",
"vapor","variety","various","vast","vegetable","verb","vertical","very",
"vessels","victory","view","village","visit","visitor","voice","volume",
"vote","vowel","voyage","wagon","wait","walk","wall","want", "war","warm","warn","was","wash","waste","watch","water",
"wave","way","we","weak","wealth","wear","weather","week", "weigh","weight","welcome","well","went","were","west","western",
"wet","whale","what","whatever","wheat","wheel","when","whenever",
"where","wherever","whether","which","while","whispered","whistle","white",
"who","whole","whom","whose","why","wide","widely","wife", "wild","will","willing","win","wind","window","wing","winter",
"wire","wise","wish","with","within","without","wolf","women", "won","wonder","wonderful","wood","wooden","wool","word","wore",
"work","worker","world","worried","worry","worse","worth","would",
"wrapped","write","writer","writing","written","wrong","wrote","yard",
"year","yellow","yes","yesterday","yet","you","young","younger", "your","yourself","youth","zero","zoo"];
def getWords(word_list=None):
    """Pick three random words, print them space-separated, and return them.

    Args:
        word_list: optional sequence of words to choose from. Defaults to the
            module-level ``words`` list when None (backward compatible with
            the original zero-argument call).

    Returns:
        A list of the three chosen words. Repeats are possible, matching the
        original independent-draw behavior.
    """
    pool = words if word_list is None else word_list
    # random.choice is the idiomatic form of pool[random.randrange(0, len(pool))].
    chosen = [random.choice(pool) for _ in range(3)]
    # Parenthesized single-argument print works under both Python 2 and 3.
    print(chosen[0] + " " + chosen[1] + " " + chosen[2])
    # Image lookup was already disabled in the original:
    # gi = GoogleImagesSearch(); gi.query(word) for each word.
    return chosen
|
{
"content_hash": "10bf64995e3d3f87a62a29b395bc9d91",
"timestamp": "",
"source": "github",
"line_count": 212,
"max_line_length": 132,
"avg_line_length": 82.2877358490566,
"alnum_prop": 0.6415018629979937,
"repo_name": "ugosan/passwordgen",
"id": "f42e6bbb159647f403c75bf077f6d5ee507e99d9",
"size": "17445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/xkcd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105594"
},
{
"name": "Python",
"bytes": "131096"
}
],
"symlink_target": ""
}
|
import urllib
import twurl
import json
TWITTER_URL = 'https://api.twitter.com/1.1/friends/list.json'
while True:
print ''
acct = raw_input('Enter Twitter Account:')
if ( len(acct) < 1 ) : break
url = twurl.augment(TWITTER_URL,
{'screen_name': acct, 'count': '5'} )
print 'Retrieving', url
connection = urllib.urlopen(url)
data = connection.read()
headers = connection.info().dict
print 'Remaining', headers['x-rate-limit-remaining']
js = json.loads(data)
print json.dumps(js, indent=4)
for u in js['users'] :
print u['screen_name']
s = u['status']['text']
print ' ',s[:50]
|
{
"content_hash": "b67b882a0a84b563762972e5218f5b67",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 61,
"avg_line_length": 27.375,
"alnum_prop": 0.604261796042618,
"repo_name": "lastralab/Statistics",
"id": "90743ccd6e9e08ed8e4eb17139ff1eaf675ed27d",
"size": "657",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Specialization/Dr. Chuck-s Code/twitter2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "89"
},
{
"name": "Python",
"bytes": "249488"
},
{
"name": "Shell",
"bytes": "182"
}
],
"symlink_target": ""
}
|
"""Common utility class to help SDK harness to execute an SDF. """
import logging
import threading
from typing import TYPE_CHECKING
from typing import Any
from typing import NamedTuple
from typing import Optional
from typing import Tuple
from typing import Union
from apache_beam.transforms.core import WatermarkEstimatorProvider
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
from apache_beam.utils.windowed_value import WindowedValue
if TYPE_CHECKING:
from apache_beam.io.iobase import RestrictionProgress
from apache_beam.io.iobase import RestrictionTracker
from apache_beam.io.iobase import WatermarkEstimator
_LOGGER = logging.getLogger(__name__)
class SplitResultPrimary(NamedTuple):
  """Primary half of a split: the windowed value that keeps being processed."""
  primary_value: WindowedValue


class SplitResultResidual(NamedTuple):
  """Residual half of a split, with its resume watermark and optional delay."""
  residual_value: WindowedValue
  current_watermark: Timestamp
  deferred_timestamp: Optional[Duration]
class ThreadsafeRestrictionTracker(object):
  """A thread-safe wrapper which wraps a `RestrictionTracker`.

  This wrapper guarantees synchronization of modifying restrictions across
  multi-thread.
  """
  def __init__(self, restriction_tracker):
    # type: (RestrictionTracker) -> None
    from apache_beam.io.iobase import RestrictionTracker
    if not isinstance(restriction_tracker, RestrictionTracker):
      raise ValueError(
          'Initialize ThreadsafeRestrictionTracker requires '
          'RestrictionTracker.')
    self._restriction_tracker = restriction_tracker
    # Records an absolute timestamp when defer_remainder is called.
    self._timestamp = None
    # RLock because defer_remainder() calls self.try_split() while already
    # holding the lock.
    self._lock = threading.RLock()
    self._deferred_residual = None
    self._deferred_timestamp = None  # type: Optional[Union[Timestamp, Duration]]

  def current_restriction(self):
    with self._lock:
      return self._restriction_tracker.current_restriction()

  def try_claim(self, position):
    with self._lock:
      return self._restriction_tracker.try_claim(position)

  def defer_remainder(self, deferred_time=None):
    """Performs self-checkpoint on current processing restriction with an
    expected resuming time.

    Self-checkpoint could happen during processing elements. When executing an
    DoFn.process(), you may want to stop processing an element and resuming
    later if current element has been processed quit a long time or you also
    want to have some outputs from other elements. ``defer_remainder()`` can be
    called on per element if needed.

    Args:
      deferred_time: A relative ``Duration`` that indicates the ideal time gap
        between now and resuming, or an absolute ``Timestamp`` for resuming
        execution time. If the time_delay is None, the deferred work will be
        executed as soon as possible.

    Raises:
      ValueError: if deferred_time is neither a Duration, a Timestamp nor None.
    """
    # Record current time for calculating deferred_time later.
    with self._lock:
      self._timestamp = Timestamp.now()
      if deferred_time and not isinstance(deferred_time, (Duration, Timestamp)):
        # Fixed typo in original message ('deter_remainder').
        raise ValueError(
            'The timestamp of defer_remainder() should be a '
            'Duration or a Timestamp, or None.')
      self._deferred_timestamp = deferred_time
      checkpoint = self.try_split(0)
      if checkpoint:
        _, self._deferred_residual = checkpoint

  def check_done(self):
    with self._lock:
      return self._restriction_tracker.check_done()

  def current_progress(self):
    # type: () -> RestrictionProgress
    with self._lock:
      return self._restriction_tracker.current_progress()

  def try_split(self, fraction_of_remainder):
    with self._lock:
      return self._restriction_tracker.try_split(fraction_of_remainder)

  def deferred_status(self):
    # type: () -> Optional[Tuple[Any, Duration]]
    """Returns deferred work which is produced by ``defer_remainder()``.

    When there is a self-checkpoint performed, the system needs to fulfill the
    DelayedBundleApplication with deferred_work for a ProcessBundleResponse.
    The system calls this API to get deferred_residual with watermark together
    to help the runner to schedule a future work.

    Returns: (deferred_residual, time_delay) if having any residual, else None.
    """
    # Bug fix: the original read and mutated the deferred state without the
    # lock, despite this class advertising thread-safety.
    with self._lock:
      if self._deferred_residual:
        # If _deferred_timestamp is None, create Duration(0).
        if not self._deferred_timestamp:
          self._deferred_timestamp = Duration()
        # If an absolute timestamp is provided, calculate the delta between
        # the absoluted time and the time deferred_status() is called.
        elif isinstance(self._deferred_timestamp, Timestamp):
          self._deferred_timestamp = (self._deferred_timestamp - Timestamp.now())
        # If a Duration is provided, the deferred time should be:
        # provided duration - the spent time since the defer_remainder() is
        # called.
        elif isinstance(self._deferred_timestamp, Duration):
          self._deferred_timestamp -= (Timestamp.now() - self._timestamp)
        return self._deferred_residual, self._deferred_timestamp
      return None

  def is_bounded(self):
    # Consistency fix: every other delegate acquires the lock; do so here too.
    with self._lock:
      return self._restriction_tracker.is_bounded()
class RestrictionTrackerView(object):
  """A DoFn view of thread-safe RestrictionTracker.

  The RestrictionTrackerView wraps a ThreadsafeRestrictionTracker and only
  exposes APIs that will be called by a ``DoFn.process()``. During execution
  time, the RestrictionTrackerView will be fed into the ``DoFn.process`` as a
  restriction_tracker.
  """
  def __init__(self, threadsafe_restriction_tracker):
    # type: (ThreadsafeRestrictionTracker) -> None
    if not isinstance(threadsafe_restriction_tracker,
                      ThreadsafeRestrictionTracker):
      raise ValueError(
          'Initialize RestrictionTrackerView requires '
          'ThreadsafeRestrictionTracker.')
    self._threadsafe_restriction_tracker = threadsafe_restriction_tracker

  def current_restriction(self):
    return self._threadsafe_restriction_tracker.current_restriction()

  def try_claim(self, position):
    return self._threadsafe_restriction_tracker.try_claim(position)

  def defer_remainder(self, deferred_time=None):
    self._threadsafe_restriction_tracker.defer_remainder(deferred_time)

  def is_bounded(self):
    # Bug fix: the original dropped the delegate's result and always
    # returned None, which is falsy — callers asking "is this bounded?"
    # would always see an unbounded answer.
    return self._threadsafe_restriction_tracker.is_bounded()
class ThreadsafeWatermarkEstimator(object):
  """A threadsafe wrapper which wraps a WatermarkEstimator with locking
  mechanism to guarantee multi-thread safety.
  """
  def __init__(self, watermark_estimator):
    # type: (WatermarkEstimator) -> None
    # Imported locally; the module-level import is only under TYPE_CHECKING,
    # presumably to avoid a circular import — confirm before hoisting.
    from apache_beam.io.iobase import WatermarkEstimator
    if not isinstance(watermark_estimator, WatermarkEstimator):
      raise ValueError('Initializing Threadsafe requires a WatermarkEstimator')
    self._watermark_estimator = watermark_estimator
    self._lock = threading.Lock()
  def __getattr__(self, attr):
    # Invoked only for names not found on this wrapper itself: any other
    # attribute of the wrapped estimator is forwarded as a lock-guarded call.
    # NOTE(review): this assumes the forwarded attribute is callable — a
    # plain data attribute would be returned wrapped in method_wrapper.
    if hasattr(self._watermark_estimator, attr):
      def method_wrapper(*args, **kw):
        with self._lock:
          return getattr(self._watermark_estimator, attr)(*args, **kw)
      return method_wrapper
    raise AttributeError(attr)
  def get_estimator_state(self):
    # Read the wrapped estimator's state under the lock.
    with self._lock:
      return self._watermark_estimator.get_estimator_state()
  def current_watermark(self):
    # type: () -> Timestamp
    with self._lock:
      return self._watermark_estimator.current_watermark()
  def observe_timestamp(self, timestamp):
    # type: (Timestamp) -> None
    # Validate before taking the lock so bad input fails fast.
    if not isinstance(timestamp, Timestamp):
      raise ValueError(
          'Input of observe_timestamp should be a Timestamp '
          'object')
    with self._lock:
      self._watermark_estimator.observe_timestamp(timestamp)
class NoOpWatermarkEstimatorProvider(WatermarkEstimatorProvider):
  """A WatermarkEstimatorProvider which creates NoOpWatermarkEstimator for the
  framework.
  """
  def initial_estimator_state(self, element, restriction):
    # No state is tracked, so there is nothing to snapshot or restore.
    return None

  def create_watermark_estimator(self, estimator_state):
    # Deferred import, matching this file's pattern for apache_beam.io.iobase.
    from apache_beam.io.iobase import WatermarkEstimator

    class _DefaultNoOpEstimator(WatermarkEstimator):
      """Fallback estimator used when no custom one is supplied: it ignores
      every timestamp and reports no watermark and no state.
      """
      def get_estimator_state(self):
        return None

      def current_watermark(self):
        return None

      def observe_timestamp(self, timestamp):
        pass

    return _DefaultNoOpEstimator()
|
{
"content_hash": "48befd5f8a504a841fdc7e445190442c",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 81,
"avg_line_length": 36.87336244541485,
"alnum_prop": 0.7205116058739933,
"repo_name": "apache/beam",
"id": "bbb6b2de6e85b26fcb43d5aab86174be2472f4d6",
"size": "9250",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/sdf_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "70760"
},
{
"name": "Dart",
"bytes": "912687"
},
{
"name": "Dockerfile",
"bytes": "59805"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "5508697"
},
{
"name": "Groovy",
"bytes": "936956"
},
{
"name": "HCL",
"bytes": "103872"
},
{
"name": "HTML",
"bytes": "184151"
},
{
"name": "Java",
"bytes": "41223435"
},
{
"name": "JavaScript",
"bytes": "119576"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "220768"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "10728612"
},
{
"name": "Rust",
"bytes": "5168"
},
{
"name": "SCSS",
"bytes": "318364"
},
{
"name": "Sass",
"bytes": "25954"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "375834"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "1997829"
}
],
"symlink_target": ""
}
|
"""Exercise 15: Reverse Word Order
Write a program (using functions!) that asks the user for a long string
containing multiple words. Print back to the user the same string,
except with the words in backwards order. For example, say I type the
string:
My name is Michele
Then I would see the string:
Michele is name My
shown back to me.
"""
def reverse_word_order(word: str):
    """Return *word* with its space-separated tokens in reverse order."""
    tokens = word.split(" ")
    tokens.reverse()
    return " ".join(tokens)
# Prompt the user for a sentence, then echo it back with the words reversed.
long_string = input("Write a long string: ")
print("The string in backwards order: %s" % reverse_word_order(long_string))
|
{
"content_hash": "5e67a1aac222d751995669fbe56ccd28",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 76,
"avg_line_length": 26.857142857142858,
"alnum_prop": 0.7021276595744681,
"repo_name": "lcnodc/codes",
"id": "8985efce71a4233ebda37ae03c835ba56f03fe85",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "09-revisao/practice_python/reverse_word_order.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "8874"
},
{
"name": "Python",
"bytes": "80970"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.utils import simplejson
import logging
from model.testfile import TestFile
# File names for the aggregated results payloads.
JSON_RESULTS_FILE = "results.json"
JSON_RESULTS_FILE_SMALL = "results-small.json"
# JSONP wrapper emitted around the serialized payload (see
# _strip_prefix_suffix / _generate_file_data).
JSON_RESULTS_PREFIX = "ADD_RESULTS("
JSON_RESULTS_SUFFIX = ");"
# Keys used inside the results dictionary.
JSON_RESULTS_VERSION_KEY = "version"
JSON_RESULTS_BUILD_NUMBERS = "buildNumbers"
JSON_RESULTS_TESTS = "tests"
JSON_RESULTS_RESULTS = "results"
JSON_RESULTS_TIMES = "times"
# Single-character result codes: pass / no data.
JSON_RESULTS_PASS = "P"
JSON_RESULTS_NO_DATA = "N"
# Minimum time worth recording — units not shown in this chunk; presumably
# seconds (TODO confirm against the writer of the "times" entries).
JSON_RESULTS_MIN_TIME = 1
# Format version of the aggregated JSON.
JSON_RESULTS_VERSION = 3
# Caps on how many builds each aggregate file retains.
JSON_RESULTS_MAX_BUILDS = 750
JSON_RESULTS_MAX_BUILDS_SMALL = 200
class JsonResults(object):
@classmethod
def _strip_prefix_suffix(cls, data):
"""Strip out prefix and suffix of json results string.
Args:
data: json file content.
Returns:
json string without prefix and suffix.
"""
assert(data.startswith(JSON_RESULTS_PREFIX))
assert(data.endswith(JSON_RESULTS_SUFFIX))
return data[len(JSON_RESULTS_PREFIX):
len(data) - len(JSON_RESULTS_SUFFIX)]
@classmethod
def _generate_file_data(cls, json, sort_keys=False):
"""Given json string, generate file content data by adding
prefix and suffix.
Args:
json: json string without prefix and suffix.
Returns:
json file data.
"""
data = simplejson.dumps(json, separators=(',', ':'),
sort_keys=sort_keys)
return JSON_RESULTS_PREFIX + data + JSON_RESULTS_SUFFIX
@classmethod
def _load_json(cls, file_data):
"""Load json file to a python object.
Args:
file_data: json file content.
Returns:
json object or
None on failure.
"""
json_results_str = cls._strip_prefix_suffix(file_data)
if not json_results_str:
logging.warning("No json results data.")
return None
try:
return simplejson.loads(json_results_str)
except Exception, err:
logging.debug(json_results_str)
logging.error("Failed to load json results: %s", str(err))
return None
@classmethod
def _merge_json(cls, aggregated_json, incremental_json, num_runs):
"""Merge incremental json into aggregated json results.
Args:
aggregated_json: aggregated json object.
incremental_json: incremental json object.
num_runs: number of total runs to include.
Returns:
True if merge succeeds or
False on failure.
"""
# Merge non tests property data.
# Tests properties are merged in _merge_tests.
if not cls._merge_non_test_data(aggregated_json, incremental_json, num_runs):
return False
# Merge tests results and times
incremental_tests = incremental_json[JSON_RESULTS_TESTS]
if incremental_tests:
aggregated_tests = aggregated_json[JSON_RESULTS_TESTS]
cls._merge_tests(aggregated_tests, incremental_tests, num_runs)
return True
    @classmethod
    def _merge_non_test_data(cls, aggregated_json, incremental_json, num_runs):
        """Merge incremental non tests property data into aggregated json results.

        Args:
            aggregated_json: aggregated json object.
            incremental_json: incremental json object.
            num_runs: number of total runs to include.

        Returns:
            True if merge succeeds or
            False on failure.
        """
        incremental_builds = incremental_json[JSON_RESULTS_BUILD_NUMBERS]
        aggregated_builds = aggregated_json[JSON_RESULTS_BUILD_NUMBERS]
        # Index 0 holds the most recent build already aggregated (builds are
        # stored newest-first).
        aggregated_build_number = int(aggregated_builds[0])
        # Loop through all incremental builds, start from the oldest run.
        for index in reversed(range(len(incremental_builds))):
            build_number = int(incremental_builds[index])
            logging.debug("Merging build %s, incremental json index: %d.",
                build_number, index)
            # Return if not all build numbers in the incremental json results
            # are newer than the most recent build in the aggregated results.
            # FIXME: make this case work.
            if build_number < aggregated_build_number:
                logging.warning(("Build %d in incremental json is older than "
                    "the most recent build in aggregated results: %d"),
                    build_number, aggregated_build_number)
                return False
            # Return if the build number is duplicated.
            # FIXME: skip the duplicated build and merge rest of the results.
            # Need to be careful on skipping the corresponding value in
            # _merge_tests because the property data for each test could
            # be accumulated.
            if build_number == aggregated_build_number:
                logging.warning("Duplicate build %d in incremental json",
                    build_number)
                return False
            # Merge this build into aggregated results.
            cls._merge_one_build(aggregated_json, incremental_json, index, num_runs)
        return True
@classmethod
def _merge_one_build(cls, aggregated_json, incremental_json,
incremental_index, num_runs):
"""Merge one build of incremental json into aggregated json results.
Args:
aggregated_json: aggregated json object.
incremental_json: incremental json object.
incremental_index: index of the incremental json results to merge.
num_runs: number of total runs to include.
"""
for key in incremental_json.keys():
# Merge json results except "tests" properties (results, times etc).
# "tests" properties will be handled separately.
if key == JSON_RESULTS_TESTS:
continue
if key in aggregated_json:
aggregated_json[key].insert(
0, incremental_json[key][incremental_index])
aggregated_json[key] = \
aggregated_json[key][:num_runs]
else:
aggregated_json[key] = incremental_json[key]
@classmethod
def _merge_tests(cls, aggregated_json, incremental_json, num_runs):
"""Merge "tests" properties:results, times.
Args:
aggregated_json: aggregated json object.
incremental_json: incremental json object.
num_runs: number of total runs to include.
"""
all_tests = (set(aggregated_json.iterkeys()) |
set(incremental_json.iterkeys()))
for test_name in all_tests:
if test_name in aggregated_json:
aggregated_test = aggregated_json[test_name]
if test_name in incremental_json:
incremental_test = incremental_json[test_name]
results = incremental_test[JSON_RESULTS_RESULTS]
times = incremental_test[JSON_RESULTS_TIMES]
else:
results = [[1, JSON_RESULTS_NO_DATA]]
times = [[1, 0]]
cls._insert_item_run_length_encoded(
results, aggregated_test[JSON_RESULTS_RESULTS], num_runs)
cls._insert_item_run_length_encoded(
times, aggregated_test[JSON_RESULTS_TIMES], num_runs)
cls._normalize_results_json(test_name, aggregated_json, num_runs)
else:
aggregated_json[test_name] = incremental_json[test_name]
@classmethod
def _insert_item_run_length_encoded(cls, incremental_item, aggregated_item, num_runs):
"""Inserts the incremental run-length encoded results into the aggregated
run-length encoded results.
Args:
incremental_item: incremental run-length encoded results.
aggregated_item: aggregated run-length encoded results.
num_runs: number of total runs to include.
"""
for item in incremental_item:
if len(aggregated_item) and item[1] == aggregated_item[0][1]:
aggregated_item[0][0] = min(
aggregated_item[0][0] + item[0], num_runs)
else:
aggregated_item.insert(0, item)
@classmethod
def _normalize_results_json(cls, test_name, aggregated_json, num_runs):
""" Prune tests where all runs pass or tests that no longer exist and
truncate all results to num_runs.
Args:
test_name: Name of the test.
aggregated_json: The JSON object with all the test results for
this builder.
num_runs: number of total runs to include.
"""
aggregated_test = aggregated_json[test_name]
aggregated_test[JSON_RESULTS_RESULTS] = \
cls._remove_items_over_max_number_of_builds(
aggregated_test[JSON_RESULTS_RESULTS], num_runs)
aggregated_test[JSON_RESULTS_TIMES] = \
cls._remove_items_over_max_number_of_builds(
aggregated_test[JSON_RESULTS_TIMES], num_runs)
is_all_pass = cls._is_results_all_of_type(
aggregated_test[JSON_RESULTS_RESULTS], JSON_RESULTS_PASS)
is_all_no_data = cls._is_results_all_of_type(
aggregated_test[JSON_RESULTS_RESULTS], JSON_RESULTS_NO_DATA)
max_time = max(
[time[1] for time in aggregated_test[JSON_RESULTS_TIMES]])
# Remove all passes/no-data from the results to reduce noise and
# filesize. If a test passes every run, but
# takes >= JSON_RESULTS_MIN_TIME to run, don't throw away the data.
if (is_all_no_data or
(is_all_pass and max_time < JSON_RESULTS_MIN_TIME)):
del aggregated_json[test_name]
@classmethod
def _remove_items_over_max_number_of_builds(cls, encoded_list, num_runs):
"""Removes items from the run-length encoded list after the final
item that exceeds the max number of builds to track.
Args:
encoded_results: run-length encoded results. An array of arrays, e.g.
[[3,'A'],[1,'Q']] encodes AAAQ.
num_runs: number of total runs to include.
"""
num_builds = 0
index = 0
for result in encoded_list:
num_builds = num_builds + result[0]
index = index + 1
if num_builds >= num_runs:
return encoded_list[:index]
return encoded_list
@classmethod
def _is_results_all_of_type(cls, results, type):
"""Returns whether all the results are of the given type
(e.g. all passes).
"""
return len(results) == 1 and results[0][1] == type
@classmethod
def _check_json(cls, builder, json):
"""Check whether the given json is valid.
Args:
builder: builder name this json is for.
json: json object to check.
Returns:
True if the json is valid or
False otherwise.
"""
version = json[JSON_RESULTS_VERSION_KEY]
if version > JSON_RESULTS_VERSION:
logging.error("Results JSON version '%s' is not supported.",
version)
return False
if not builder in json:
logging.error("Builder '%s' is not in json results.", builder)
return False
results_for_builder = json[builder]
if not JSON_RESULTS_BUILD_NUMBERS in results_for_builder:
logging.error("Missing build number in json results.")
return False
return True
@classmethod
def merge(cls, builder, aggregated, incremental, num_runs, sort_keys=False):
"""Merge incremental json file data with aggregated json file data.
Args:
builder: builder name.
aggregated: aggregated json file data.
incremental: incremental json file data.
sort_key: whether or not to sort key when dumping json results.
Returns:
Merged json file data if merge succeeds or
None on failure.
"""
if not incremental:
logging.warning("Nothing to merge.")
return None
logging.info("Loading incremental json...")
incremental_json = cls._load_json(incremental)
if not incremental_json:
return None
logging.info("Checking incremental json...")
if not cls._check_json(builder, incremental_json):
return None
logging.info("Loading existing aggregated json...")
aggregated_json = cls._load_json(aggregated)
if not aggregated_json:
return incremental
logging.info("Checking existing aggregated json...")
if not cls._check_json(builder, aggregated_json):
return incremental
logging.info("Merging json results...")
try:
if not cls._merge_json(aggregated_json[builder], incremental_json[builder], num_runs):
return None
except Exception, err:
logging.error("Failed to merge json results: %s", str(err))
return None
aggregated_json[JSON_RESULTS_VERSION_KEY] = JSON_RESULTS_VERSION
return cls._generate_file_data(aggregated_json, sort_keys)
@classmethod
def update(cls, master, builder, test_type, incremental):
"""Update datastore json file data by merging it with incremental json
file. Writes the large file and a small file. The small file just stores
fewer runs.
Args:
master: master name.
builder: builder name.
test_type: type of test results.
incremental: incremental json file data to merge.
Returns:
Large TestFile object if update succeeds or
None on failure.
"""
small_file_updated = cls.update_file(master, builder, test_type, incremental, JSON_RESULTS_FILE_SMALL, JSON_RESULTS_MAX_BUILDS_SMALL)
large_file_updated = cls.update_file(master, builder, test_type, incremental, JSON_RESULTS_FILE, JSON_RESULTS_MAX_BUILDS)
return small_file_updated and large_file_updated
@classmethod
def update_file(cls, master, builder, test_type, incremental, filename, num_runs):
files = TestFile.get_files(master, builder, test_type, filename)
if files:
file = files[0]
new_results = cls.merge(builder, file.data, incremental, num_runs)
else:
# Use the incremental data if there is no aggregated file to merge.
file = TestFile()
file.master = master
file.builder = builder
file.test_type = test_type
file.name = filename
new_results = incremental
logging.info("No existing json results, incremental json is saved.")
if not new_results or not file.save(new_results):
logging.info(
"Update failed, master: %s, builder: %s, test_type: %s, name: %s." %
(master, builder, test_type, filename))
return False
return True
@classmethod
def get_test_list(cls, builder, json_file_data):
"""Get list of test names from aggregated json file data.
Args:
json_file_data: json file data that has all test-data and
non-test-data.
Returns:
json file with test name list only. The json format is the same
as the one saved in datastore, but all non-test-data and test detail
results are removed.
"""
logging.debug("Loading test results json...")
json = cls._load_json(json_file_data)
if not json:
return None
logging.debug("Checking test results json...")
if not cls._check_json(builder, json):
return None
test_list_json = {}
tests = json[builder][JSON_RESULTS_TESTS]
test_list_json[builder] = {
"tests": dict.fromkeys(tests, {})}
return cls._generate_file_data(test_list_json)
|
{
"content_hash": "e15fffce2c08554533ee3beaa4838470",
"timestamp": "",
"source": "github",
"line_count": 445,
"max_line_length": 141,
"avg_line_length": 36.83595505617978,
"alnum_prop": 0.595473401659346,
"repo_name": "Xperia-Nicki/android_platform_sony_nicki",
"id": "d61a860564a2adc53f7d570c3aa0cc3b5406d8e2",
"size": "17922",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "external/webkit/Tools/TestResultServer/model/jsonresults.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "212775"
},
{
"name": "Awk",
"bytes": "19252"
},
{
"name": "C",
"bytes": "68667466"
},
{
"name": "C#",
"bytes": "55625"
},
{
"name": "C++",
"bytes": "54670920"
},
{
"name": "CLIPS",
"bytes": "12224"
},
{
"name": "CSS",
"bytes": "283405"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Java",
"bytes": "4882"
},
{
"name": "JavaScript",
"bytes": "19597804"
},
{
"name": "Objective-C",
"bytes": "5849156"
},
{
"name": "PHP",
"bytes": "17224"
},
{
"name": "Pascal",
"bytes": "42411"
},
{
"name": "Perl",
"bytes": "1632149"
},
{
"name": "Prolog",
"bytes": "214621"
},
{
"name": "Python",
"bytes": "3493321"
},
{
"name": "R",
"bytes": "290"
},
{
"name": "Ruby",
"bytes": "78743"
},
{
"name": "Scilab",
"bytes": "554"
},
{
"name": "Shell",
"bytes": "265637"
},
{
"name": "TypeScript",
"bytes": "45459"
},
{
"name": "XSLT",
"bytes": "11219"
}
],
"symlink_target": ""
}
|
"""
Client side of the senlin engine RPC API.
"""
from oslo_config import cfg
from senlin.common import consts
from senlin.common import messaging
from senlin.objects import base as object_base
# Lazily-created EngineClient singleton shared across the process.
_CLIENT = None


def get_engine_client():
    """Return the shared EngineClient instance, building it on first use."""
    global _CLIENT
    client = _CLIENT
    if not client:
        client = _CLIENT = EngineClient()
    return client
class EngineClient(object):
    """Client-side proxy for the senlin engine RPC API.

    Version history:
      1.0 - Initial version (Mitaka 1.0 release)
      1.1 - Add cluster-collect call.
    """

    def __init__(self):
        # Versioned-object serializer lets RPC payloads round-trip
        # senlin objects transparently.
        self._client = messaging.get_rpc_client(
            consts.CONDUCTOR_TOPIC,
            cfg.CONF.host,
            serializer=object_base.VersionedObjectSerializer())

    @staticmethod
    def make_msg(method, **kwargs):
        # Package a method name with its kwargs for a later cast().
        return method, kwargs

    def call(self, ctxt, method, req, version=None):
        """Synchronously invoke an engine service method.

        :param ctxt: The request context object.
        :param method: The name of the method to be invoked.
        :param req: A dict containing a request object.
        :param version: The engine RPC API version requested, or None for
                        the client default.
        """
        client = self._client
        if version is not None:
            client = client.prepare(version=version)
        return client.call(ctxt, method, req=req)

    def cast(self, ctxt, msg, version=None):
        """Asynchronously invoke an engine service method.

        ``msg`` is a ``(method, kwargs)`` pair as produced by make_msg().
        """
        method, kwargs = msg
        client = self._client
        if version is not None:
            client = client.prepare(version=version)
        return client.cast(ctxt, method, **kwargs)
|
{
"content_hash": "005c30c788489dcde3b361e321f08f74",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 71,
"avg_line_length": 28.016393442622952,
"alnum_prop": 0.6026916325336454,
"repo_name": "openstack/senlin",
"id": "3b02d69e57fff4fab1a1541891ba08132760465d",
"size": "2258",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senlin/rpc/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "69788"
},
{
"name": "Python",
"bytes": "3755028"
},
{
"name": "Shell",
"bytes": "24272"
}
],
"symlink_target": ""
}
|
"""Utility functions for training."""
import six
import tensorflow as tf
from deeplab.core import preprocess_utils
def _div_maybe_zero(total_loss, num_present):
  """Divides total_loss by num_present, returning 0 when nothing is present."""
  # Gate is 1.0 when at least one pixel is present, else 0.0; the epsilon
  # in the denominator avoids a division by zero in the zero-pixel case.
  gate = tf.to_float(num_present > 0)
  safe_denominator = tf.maximum(1e-5, num_present)
  return gate * tf.div(total_loss, safe_denominator)
def add_softmax_cross_entropy_loss_for_each_scale(scales_to_logits,
                                                  labels,
                                                  num_classes,
                                                  ignore_label,
                                                  loss_weight=1.0,
                                                  upsample_logits=True,
                                                  hard_example_mining_step=0,
                                                  top_k_percent_pixels=1.0,
                                                  scope=None):
  """Adds softmax cross entropy loss for logits of each scale.

  The loss is registered into the tf.losses collection (nothing is returned).

  Args:
    scales_to_logits: A map from logits names for different scales to logits.
      The logits have shape [batch, logits_height, logits_width, num_classes].
    labels: Groundtruth labels with shape [batch, image_height, image_width, 1].
    num_classes: Integer, number of target classes.
    ignore_label: Integer, label to ignore.
    loss_weight: Float, loss weight.
    upsample_logits: Boolean, upsample logits or not.
    hard_example_mining_step: An integer, the training step in which the hard
      exampling mining kicks off. Note that we gradually reduce the mining
      percent to the top_k_percent_pixels. For example, if
      hard_example_mining_step = 100K and top_k_percent_pixels = 0.25, then
      mining percent will gradually reduce from 100% to 25% until 100K steps
      after which we only mine top 25% pixels.
    top_k_percent_pixels: A float, the value lies in [0.0, 1.0]. When its value
      < 1.0, only compute the loss for the top k percent pixels (e.g., the top
      20% pixels). This is useful for hard pixel mining.
    scope: String, the scope for the loss.

  Raises:
    ValueError: Label or logits is None.
  """
  if labels is None:
    raise ValueError('No label for softmax cross entropy loss.')
  for scale, logits in six.iteritems(scales_to_logits):
    loss_scope = None
    if scope:
      loss_scope = '%s_%s' % (scope, scale)
    if upsample_logits:
      # Label is not downsampled, and instead we upsample logits.
      logits = tf.image.resize_bilinear(
          logits,
          preprocess_utils.resolve_shape(labels, 4)[1:3],
          align_corners=True)
      scaled_labels = labels
    else:
      # Label is downsampled to the same size as logits.
      scaled_labels = tf.image.resize_nearest_neighbor(
          labels,
          preprocess_utils.resolve_shape(logits, 4)[1:3],
          align_corners=True)
    scaled_labels = tf.reshape(scaled_labels, shape=[-1])
    # Pixels labelled with ignore_label get weight 0; everything else gets
    # loss_weight.
    not_ignore_mask = tf.to_float(tf.not_equal(scaled_labels,
                                               ignore_label)) * loss_weight
    one_hot_labels = tf.one_hot(
        scaled_labels, num_classes, on_value=1.0, off_value=0.0)
    if top_k_percent_pixels == 1.0:
      # Compute the loss for all pixels.
      tf.losses.softmax_cross_entropy(
          one_hot_labels,
          tf.reshape(logits, shape=[-1, num_classes]),
          weights=not_ignore_mask,
          scope=loss_scope)
    else:
      # Hard-example mining: only the top-k highest-loss pixels contribute.
      logits = tf.reshape(logits, shape=[-1, num_classes])
      weights = not_ignore_mask
      with tf.name_scope(loss_scope, 'softmax_hard_example_mining',
                         [logits, one_hot_labels, weights]):
        one_hot_labels = tf.stop_gradient(
            one_hot_labels, name='labels_stop_gradient')
        pixel_losses = tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=one_hot_labels,
            logits=logits,
            name='pixel_losses')
        weighted_pixel_losses = tf.multiply(pixel_losses, weights)
        num_pixels = tf.to_float(tf.shape(logits)[0])
        # Compute the top_k_percent pixels based on current training step.
        if hard_example_mining_step == 0:
          # Directly focus on the top_k pixels.
          top_k_pixels = tf.to_int32(top_k_percent_pixels * num_pixels)
        else:
          # Gradually reduce the mining percent to top_k_percent_pixels.
          global_step = tf.to_float(tf.train.get_or_create_global_step())
          ratio = tf.minimum(1.0, global_step / hard_example_mining_step)
          top_k_pixels = tf.to_int32(
              (ratio * top_k_percent_pixels + (1.0 - ratio)) * num_pixels)
        top_k_losses, _ = tf.nn.top_k(weighted_pixel_losses,
                                      k=top_k_pixels,
                                      sorted=True,
                                      name='top_k_percent_pixels')
        total_loss = tf.reduce_sum(top_k_losses)
        # Normalize by the number of mined pixels that actually have loss.
        num_present = tf.reduce_sum(
            tf.to_float(tf.not_equal(top_k_losses, 0.0)))
        loss = _div_maybe_zero(total_loss, num_present)
        tf.losses.add_loss(loss)
def get_model_init_fn(train_logdir,
                      tf_initial_checkpoint,
                      initialize_last_layer,
                      last_layers,
                      ignore_missing_vars=False):
  """Gets the function initializing model variables from a checkpoint.

  A checkpoint already present in train_logdir (i.e. a resumed run) takes
  precedence: in that case no init function is returned.

  Args:
    train_logdir: Log directory for training.
    tf_initial_checkpoint: TensorFlow checkpoint for initialization.
    initialize_last_layer: Initialize last layer or not.
    last_layers: Last layers of the model.
    ignore_missing_vars: Ignore missing variables in the checkpoint.

  Returns:
    Initialization function suitable for tf.train.Scaffold's init_fn, or
    None if no initialization should happen.
  """
  if tf_initial_checkpoint is None:
    tf.logging.info('Not initializing the model from a checkpoint.')
    return None
  if tf.train.latest_checkpoint(train_logdir):
    tf.logging.info('Ignoring initialization; other checkpoint exists')
    return None
  tf.logging.info('Initializing model from path: %s', tf_initial_checkpoint)
  # Variables that will not be restored.
  exclude_list = ['global_step']
  if not initialize_last_layer:
    exclude_list.extend(last_layers)
  variables_to_restore = tf.contrib.framework.get_variables_to_restore(
      exclude=exclude_list)
  if variables_to_restore:
    init_op, init_feed_dict = tf.contrib.framework.assign_from_checkpoint(
        tf_initial_checkpoint,
        variables_to_restore,
        ignore_missing_vars=ignore_missing_vars)
    global_step = tf.train.get_or_create_global_step()
    def restore_fn(unused_scaffold, sess):
      # Restore weights, then touch global_step so it is initialized.
      sess.run(init_op, init_feed_dict)
      sess.run([global_step])
    return restore_fn
  return None
def get_model_gradient_multipliers(last_layers, last_layer_gradient_multiplier):
  """Builds the per-variable gradient multiplier map.

  Models fine-tuned from image-classification checkpoints typically use a
  larger learning rate for the freshly initialized last layers, and double
  the rate for bias terms.

  Args:
    last_layers: Scopes of last layers.
    last_layer_gradient_multiplier: The gradient multiplier for last layers.

  Returns:
    The gradient multiplier map with variable op names as keys and
    multipliers as values.
  """
  multipliers = {}
  for var in tf.model_variables():
    name = var.op.name
    is_bias = 'biases' in name
    if is_bias:
      # Biases learn at twice the base rate.
      multipliers[name] = 2.
    # Last-layer variables get the boosted rate (doubled again for biases).
    for layer in last_layers:
      if layer in name:
        multipliers[name] = (2 * last_layer_gradient_multiplier if is_bias
                             else last_layer_gradient_multiplier)
        break
  return multipliers
def get_model_learning_rate(learning_policy,
                            base_learning_rate,
                            learning_rate_decay_step,
                            learning_rate_decay_factor,
                            training_number_of_steps,
                            learning_power,
                            slow_start_step,
                            slow_start_learning_rate,
                            slow_start_burnin_type='none'):
  """Gets model's learning rate.

  Computes the model's learning rate for different learning policy.
  Right now, only "step" and "poly" are supported.
  (1) The learning policy for "step" is computed as follows:
    current_learning_rate = base_learning_rate *
      learning_rate_decay_factor ^ (global_step / learning_rate_decay_step)
  See tf.train.exponential_decay for details.
  (2) The learning policy for "poly" is computed as follows:
    current_learning_rate = base_learning_rate *
      (1 - global_step / training_number_of_steps) ^ learning_power

  Args:
    learning_policy: Learning rate policy for training.
    base_learning_rate: The base learning rate for model training.
    learning_rate_decay_step: Decay the base learning rate at a fixed step.
    learning_rate_decay_factor: The rate to decay the base learning rate.
    training_number_of_steps: Number of steps for training.
    learning_power: Power used for 'poly' learning policy.
    slow_start_step: Training model with small learning rate for the first
      few steps.
    slow_start_learning_rate: The learning rate employed during slow start.
    slow_start_burnin_type: The burnin type for the slow start stage. Can be
      `none` which means no burnin or `linear` which means the learning rate
      increases linearly from slow_start_learning_rate and reaches
      base_learning_rate after slow_start_steps.

  Returns:
    Learning rate for the specified learning policy.

  Raises:
    ValueError: If learning policy or slow start burnin type is not recognized.
  """
  global_step = tf.train.get_or_create_global_step()
  adjusted_global_step = global_step
  # With a burnin phase, the decay schedule starts counting from the end of
  # the slow-start phase rather than from step 0.
  if slow_start_burnin_type != 'none':
    adjusted_global_step -= slow_start_step
  if learning_policy == 'step':
    learning_rate = tf.train.exponential_decay(
        base_learning_rate,
        adjusted_global_step,
        learning_rate_decay_step,
        learning_rate_decay_factor,
        staircase=True)
  elif learning_policy == 'poly':
    learning_rate = tf.train.polynomial_decay(
        base_learning_rate,
        adjusted_global_step,
        training_number_of_steps,
        end_learning_rate=0,
        power=learning_power)
  else:
    raise ValueError('Unknown learning policy.')
  adjusted_slow_start_learning_rate = slow_start_learning_rate
  if slow_start_burnin_type == 'linear':
    # Do linear burnin. Increase linearly from slow_start_learning_rate and
    # reach base_learning_rate after (global_step >= slow_start_steps).
    adjusted_slow_start_learning_rate = (
        slow_start_learning_rate +
        (base_learning_rate - slow_start_learning_rate) *
        tf.to_float(global_step) / slow_start_step)
  elif slow_start_burnin_type != 'none':
    raise ValueError('Unknown burnin type.')
  # Employ small learning rate at the first few steps for warm start.
  return tf.where(global_step < slow_start_step,
                  adjusted_slow_start_learning_rate, learning_rate)
|
{
"content_hash": "d075ddfe49cea7032e81bc56e38a3688",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 80,
"avg_line_length": 40.81494661921708,
"alnum_prop": 0.6331851076815764,
"repo_name": "derekjchow/models",
"id": "477fdebc35a890f7410a5b3874e5ffcb4ed2cc19",
"size": "12157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/deeplab/utils/train_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "2831692"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "14201542"
},
{
"name": "Shell",
"bytes": "158255"
}
],
"symlink_target": ""
}
|
"""
WSGI config for code_god project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before building the application; an
# existing DJANGO_SETTINGS_MODULE in the environment takes precedence.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "code_god.settings")
from django.core.wsgi import get_wsgi_application
# Module-level WSGI callable that servers (gunicorn, uWSGI, mod_wsgi) import.
application = get_wsgi_application()
|
{
"content_hash": "f773d2c09aa7adf34c738e94f224b473",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 27.928571428571427,
"alnum_prop": 0.7698209718670077,
"repo_name": "caihaoyu/code_god",
"id": "f9ec53130ef9b6a98b80bc0f60068c6aa15dc826",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code_god/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "641665"
},
{
"name": "CoffeeScript",
"bytes": "105801"
},
{
"name": "JavaScript",
"bytes": "1036595"
},
{
"name": "Python",
"bytes": "28695"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class FollowersConfig(AppConfig):
    """Django AppConfig for the ``apps.followers`` application."""
    name = 'apps.followers'
|
{
"content_hash": "d3b0e1a368c6611a2e8b25d17fb40521",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 33,
"avg_line_length": 19.6,
"alnum_prop": 0.7653061224489796,
"repo_name": "kamilgregorczyk/instalike",
"id": "67e8d2edefacdc905f9d8b1e5e49d3536b345bfc",
"size": "98",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/followers/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "286"
},
{
"name": "Python",
"bytes": "63452"
},
{
"name": "Shell",
"bytes": "876"
}
],
"symlink_target": ""
}
|
from TwitterSearch import *
from mapgeist.api import text_mind_map
from mapgeist.visualization import visualize_tree_2D
# Text phrase to be searched on Twitter.
search_text = 'artificial intelligence'
# Number of nodes required in the MindMap.
N = 50
# Filepath (png format) to store the MindMap to.
mappath = 'MindMap.png'
try:
    # Create a TwitterSearchOrder object.
    tso = TwitterSearchOrder()
    tso.set_keywords([search_text])
    # Set language here.
    tso.set_language('en')
    # You will need to have your own Twitter App tokens.
    ts = TwitterSearch(
        consumer_key = '',
        consumer_secret = '',
        access_token = '',
        access_token_secret = ''
    )
    i = 0
    f = open('tweets.txt', 'w')
    for tweet in ts.search_tweets_iterable(tso):
        # NOTE(review): under Python 3 this yields the repr "b'...'" rather
        # than the decoded text — presumably written for Python 2; confirm.
        line = str(tweet['text'].encode('utf-8'))
        terms = line.split(' ')
        termlist = []
        for x in terms:
            try:
                # Do some rudimentary preprocessing: drop retweet markers,
                # mentions and URLs; keep hashtag text and words > 3 chars.
                if x == 'RT':
                    continue
                elif x[0] == '@':
                    continue
                elif x[0] == '#':
                    termlist.append(x[1:])
                elif '/' in x:
                    continue
                else:
                    if len(x) > 3:
                        termlist.append(x)
            except:
                # Bare except guards x[0] on empty tokens (IndexError);
                # anything else is silently skipped too.
                pass
        line = ' '.join(termlist) + '.'
        if len(line) > 3:
            f.write(line+'\n')
        i += 1
        # Cap the corpus at ~1000 tweets.
        if i > 1000:
            break
    f.close()
except TwitterSearchException as e:
    print(e)
# Use MapGeist now: build an N-node mind map from the collected tweets and
# render it to mappath. NOTE(review): this still runs if the search above
# failed, and will then fail on a missing/empty tweets.txt.
mindmap, root = text_mind_map('tweets.txt', N)
visualize_tree_2D(mindmap, root, mappath)
|
{
"content_hash": "7a55e8fe5a5b48d992afc1b5e6b2883e",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 54,
"avg_line_length": 25.294117647058822,
"alnum_prop": 0.5215116279069767,
"repo_name": "sachinrjoglekar/MapGeist",
"id": "80cb584151104f92fac0dccc81ab731578863f3d",
"size": "1999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/twitter_mapgeist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "4222"
},
{
"name": "Python",
"bytes": "19180"
}
],
"symlink_target": ""
}
|
import os
import shutil
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from filebrowser.settings import DIRECTORY, VERSIONS_BASEDIR
from filebrowser.base import FileObject
from filebrowser.sites import site
class FilebrowserTestCase(TestCase):
    """Base test case providing a staff user and a scratch directory tree.

    setUp builds folder/subfolder directories under the filebrowser DIRECTORY
    and a set of FileObject fixtures pointing into them; tearDown removes the
    temporary test path.
    """
    @classmethod
    def setUpClass(cls):
        super(FilebrowserTestCase, cls).setUpClass()
        # Shared staff user for views that require authenticated access.
        cls.user = User.objects.create_user('testuser', 'test@domain.com', 'password')
        cls.user.is_staff = True
        cls.user.save()
    def setUp(self):
        # Absolute filesystem locations derived from the storage backend.
        self.DIRECTORY = DIRECTORY
        self.TEST_PATH = os.path.join(site.storage.location, '_test')
        self.DIRECTORY_PATH = os.path.join(site.storage.location, DIRECTORY)
        self.VERSIONS_PATH = os.path.join(site.storage.location, VERSIONS_BASEDIR)
        # Guard against clobbering a leftover tree from a previous run.
        if os.path.exists(self.TEST_PATH):
            raise Exception('TEST_PATH Already Exists')
        self.TEMP_PATH = os.path.join(self.TEST_PATH, 'tempfolder')
        self.FOLDER_PATH = os.path.join(self.DIRECTORY_PATH, 'folder')
        self.SUBFOLDER_PATH = os.path.join(self.FOLDER_PATH, 'subfolder')
        self.CREATEFOLDER_PATH = os.path.join(self.DIRECTORY_PATH, 'create')
        self.PLACEHOLDER_PATH = os.path.join(self.DIRECTORY_PATH, 'placeholders')
        # Static test images shipped with the package (one with a name that
        # needs normalization).
        self.STATIC_IMG_PATH = os.path.join(settings.BASE_DIR, 'filebrowser', "static", "filebrowser", "img", "testimage.jpg")
        self.STATIC_IMG_BAD_NAME_PATH = os.path.join(settings.BASE_DIR, 'filebrowser', "static", "filebrowser", "img", "TEST IMAGE 000.jpg")
        # FileObject fixtures; F_MISSING deliberately points at a
        # non-existent file.
        self.F_IMAGE = FileObject(os.path.join(DIRECTORY, 'folder', "testimage.jpg"), site=site)
        self.F_MISSING = FileObject(os.path.join(DIRECTORY, 'folder', "missing.jpg"), site=site)
        self.F_FOLDER = FileObject(os.path.join(DIRECTORY, 'folder'), site=site)
        self.F_SUBFOLDER = FileObject(os.path.join(DIRECTORY, 'folder', 'subfolder'), site=site)
        self.F_CREATEFOLDER = FileObject(os.path.join(DIRECTORY, 'create'), site=site)
        self.F_TEMPFOLDER = FileObject(os.path.join('_test', 'tempfolder'), site=site)
        os.makedirs(self.FOLDER_PATH)
        os.makedirs(self.SUBFOLDER_PATH)
    def tearDown(self):
        # NOTE(review): only TEST_PATH is removed; FOLDER_PATH lives under
        # DIRECTORY_PATH — presumably DIRECTORY points inside _test, verify.
        shutil.rmtree(self.TEST_PATH)
|
{
"content_hash": "d9f936f1b1a3abfe2c6316dc302f1447",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 140,
"avg_line_length": 43.67307692307692,
"alnum_prop": 0.6856010568031704,
"repo_name": "michalwerner/django-filebrowser-tinymce4",
"id": "3c4f0d27fe3521256bbe62fb8bd82f025c884131",
"size": "2271",
"binary": false,
"copies": "2",
"ref": "refs/heads/tinymce4",
"path": "tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11293"
},
{
"name": "HTML",
"bytes": "54724"
},
{
"name": "JavaScript",
"bytes": "50579"
},
{
"name": "Python",
"bytes": "166690"
}
],
"symlink_target": ""
}
|
from perfcomp import ansbile_playbook, pip_diff, rpm_diff
from perfcomp.graphs import graph_ansible_playbook
class JobDiff:
    """Computes rpm/pip (and optionally ansible) diffs between two CI jobs."""

    def __init__(self, good, bad, ansible_playbooks_diff, rpm_diff_b,
                 pip_diff_b):
        self.good = good
        self.bad = bad
        # Boolean switches selecting which diff sections generate() produces.
        self.ansible_diff = ansible_playbooks_diff
        self.rpm_diff_b = rpm_diff_b
        self.pip_diff_b = pip_diff_b

    def ansible_playbooks_diff(self):
        """Compares ansible playbooks and renders a graph per playbook."""
        data = ansbile_playbook.compare(self.good, self.bad)
        images = {
            key: graph_ansible_playbook(data[key], key) if data[key] else None
            for key in data}
        return {'ans_data': data, 'images': images}

    def rpm_files_diff(self):
        """Diffs installed rpm packages between the two jobs."""
        raw_inline, uniq1, uniq2 = rpm_diff.rpms(self.good, self.bad)
        # Sometimes the raw diff needs an extra refinement pass.
        raw_inline, uniq1, uniq2 = rpm_diff.check_packages(
            raw_inline, uniq1, uniq2)
        colored = [rpm_diff.colorize_diff(row) for row in raw_inline]
        linked = rpm_diff.add_github_links(raw_inline, colored)
        return {
            'inline': linked,
            'uniq1': uniq1,
            'uniq2': uniq2,
            'rpms_diff_max_length': max(len(uniq1), len(uniq2)),
        }

    def pip_files_diff(self):
        """Diffs installed pip modules between the two jobs."""
        inline, uniq1, uniq2 = pip_diff.pip_modules(self.good, self.bad)
        return {
            'pip_inline': inline,
            'pip_uniq1': uniq1,
            'pip_uniq2': uniq2,
            'pip_diff_max_length': max(len(uniq1), len(uniq2)),
        }

    def generate(self):
        """Collects the enabled diff sections into a single result dict."""
        results = {}
        # Ansible playbook comparison is currently disabled:
        # if self.ansible_diff:
        #     results.update(self.ansible_playbooks_diff())
        if self.rpm_diff_b:
            results.update(self.rpm_files_diff())
        if self.pip_diff_b:
            results.update(self.pip_files_diff())
        return results
|
{
"content_hash": "a14fea521d7920bfe5ab917bbc4a0752",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 79,
"avg_line_length": 39.04255319148936,
"alnum_prop": 0.5989100817438692,
"repo_name": "rdo-infra/ci-config",
"id": "882c0927d56ca95093d1f5ed616e9cd7c4b293de",
"size": "1835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jcomparison/perfcomp/jobdiff.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "Dockerfile",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "30459"
},
{
"name": "HTML",
"bytes": "31437"
},
{
"name": "Jinja",
"bytes": "9855"
},
{
"name": "Python",
"bytes": "791140"
},
{
"name": "Shell",
"bytes": "63444"
}
],
"symlink_target": ""
}
|
from django.db import migrations
class Migration(migrations.Migration):
    """Add (title, collection) unique-together constraints to the custom
    document and custom image test models, and reset customdocument's
    model-level Meta options.
    """
    dependencies = [
        ('wagtailcore', '0046_site_name_remove_null'),
        ('tests', '0051_tag_verbose_name'),
    ]
    operations = [
        # Drop any previously declared Meta options on customdocument.
        migrations.AlterModelOptions(
            name='customdocument',
            options={},
        ),
        # Each (title, collection) pair must be unique for custom documents.
        migrations.AlterUniqueTogether(
            name='customdocument',
            unique_together={('title', 'collection')},
        ),
        # Same uniqueness constraint for custom images.
        migrations.AlterUniqueTogether(
            name='customimage',
            unique_together={('title', 'collection')},
        ),
    ]
|
{
"content_hash": "ee7b9194cbb9fe62f19220e42f28d8b6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 54,
"avg_line_length": 25.583333333333332,
"alnum_prop": 0.5537459283387622,
"repo_name": "torchbox/wagtail",
"id": "7624bc6b6684570708f2429f6043c38a31267739",
"size": "664",
"binary": false,
"copies": "8",
"ref": "refs/heads/stable/2.15.x",
"path": "wagtail/tests/testapp/migrations/0052_custom_doc_image_unique_together.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "178240"
},
{
"name": "HTML",
"bytes": "307456"
},
{
"name": "JavaScript",
"bytes": "123792"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Python",
"bytes": "2786743"
},
{
"name": "Shell",
"bytes": "7997"
}
],
"symlink_target": ""
}
|
# Sphinx documentation build configuration ("conf.py") for the
# "Wavelet Analysis" project.  Uncommented names are active Sphinx options;
# the commented ones are the sphinx-quickstart defaults left in place.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# NOTE(review): hard-coded absolute path only valid on the original author's
# machine; consider deriving it from __file__ so the docs build elsewhere.
sys.path.append('/users/wege/t1/waipy/read_the_docs/source')
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# NOTE(review): sphinx.ext.pngmath was removed in newer Sphinx releases in
# favour of sphinx.ext.imgmath — confirm the Sphinx version before upgrading.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode', 'matplotlib.sphinxext.plot_directive', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Wavelet Analysis'
copyright = u'2013, Mabel Calim Costa'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'WaveletAnalysisdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'WaveletAnalysis.tex', u'Wavelet Analysis Documentation',
   u'Mabel Calim Costa', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'waveletanalysis', u'Wavelet Analysis Documentation',
     [u'Mabel Calim Costa'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'WaveletAnalysis', u'Wavelet Analysis Documentation',
   u'Mabel Calim Costa', 'WaveletAnalysis', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
{
"content_hash": "a1791cb8e382634f42fe86ed1bf31ed9",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 188,
"avg_line_length": 33.27947598253275,
"alnum_prop": 0.7055504526964965,
"repo_name": "mabelcalim/waipy",
"id": "a2e97198035a63507c5293363dd2820dceacc1d7",
"size": "8048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "read_the_docs/source/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32653"
},
{
"name": "HTML",
"bytes": "352661"
},
{
"name": "JavaScript",
"bytes": "80947"
},
{
"name": "Jupyter Notebook",
"bytes": "41154531"
},
{
"name": "Makefile",
"bytes": "5609"
},
{
"name": "Python",
"bytes": "75944"
}
],
"symlink_target": ""
}
|
"""Sensitivity lists."""
from . import HDLObject
from .signal import HDLSignal, HDLSignalSlice
from .port import HDLModulePort
class HDLSensitivityDescriptor(HDLObject):
    """Describe sensitivity to one signal: rise, fall, both edges, or any."""

    # Allowed sensitivity kinds.
    _sens_types = ["rise", "fall", "both", "any"]

    def __init__(self, sens_type, sig=None):
        """Validate and store the sensitivity type and its signal.

        A module port is unwrapped to its underlying signal.  A missing
        signal is only legal for the "any" type.
        """
        if sens_type not in self._sens_types:
            raise ValueError(
                'illegal sensitivity type: "{}"'.format(sens_type)
            )
        if isinstance(sig, HDLModulePort):
            sig = sig.signal
        elif sig is None and sens_type != "any":
            raise ValueError("signal cannot be None")
        if not isinstance(sig, (HDLSignal, HDLSignalSlice, type(None))):
            raise TypeError("sig must be HDLSignal or HDLSignalSlice")
        self.sens_type = sens_type
        self.signal = sig

    def dumps(self):
        """Return the textual representation of this descriptor."""
        # NOTE(review): for sens_type "any" the constructor permits
        # signal=None, which would raise AttributeError here — confirm intent.
        signal_repr = self.signal.dumps(decl=False)
        if self.sens_type == "any":
            return signal_repr
        # "rise", "fall" and "both" all render as <type>(<signal>).
        return "{}({})".format(self.sens_type, signal_repr)
class HDLSensitivityList(HDLObject):
    """Ordered collection of sensitivity descriptors."""

    def __init__(self, *descrs):
        """Initialize, optionally with initial descriptors."""
        self.items = []
        self.add(*descrs)

    def add(self, *descrs):
        """Append descriptors, type-checking each one."""
        for descriptor in descrs:
            if not isinstance(descriptor, HDLSensitivityDescriptor):
                raise TypeError("only HDLSensitivityDescriptor allowed")
            # no duplicate checking!!!
            self.items.append(descriptor)

    def __len__(self):
        """Return the number of descriptors."""
        return len(self.items)

    def __getitem__(self, _slice):
        """Index or slice into the descriptor list."""
        return self.items[_slice]

    def dumps(self):
        """Return the list representation, e.g. "[rise(a),fall(b)]"."""
        dumped = (descriptor.dumps() for descriptor in self.items)
        return "[{}]".format(",".join(dumped))
|
{
"content_hash": "5c70c8a76c5eb990c42b3e5a25347dcb",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 72,
"avg_line_length": 30.083333333333332,
"alnum_prop": 0.5678670360110804,
"repo_name": "brunosmmm/hdltools",
"id": "f81a64dc0ad10139bf76b994e0e2d8cdb3c59939",
"size": "2166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hdltools/abshdl/sens.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "470883"
},
{
"name": "Shell",
"bytes": "1354"
},
{
"name": "Verilog",
"bytes": "19781"
}
],
"symlink_target": ""
}
|
"""Run the linting rules on the playbook with given args"""
import multiprocessing
from ansible.cli.playbook import PlaybookCLI # pylint: disable=E0611,F0401
from collections import defaultdict
from interceptor import intercept
from linter.utils import isLTS, suppressConsoleOut
from linter.utils.composite_queue import CompositeQueue
# Override multiprocess Queue to accommodate interceptor's queue.
# NOTE(review): this monkey-patches the multiprocessing module globally, so
# every later use of multiprocessing.Queue in this process gets CompositeQueue.
multiprocessing.Queue = CompositeQueue
class Runner(object):
"""Run playbook in check mode to lint with given rules"""
def __init__(self, ansible_pbook_args, rules):
"""Initialise runner with playbook args, rules to be run and the error data store"""
self.ansible_pbook_args = ansible_pbook_args
self.rules = isLTS(rules) and rules or [rules]
# TODO: Make it ordered dict.
self.errors = defaultdict(set)
def apply_rules(self):
"""Apply lint rules
Intercept classes using the aspects, both, declared by rules.
"""
for rule in self.rules:
rule_obj = rule(self.errors)
for target_classes, aspects in rule_obj.aspects.iteritems():
if not isLTS(target_classes):
target_classes = [target_classes]
for _class in target_classes:
intercept(aspects)(_class)
@suppressConsoleOut
def run_pbook(self):
"""Run playbook in check mode with console-stdout suppressed"""
for flag in ('--check',):
if flag not in self.ansible_pbook_args:
self.ansible_pbook_args.append(flag)
obj = PlaybookCLI(self.ansible_pbook_args)
obj.parse()
obj.run()
def format_errors(self):
"""Output formatting for errors, if there are."""
if not self.errors:
print "Valid Playbook"
return
for task, errors in self.errors.items():
# TODO: Hosts or groups as relevant in the error messages.
if task == "unused_vars":
print 'Unused vars:{0}{1}{0}'.format('\n', ', '.join(errors))
continue
elif task == "conflicting_vars":
print 'Conflicting vars:{0}{1}{0}'.format('\n', '\n'.join(errors))
continue
print 'Task: {1}{0}{2}{0}'.format('\n', task, '\n'.join(errors))
if task == "setup":
print "Couldn't lint the above hosts as their setup failed. Fix and re-lint\n"
def run(self):
"""Method to invoke playbook and apply linters."""
self.apply_rules()
self.run_pbook()
self.format_errors()
|
{
"content_hash": "9ace26c24f53674681086320f637b971",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 94,
"avg_line_length": 38.20289855072464,
"alnum_prop": 0.6073596358118362,
"repo_name": "host-anshu/lint-playbook",
"id": "25f20e8ec725c6828f448426192ce4c5ca5133ec",
"size": "2636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "linter/runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28736"
}
],
"symlink_target": ""
}
|
from nose.tools import assert_equal
from kmodes.kmodes import get_max_value_key
def test_mode_from_dict():
    """The key whose value is largest should be returned as the mode."""
    counts = {'a': 3, 'b': 10, 'c': -1, 'd': 9.9}
    assert_equal('b', get_max_value_key(counts))
|
{
"content_hash": "e7634f10692b1afb2c7a5df6b6b11988",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 30,
"alnum_prop": 0.6333333333333333,
"repo_name": "nkhuyu/kmodes",
"id": "7097143cc2845c53b21e2e3b291a3130a6b06d44",
"size": "211",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kmodes/tests/test_kmodes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25050"
}
],
"symlink_target": ""
}
|
import os
import sys
print("\nStripping input reference numbers from code cells...")

# Find all files to work with: every rendered notebook page except the index.
path_to_notebooks = '/srv/projects/intro_programming/intro_programming/notebooks/'
filenames = [
    filename for filename in os.listdir(path_to_notebooks)
    if '.html' in filename and filename != 'index.html'
]

# one file for testing:
#filenames = ['hello_world.html']

for filename in filenames:
    # Context managers guarantee the file handles are closed even if a
    # read/write raises (the originals leaked handles on error).
    with open(path_to_notebooks + filename, 'r') as f:
        lines = f.readlines()
    with open(path_to_notebooks + filename, 'wb') as f:
        for line in lines:
            # Unwanted lines have opening and closing div on same line,
            # with input reference number between them; skip those.
            if ('<div class="prompt input_prompt">' in line
                    and '</div>' in line):
                continue
            # Regular line: write it back, utf-8 encoded (file opened binary).
            f.write(line.encode('utf-8'))

print(" Stripped input reference numbers.\n")
|
{
"content_hash": "4f23c89f96fa7c0ad6a10f7b7528e5e1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 82,
"avg_line_length": 28.486486486486488,
"alnum_prop": 0.6176470588235294,
"repo_name": "BadWizard/intro_programming",
"id": "2ab8878b1a362f079adf49a971ef71aa7677a4ea",
"size": "1306",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/remove_input_references.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4660"
},
{
"name": "HTML",
"bytes": "17849"
},
{
"name": "JavaScript",
"bytes": "1647"
},
{
"name": "Python",
"bytes": "27341"
},
{
"name": "Shell",
"bytes": "6939"
},
{
"name": "Smarty",
"bytes": "736"
}
],
"symlink_target": ""
}
|
"""
Sensor for Last.fm account status.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.lastfm/
"""
import logging
import re
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_API_KEY
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
# Pinned third-party dependency declaration (presumably consumed by the host
# framework's requirement installer — confirm against platform loader).
REQUIREMENTS = ['pylast==2.4.0']
_LOGGER = logging.getLogger(__name__)
# Names of the extra state attributes this sensor exposes.
ATTR_LAST_PLAYED = 'last_played'
ATTR_PLAY_COUNT = 'play_count'
ATTR_TOP_PLAYED = 'top_played'
# Configuration key for the list of Last.fm usernames to track.
CONF_USERS = 'users'
ICON = 'mdi:lastfm'
# Platform schema: an API key is required; users defaults to an empty list
# and is coerced to a list of strings.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_USERS, default=[]): vol.All(cv.ensure_list, [cv.string]),
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Last.fm sensor platform."""
    import pylast as lastfm
    from pylast import WSError

    api_key = config[CONF_API_KEY]
    usernames = config.get(CONF_USERS)

    network = lastfm.LastFMNetwork(api_key=api_key)

    sensors = []
    for username in usernames:
        try:
            # Probe the account; a bad username/API key raises WSError here.
            network.get_user(username).get_image()
            sensors.append(LastfmSensor(username, network))
        except WSError as error:
            # Abort the whole platform setup on the first failing account.
            _LOGGER.error(error)
            return
    add_entities(sensors, True)
class LastfmSensor(Entity):
    """Sensor exposing one Last.fm account's scrobbling state."""

    def __init__(self, user, lastfm):
        """Initialize the sensor for a single username."""
        self._user = lastfm.get_user(user)
        self._name = user
        self._lastfm = lastfm
        self._state = "Not Scrobbling"
        self._playcount = None
        self._lastplayed = None
        self._topplayed = None
        self._cover = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def entity_id(self):
        """Return the entity ID."""
        return 'sensor.lastfm_{}'.format(self._name)

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    def update(self):
        """Refresh avatar, counters and now-playing info from Last.fm."""
        user = self._user
        self._cover = user.get_image()
        self._playcount = user.get_playcount()
        recent = user.get_recent_tracks(limit=2)[0]
        self._lastplayed = "{} - {}".format(
            recent.track.artist, recent.track.title)
        top_track = user.get_top_tracks(limit=1)[0]
        # Parse artist/title out of the library object's string form.
        title_match = re.search("', '(.+?)',", str(top_track))
        artist_match = re.search("'(.+?)',", str(top_track))
        self._topplayed = "{} - {}".format(
            artist_match.group(1), title_match.group(1))
        if user.get_now_playing() is None:
            self._state = "Not Scrobbling"
            return
        now = user.get_now_playing()
        self._state = "{} - {}".format(now.artist, now.title)

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return {
            ATTR_LAST_PLAYED: self._lastplayed,
            ATTR_PLAY_COUNT: self._playcount,
            ATTR_TOP_PLAYED: self._topplayed,
        }

    @property
    def entity_picture(self):
        """Avatar of the user."""
        return self._cover

    @property
    def icon(self):
        """Return the icon to use in the frontend."""
        return ICON
|
{
"content_hash": "2b36f893eb8b15b5c886ff22ba09d2b4",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 79,
"avg_line_length": 28.231404958677686,
"alnum_prop": 0.6083138173302107,
"repo_name": "tinloaf/home-assistant",
"id": "d92fe8b53bfcd49b9444ea0d30d7a872c9d0fcec",
"size": "3416",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/lastfm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class MaintenanceRedeployStatus(Model):
    """Maintenance Operation Status.
    :param is_customer_initiated_maintenance_allowed: True, if customer is
    allowed to perform Maintenance.
    :type is_customer_initiated_maintenance_allowed: bool
    :param pre_maintenance_window_start_time: Start Time for the Pre
    Maintenance Window.
    :type pre_maintenance_window_start_time: datetime
    :param pre_maintenance_window_end_time: End Time for the Pre Maintenance
    Window.
    :type pre_maintenance_window_end_time: datetime
    :param maintenance_window_start_time: Start Time for the Maintenance
    Window.
    :type maintenance_window_start_time: datetime
    :param maintenance_window_end_time: End Time for the Maintenance Window.
    :type maintenance_window_end_time: datetime
    :param last_operation_result_code: The Last Maintenance Operation Result
    Code. Possible values include: 'None', 'RetryLater', 'MaintenanceAborted',
    'MaintenanceCompleted'
    :type last_operation_result_code: str or
    ~azure.mgmt.compute.v2017_03_30.models.MaintenanceOperationResultCodeTypes
    :param last_operation_message: Message returned for the last Maintenance
    Operation.
    :type last_operation_message: str
    """
    # Maps each Python attribute to its wire (JSON) key and the msrest
    # (de)serialization type used by the Model base class.
    _attribute_map = {
        'is_customer_initiated_maintenance_allowed': {'key': 'isCustomerInitiatedMaintenanceAllowed', 'type': 'bool'},
        'pre_maintenance_window_start_time': {'key': 'preMaintenanceWindowStartTime', 'type': 'iso-8601'},
        'pre_maintenance_window_end_time': {'key': 'preMaintenanceWindowEndTime', 'type': 'iso-8601'},
        'maintenance_window_start_time': {'key': 'maintenanceWindowStartTime', 'type': 'iso-8601'},
        'maintenance_window_end_time': {'key': 'maintenanceWindowEndTime', 'type': 'iso-8601'},
        'last_operation_result_code': {'key': 'lastOperationResultCode', 'type': 'MaintenanceOperationResultCodeTypes'},
        'last_operation_message': {'key': 'lastOperationMessage', 'type': 'str'},
    }
    # NOTE: generated-style model; all constructor arguments are keyword-only
    # (note the bare '*') and default to None.
    def __init__(self, *, is_customer_initiated_maintenance_allowed: bool=None, pre_maintenance_window_start_time=None, pre_maintenance_window_end_time=None, maintenance_window_start_time=None, maintenance_window_end_time=None, last_operation_result_code=None, last_operation_message: str=None, **kwargs) -> None:
        super(MaintenanceRedeployStatus, self).__init__(**kwargs)
        self.is_customer_initiated_maintenance_allowed = is_customer_initiated_maintenance_allowed
        self.pre_maintenance_window_start_time = pre_maintenance_window_start_time
        self.pre_maintenance_window_end_time = pre_maintenance_window_end_time
        self.maintenance_window_start_time = maintenance_window_start_time
        self.maintenance_window_end_time = maintenance_window_end_time
        self.last_operation_result_code = last_operation_result_code
        self.last_operation_message = last_operation_message
|
{
"content_hash": "e0fa15b127840186f5f42cc94ef8ca97",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 313,
"avg_line_length": 60.51020408163265,
"alnum_prop": 0.7274873524451939,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "1d3532edebdeed6a51f179c74b0927ccd6e9c777",
"size": "3439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/v2017_03_30/models/maintenance_redeploy_status_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import unittest
from opencensus.metrics import label_key
from opencensus.metrics.export import metric_descriptor, value
# Shared fixture values used by the descriptor tests below.
NAME = 'metric'
DESCRIPTION = 'Metric description'
UNIT = '0.738.[ft_i].[lbf_av]/s'
LABEL_KEY1 = label_key.LabelKey('key1', 'key description one')
# Non-ASCII key/description to exercise Unicode handling.
LABEL_KEY2 = label_key.LabelKey('值', '测试用键')
LABEL_KEYS = (LABEL_KEY1, LABEL_KEY2)
class TestMetricDescriptor(unittest.TestCase):
    """Tests for MetricDescriptor construction and type-class mapping."""

    def test_init(self):
        """A valid descriptor exposes every constructor argument unchanged."""
        descriptor = metric_descriptor.MetricDescriptor(
            NAME, DESCRIPTION, UNIT,
            metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE,
            (LABEL_KEY1, LABEL_KEY2))
        self.assertEqual(descriptor.name, NAME)
        self.assertEqual(descriptor.description, DESCRIPTION)
        self.assertEqual(descriptor.unit, UNIT)
        self.assertEqual(
            descriptor.type,
            metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE)
        self.assertEqual(descriptor.label_keys, LABEL_KEYS)

    def test_bogus_type(self):
        """An unknown descriptor type code is rejected."""
        with self.assertRaises(ValueError):
            metric_descriptor.MetricDescriptor(
                NAME, DESCRIPTION, UNIT, 0, (LABEL_KEY1,))

    def test_null_label_keys(self):
        """label_keys=None is rejected."""
        with self.assertRaises(ValueError):
            metric_descriptor.MetricDescriptor(
                NAME, DESCRIPTION, UNIT,
                metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE, None)

    def test_empty_label_keys(self):
        """An empty label-key sequence is allowed."""
        metric_descriptor.MetricDescriptor(
            NAME, DESCRIPTION, UNIT,
            metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE, [])

    def test_null_label_key_values(self):
        """A None entry inside label_keys is rejected."""
        with self.assertRaises(ValueError):
            metric_descriptor.MetricDescriptor(
                NAME, DESCRIPTION, UNIT,
                metric_descriptor.MetricDescriptorType.GAUGE_DOUBLE, (None,))

    def test_to_type_class(self):
        """GAUGE_INT64 maps to ValueLong; unknown codes raise ValueError."""
        self.assertEqual(
            metric_descriptor.MetricDescriptorType.to_type_class(
                metric_descriptor.MetricDescriptorType.GAUGE_INT64),
            value.ValueLong)
        with self.assertRaises(ValueError):
            metric_descriptor.MetricDescriptorType.to_type_class(10)
|
{
"content_hash": "606e777bb80127045685e8564efff078",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 38.464285714285715,
"alnum_prop": 0.6504178272980501,
"repo_name": "census-instrumentation/opencensus-python",
"id": "ff79a3d3ceecc182a151f8491239ac8b37e1fbc7",
"size": "2773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/metrics/export/test_metric_descriptor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1856"
},
{
"name": "Makefile",
"bytes": "615"
},
{
"name": "Python",
"bytes": "1673591"
},
{
"name": "Shell",
"bytes": "4011"
}
],
"symlink_target": ""
}
|
# User-facing message strings and message builders (English locale).

AUTH_FAILURE = "Your username and password were incorrect. Sorry"


def NO_FIELD_SUPPLIED(field):
    """Message for a required field that was left empty."""
    message = 'No %s supplied' % field
    return message


PASSWORDS_DONT_MATCH = "The passwords don't match"


def FIELD_MUST_BE_LEN(field, the_len):
    """Message for a field shorter than the required length."""
    message = '%s must be at least %s characters long' % (field, the_len)
    return message


USERNAME_TAKEN = 'Username already taken'
NO_CONTACT_FOUND = 'No contact with that username found'


def ALREADY_CONNECTED(contact):
    """Message shown when a connection to *contact* already exists."""
    message = "You're already connected to %s" % contact
    return message


CANT_CONNECT_TO_SELF = "You can't connect to yourself!"


def MUST_BE_GREATER_THAN_ZERO(field):
    """Message for a numeric field that must be positive."""
    message = "%s must be greater than zero" % field
    return message


def MUST_BE_POS_INTEGER(field):
    """Message for a field that must be a positive whole number."""
    message = "%s must be a number greater than zero" % field
    return message
|
{
"content_hash": "0804f5c6a65f523e28b252eb38f65254",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 47.6,
"alnum_prop": 0.7240896358543417,
"repo_name": "whilefalse/Debt-Collector",
"id": "2a9f2c0f37a6d2e2acc25df33682dce4b76b7629",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lang/en.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20610"
}
],
"symlink_target": ""
}
|
import pywatchman
from watchman.integration.lib import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestLog(WatchmanTestCase.WatchmanTestCase):
    """Exercise argument validation of the `log` and `log-level` commands."""

    def test_invalidNumArgsLogLevel(self) -> None:
        """`log-level` requires exactly one argument."""
        bad_invocations = [["log-level"], ["log-level", "debug", "extra"]]
        for args in bad_invocations:
            with self.assertRaises(pywatchman.WatchmanError) as ctx:
                self.watchmanCommand(*args)
            self.assertIn("wrong number of arguments", str(ctx.exception))

    def test_invalidLevelLogLevel(self) -> None:
        """`log-level` rejects unknown level names."""
        with self.assertRaises(pywatchman.WatchmanError) as ctx:
            self.watchmanCommand("log-level", "invalid")
        self.assertIn("invalid log level", str(ctx.exception))

    def test_invalidNumArgsLog(self) -> None:
        """`log` requires exactly two arguments."""
        bad_invocations = [
            ["log"],
            ["log", "debug"],
            ["log", "debug", "test", "extra"],
        ]
        for args in bad_invocations:
            with self.assertRaises(pywatchman.WatchmanError) as ctx:
                self.watchmanCommand(*args)
            self.assertIn("wrong number of arguments", str(ctx.exception))

    def test_invalidLevelLog(self) -> None:
        """`log` rejects unknown level names."""
        with self.assertRaises(pywatchman.WatchmanError) as ctx:
            self.watchmanCommand("log", "invalid", "test")
        self.assertIn("invalid log level", str(ctx.exception))
|
{
"content_hash": "d8223ff8830e900a622047e6fe1fd815",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 85,
"avg_line_length": 40.74193548387097,
"alnum_prop": 0.66270783847981,
"repo_name": "facebook/watchman",
"id": "bf84d3c29c879c3745ec6b7b6febb473e9f61ecc",
"size": "1464",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "watchman/integration/test_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "366"
},
{
"name": "C",
"bytes": "68699"
},
{
"name": "C++",
"bytes": "1332177"
},
{
"name": "CMake",
"bytes": "111122"
},
{
"name": "CSS",
"bytes": "17348"
},
{
"name": "Dockerfile",
"bytes": "1599"
},
{
"name": "HTML",
"bytes": "37275"
},
{
"name": "Java",
"bytes": "153731"
},
{
"name": "JavaScript",
"bytes": "36532"
},
{
"name": "Python",
"bytes": "854340"
},
{
"name": "Ruby",
"bytes": "17037"
},
{
"name": "Rust",
"bytes": "228777"
},
{
"name": "SCSS",
"bytes": "25149"
},
{
"name": "Shell",
"bytes": "9516"
},
{
"name": "Starlark",
"bytes": "1317"
},
{
"name": "Thrift",
"bytes": "79532"
}
],
"symlink_target": ""
}
|
import numbers
from telemetry import value as value_module
from telemetry.value import list_of_scalar_values
from telemetry.value import none_values
from telemetry.value import summarizable
class ScalarValue(summarizable.SummarizableValue):
  def __init__(self, page, name, units, value, important=True,
               description=None, tir_label=None,
               none_value_reason=None, improvement_direction=None):
    """A single value (float or integer) result from a test.

    A test that counts the number of DOM elements in a page might produce a
    scalar value:
       ScalarValue(page, 'num_dom_elements', 'count', num_elements)

    `value` may be None, in which case `none_value_reason` must explain why.
    """
    super(ScalarValue, self).__init__(page, name, units, important, description,
                                      tir_label, improvement_direction)
    assert value is None or isinstance(value, numbers.Number)
    none_values.ValidateNoneValueReason(value, none_value_reason)
    self.value = value
    self.none_value_reason = none_value_reason

  def __repr__(self):
    """Debug representation mirroring the constructor arguments."""
    if self.page:
      page_name = self.page.display_name
    else:
      page_name = 'None'
    # Fixed: the attribute was misspelled 'improvment_direction' (raising
    # AttributeError whenever repr() was taken), and the format string was
    # missing its closing parenthesis.
    return ('ScalarValue(%s, %s, %s, %s, important=%s, description=%s, '
            'tir_label=%s, improvement_direction=%s)') % (
                page_name,
                self.name,
                self.units,
                self.value,
                self.important,
                self.description,
                self.tir_label,
                self.improvement_direction)

  def GetBuildbotDataType(self, output_context):
    """Return 'default' or 'unimportant' depending on output intent."""
    if self._IsImportantGivenOutputIntent(output_context):
      return 'default'
    return 'unimportant'

  def GetBuildbotValue(self):
    """Return the value wrapped in a list for buildbot."""
    # Buildbot's print_perf_results method likes to get lists for all values,
    # even when they are scalar, so list-ize the return value.
    return [self.value]

  def GetRepresentativeNumber(self):
    """Return the scalar itself as the representative number."""
    return self.value

  def GetRepresentativeString(self):
    """Return the scalar rendered as a string."""
    return str(self.value)

  @staticmethod
  def GetJSONTypeName():
    """Type tag used in the serialized JSON form."""
    return 'scalar'

  def AsDict(self):
    """Serialize to a dict, including value and optional none-reason."""
    d = super(ScalarValue, self).AsDict()
    d['value'] = self.value
    if self.none_value_reason is not None:
      d['none_value_reason'] = self.none_value_reason
    return d

  @staticmethod
  def FromDict(value_dict, page_dict):
    """Deserialize a ScalarValue previously produced by AsDict()."""
    kwargs = value_module.Value.GetConstructorKwArgs(value_dict, page_dict)
    kwargs['value'] = value_dict['value']
    kwargs['improvement_direction'] = value_dict['improvement_direction']
    if 'none_value_reason' in value_dict:
      kwargs['none_value_reason'] = value_dict['none_value_reason']
    if 'tir_label' in value_dict:
      kwargs['tir_label'] = value_dict['tir_label']
    return ScalarValue(**kwargs)

  @classmethod
  def MergeLikeValuesFromSamePage(cls, values):
    """Merge scalar values taken from the same page into a list value."""
    assert len(values) > 0
    v0 = values[0]
    return cls._MergeLikeValues(values, v0.page, v0.name, v0.tir_label)

  @classmethod
  def MergeLikeValuesFromDifferentPages(cls, values):
    """Merge scalar values from different pages (page becomes None)."""
    assert len(values) > 0
    v0 = values[0]
    return cls._MergeLikeValues(values, None, v0.name, v0.tir_label)

  @classmethod
  def _MergeLikeValues(cls, values, page, name, tir_label):
    """Build a ListOfScalarValues from several ScalarValues.

    If any input value is None the merged value is None with a merge-failure
    reason attached.
    """
    v0 = values[0]
    merged_value = [v.value for v in values]
    none_value_reason = None
    if None in merged_value:
      merged_value = None
      none_value_reason = none_values.MERGE_FAILURE_REASON
    return list_of_scalar_values.ListOfScalarValues(
        page, name, v0.units, merged_value, important=v0.important,
        tir_label=tir_label,
        none_value_reason=none_value_reason,
        improvement_direction=v0.improvement_direction)
|
{
"content_hash": "82693f5826ae56b961ae2037417150aa",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 33.601851851851855,
"alnum_prop": 0.6632681179388261,
"repo_name": "CapOM/ChromiumGStreamerBackend",
"id": "bd533866eb6f25bb5d1f7a3eeae7efb04ba65d52",
"size": "3792",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/value/scalar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "37073"
},
{
"name": "Batchfile",
"bytes": "8451"
},
{
"name": "C",
"bytes": "9560486"
},
{
"name": "C++",
"bytes": "246838243"
},
{
"name": "CSS",
"bytes": "943687"
},
{
"name": "DM",
"bytes": "60"
},
{
"name": "Groff",
"bytes": "2494"
},
{
"name": "HTML",
"bytes": "27371019"
},
{
"name": "Java",
"bytes": "15348315"
},
{
"name": "JavaScript",
"bytes": "20872607"
},
{
"name": "Makefile",
"bytes": "70983"
},
{
"name": "Objective-C",
"bytes": "2029825"
},
{
"name": "Objective-C++",
"bytes": "10156554"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "182741"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "494625"
},
{
"name": "Python",
"bytes": "8594611"
},
{
"name": "Shell",
"bytes": "486464"
},
{
"name": "Standard ML",
"bytes": "5106"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import time
import json
from wand.image import Image
from wand.display import display
start_time = time.time()
img_file = '../images/orientation-examples/7-F.jpg'
orientation = [
# 'undefined',
# 'top_left',
'top_right',
'bottom_right',
'bottom_left',
'left_top',
'right_top',
'right_bottom',
'left_bottom']
with Image(filename=img_file) as original:
o = original.orientation
print o
if o in orientation:
with original.convert('png') as image:
with image.clone() as img:
if o == 'top_right':
img.flop()
img.save(filename='out/%s-img.jpg' % o)
elif o == 'bottom_right':
img.rotate(180)
img.save(filename='out/%s-img.jpg' % o)
elif o == 'bottom_left':
img.flip()
img.save(filename='out/%s-img.jpg' % o)
elif o == 'left_top':
img.rotate(90)
img.flop()
img.save(filename='out/%s-img.jpg' % o)
elif o == 'right_top':
img.rotate(90)
img.save(filename='out/%s-img.png' % o)
elif o == 'right_bottom':
img.rotate(-90)
img.flop()
img.save(filename='out/%s-img.jpg' % o)
elif o == 'left_bottom':
img.rotate(-90)
img.save(filename='out/%s-img.jpg' % o)
display(img)
|
{
"content_hash": "9c058263663df760005459119ddc1e41",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 59,
"avg_line_length": 30.84313725490196,
"alnum_prop": 0.4551811824539097,
"repo_name": "nbari/my-sandbox",
"id": "8b652f8498ac54f1df5dba203d6f8c11b16d584b",
"size": "1573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/wand/exif/apply_orientation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5321"
},
{
"name": "HTML",
"bytes": "53098"
},
{
"name": "PHP",
"bytes": "5266"
},
{
"name": "Python",
"bytes": "240689"
},
{
"name": "Shell",
"bytes": "5076"
}
],
"symlink_target": ""
}
|
"""Packaging script for baseball-projection-schematics.

Install requirements are read directly from requirements.txt instead of via
``pip.req.parse_requirements``: pip's internals are not a public API (the
``pip.req`` module was removed in pip 10), and the old code also relied on
the Python-2-only ``unicode`` builtin.
"""
import os

from setuptools import setup, find_packages


def _read_requirements(path):
    """Return requirement strings from *path*, skipping blanks and comments."""
    with open(path) as handle:
        stripped = (line.strip() for line in handle)
        return [line for line in stripped
                if line and not line.startswith('#')]


reqs_txt = os.path.join(os.path.dirname(__file__), 'requirements.txt')
pip_reqs = _read_requirements(reqs_txt)

setup(
    name = 'baseball-projection-schematics',
    version = '0.2.0',
    description = 'Translates projection data into a unified schema',
    author = 'Matt Dennewitz',
    author_email = 'mattdennewitz@gmail.com',
    url = 'https://github.com/mattdennewitz/baseball-projection-schematics',
    include_package_data = True,
    install_requires = pip_reqs,
    packages = find_packages(),
    scripts = [
        'scripts/bb-generate-schematic',
        'scripts/bb-process-projections',
    ],
)
|
{
"content_hash": "a164b1d66ba7b394301217dacfc32ccc",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 76,
"avg_line_length": 26.678571428571427,
"alnum_prop": 0.6854082998661312,
"repo_name": "mattdennewitz/baseball-projection-schematics",
"id": "7bba9a6ac6f16be10f100fed080b223ff80f1723",
"size": "770",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13080"
},
{
"name": "Smarty",
"bytes": "894"
}
],
"symlink_target": ""
}
|
"""Benchmarks using custom training loop on MNIST dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import timeit
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.benchmarks import benchmark_util
from tensorflow.python.keras.benchmarks import distribution_util
class CustomMnistBenchmark(tf.test.Benchmark):
  """Benchmarks for custom training loop using `tf.test.Benchmark`.

  MNIST is loaded once in the constructor; each benchmark method batches the
  shared `tf.data.Dataset` and times a hand-written training loop.
  """

  def __init__(self):
    super(CustomMnistBenchmark, self).__init__()
    self.num_classes = 10
    self.input_shape = (28, 28, 1)
    self.epochs = 15
    (x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
    # Scale pixel values to [0, 1] and append a trailing channel axis.
    x_train = x_train.astype('float32') / 255
    x_train = np.expand_dims(x_train, -1)
    # One-hot encode the integer labels for categorical cross-entropy.
    y_train = tf.keras.utils.to_categorical(y_train, self.num_classes)
    self.num_examples = x_train.shape[0]
    # Use `tf.data.Dataset` for custom training loop.
    self.train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))

  def _build_model(self):
    """Model from https://keras.io/examples/vision/mnist_convnet/."""
    model = tf.keras.Sequential([
        tf.keras.Input(shape=self.input_shape),
        tf.keras.layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(self.num_classes, activation='softmax'),
    ])
    return model

  def compute_loss(self, targets, predictions, loss_fn, batch_size):
    """Compute per-example loss averaged over the global batch size."""
    per_example_loss = loss_fn(targets, predictions)
    return tf.nn.compute_average_loss(
        per_example_loss, global_batch_size=batch_size)

  @tf.function(experimental_relax_shapes=True)
  def train_step(self, inputs, model, loss_fn, optimizer, batch_size):
    """Compute loss and optimize model by optimizer.

    Args:
      inputs: One `(features, targets)` batch from `tf.data`.
      model: See `model` in `train_function()` method.
      loss_fn: See `loss_fn` in `train_function()` method.
      optimizer: See `optimizer` in `train_function()` method.
      batch_size: See `batch_size` in `train_function()` method.

    Returns:
      Loss value.
    """
    train_x, train_y = inputs
    with tf.GradientTape() as tape:
      predictions = model(train_x, training=True)
      loss = self.compute_loss(train_y, predictions, loss_fn, batch_size)
    grads = tape.gradient(loss, model.trainable_weights)
    optimizer.apply_gradients(zip(grads, model.trainable_weights))
    return loss

  @tf.function(experimental_relax_shapes=True)
  def distributed_train_step(self, batch_dataset, model, loss_fn, optimizer,
                             batch_size, distribution_strategy):
    """Train step in distribution strategy setting.

    Runs `train_step` on every replica and reduces the per-replica losses.

    Args:
      batch_dataset: One per-replica batch from a distributed `tf.data`.
      model: See `model` in `train_function()` method.
      loss_fn: See `loss_fn` in `train_function()` method.
      optimizer: See `optimizer` in `train_function()` method.
      batch_size: See `batch_size` in `train_function()` method.
      distribution_strategy: See `distribution_strategy` in
        `train_function()` method.

    Returns:
      Sum of per_replica_losses.
    """
    per_replica_losses = distribution_strategy.run(
        self.train_step,
        args=(
            batch_dataset,
            model,
            loss_fn,
            optimizer,
            batch_size,
        ))
    return distribution_strategy.reduce(
        tf.distribute.ReduceOp.SUM, per_replica_losses, axis=None)

  def train_function(self,
                     model,
                     train_dataset,
                     loss_fn,
                     optimizer,
                     epochs=2,
                     distribution_strategy=None,
                     batch_size=256):
    """Train model in custom training loop and return average
    train_step_time.

    Args:
      model: Model to be benchmarked (callable Keras model).
      train_dataset: `tf.data` dataset, already batched (and distributed
        when `distribution_strategy` is given). Should return a tuple of
        either (inputs, targets) or (inputs, targets, sample_weights).
      loss_fn: `tf.keras.losses.Loss` instance.
      optimizer: `tf.keras.optimizers` instance.
      epochs: Integer. Number of epochs to train the model. If unspecified,
        `epochs` will default to 2.
      distribution_strategy: Distribution strategy instance, or None to run
        without one. Note that, `TPU` and `parameter_server` are not
        supported yet.
      batch_size: Integer. Number of samples per gradient update. If
        unspecified, `batch_size` will default to 256.

    Returns:
      Average train_step_time.
    """
    train_step_time_list = []
    timer = timeit.default_timer
    # NOTE(review): `total_loss` and `num_batches` are accumulated but never
    # returned or reported; only per-step wall times are used below.
    total_loss = 0.0
    num_batches = 0
    for _ in range(epochs):
      # Iterate over the batches of the dataset.
      for batch_dataset in train_dataset:
        start_time = timer()
        if distribution_strategy is not None:
          total_loss += self.distributed_train_step(batch_dataset, model,
                                                    loss_fn, optimizer,
                                                    batch_size,
                                                    distribution_strategy)
        else:
          total_loss += self.train_step(batch_dataset, model, loss_fn,
                                        optimizer, batch_size)
        num_batches += 1
        end_time = timer()
        train_step_time_list.append(end_time - start_time)
    return np.mean(train_step_time_list)

  def measure_performance(self,
                          model,
                          dataset,
                          loss_fn,
                          optimizer,
                          batch_size=32,
                          run_iters=4,
                          epochs=10,
                          distribution_strategy=None):
    """Run models and measure the performance.

    Args:
      model: Model to be benchmarked (the docstring previously named this
        `model_fn`; the parameter is the built model itself).
      dataset: `tf.data` dataset. Should return a tuple of either (inputs,
        targets) or (inputs, targets, sample_weights).
      loss_fn: `tf.keras.losses.Loss` instance.
      optimizer: `tf.keras.optimizers` instance.
      batch_size: Integer. Number of samples per gradient update. If
        unspecified, `batch_size` will default to 32.
      run_iters: Integer. Number of iterations to run the performance
        measurement. If unspecified, `run_iters` will default to 4.
      epochs: Integer. Number of epochs to train the model. If unspecified,
        `epochs` will default to 10.
      distribution_strategy: Distribution strategy instance, or None to run
        without one. Note that, `TPU` and `parameter_server` are not
        supported yet.

    Returns:
      Performance summary, which contains build_time, avg_epoch_time,
      wall_time, exp_per_sec, epochs, warmup_time, train_step_time.

    Raise:
      ValueError: if `dataset` is not a (distributed) `tf.data` dataset, or
        if `loss_fn`/`optimizer` are not Keras loss/optimizer instances.
    """
    if distribution_strategy is not None and \
        not isinstance(dataset, tf.distribute.DistributedDataset):
      raise ValueError('tf.distribute.DistributedDataset'
                       ' required in distribution strategy.')
    if distribution_strategy is None and \
        not isinstance(dataset, tf.data.Dataset):
      raise ValueError('`tf.data` is required.')
    if not isinstance(loss_fn, tf.keras.losses.Loss):
      raise ValueError('`tf.keras.losses.Loss` instance '
                       'for loss_fn is required.')
    if not isinstance(optimizer, tf.keras.optimizers.Optimizer):
      raise ValueError('`tf.keras.optimizers` instance '
                       'for optimizer is required.')
    avg_epoch_time_list, train_step_time_list = [], []
    wall_time_list, exp_per_sec_list, warmup_time_list = [], [], []
    total_num_examples = epochs * self.num_examples
    for _ in range(run_iters):
      timer = timeit.default_timer
      start_time = timer()
      t1 = timer()
      # One single-epoch pass, timed separately as warmup_time and excluded
      # from the measured training time below.
      self.train_function(model, dataset, loss_fn, optimizer, 1,
                          distribution_strategy, batch_size)
      warmup_time = timer() - t1
      t2 = timer()
      train_step_time = self.train_function(model, dataset, loss_fn, optimizer,
                                            epochs, distribution_strategy,
                                            batch_size)
      end_time = timer()
      train_step_time_list.append(train_step_time)
      warmup_time_list.append(warmup_time)
      wall_time_list.append(end_time - start_time)
      exp_per_sec_list.append(total_num_examples / (end_time - t2))
      avg_epoch_time_list.append((end_time - t2) / epochs)
    metrics = []
    metrics.append({
        'name': 'avg_epoch_time',
        'value': np.mean(avg_epoch_time_list)
    })
    metrics.append({'name': 'exp_per_sec', 'value': np.mean(exp_per_sec_list)})
    metrics.append({'name': 'warmup_time', 'value': np.mean(warmup_time_list)})
    metrics.append({
        'name': 'train_step_time',
        'value': np.mean(train_step_time_list)
    })
    metrics.append({'name': 'epochs', 'value': epochs})
    wall_time = np.mean(wall_time_list)
    return metrics, wall_time

  def benchmark_custom_training_mnist_bs_128(self):
    """Measure performance with batch_size=128 and run_iters=5."""
    batch_size = 128
    run_iters = 5
    train_dataset = self.train_dataset.shuffle(
        buffer_size=1024).batch(batch_size)
    # Instantiate a loss function.
    loss_fn = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    # Instantiate an optimizer to train the model.
    optimizer = tf.keras.optimizers.Adam()
    model = self._build_model()
    metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,
                                                  optimizer, batch_size,
                                                  run_iters, self.epochs)
    extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,
                                                        '.keras.ctl_graph')
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_custom_training_mnist_bs_256(self):
    """Measure performance with batch_size=256 and run_iters=5."""
    batch_size = 256
    run_iters = 5
    train_dataset = self.train_dataset.shuffle(
        buffer_size=1024).batch(batch_size)
    # Instantiate a loss function.
    loss_fn = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    # Instantiate an optimizer to train the model.
    optimizer = tf.keras.optimizers.Adam()
    model = self._build_model()
    metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,
                                                  optimizer, batch_size,
                                                  run_iters, self.epochs)
    extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,
                                                        '.keras.ctl_graph')
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_custom_training_mnist_bs_512(self):
    """Measure performance with batch_size=512 and run_iters=5.

    (The docstring previously claimed run_iters=10; the code uses 5.)
    """
    batch_size = 512
    run_iters = 5
    train_dataset = self.train_dataset.shuffle(
        buffer_size=1024).batch(batch_size)
    # Instantiate a loss function.
    loss_fn = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    # Instantiate an optimizer to train the model.
    optimizer = tf.keras.optimizers.Adam()
    model = self._build_model()
    metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,
                                                  optimizer, batch_size,
                                                  run_iters, self.epochs)
    extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,
                                                        '.keras.ctl_graph')
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)

  def benchmark_custom_training_mnist_bs_512_gpu_2(self):
    """Measure performance with batch_size=512, run_iters=10, gpu=2 and
    distribution_strategy='mirrored'.
    """
    batch_size = 512
    run_iters = 10
    train_dataset = self.train_dataset.shuffle(
        buffer_size=1024).batch(batch_size)
    distribution_strategy = 'mirrored'
    strategy = distribution_util.get_distribution_strategy(
        distribution_strategy=distribution_strategy, num_gpus=2)
    # NOTE(review): `distribution_strategy` is hard-coded to 'mirrored'
    # above, so this guard is always taken.
    if distribution_strategy != 'off':
      train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    strategy_scope = distribution_util.get_strategy_scope(strategy)
    # Loss, optimizer and model variables must be created under the scope.
    with strategy_scope:
      # Instantiate a loss function.
      loss_fn = tf.keras.losses.CategoricalCrossentropy(
          reduction=tf.keras.losses.Reduction.NONE)
      # Instantiate an optimizer to train the model.
      optimizer = tf.keras.optimizers.Adam()
      model = self._build_model()
    metrics, wall_time = self.measure_performance(model, train_dataset, loss_fn,
                                                  optimizer, batch_size,
                                                  run_iters, self.epochs,
                                                  strategy)
    extras = benchmark_util.get_keras_examples_metadata('conv', batch_size,
                                                        '.keras.ctl_graph')
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras)
# Run the benchmarks through the TensorFlow test runner when executed
# directly.
if __name__ == '__main__':
  tf.test.main()
|
{
"content_hash": "149eebe8377241b0159afdb4090b054e",
"timestamp": "",
"source": "github",
"line_count": 359,
"max_line_length": 80,
"avg_line_length": 39.590529247910865,
"alnum_prop": 0.6062759445577992,
"repo_name": "annarev/tensorflow",
"id": "f1b431a35db5607966a44946ea38cc3c55efe6a2",
"size": "14902",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
from collections import Sequence # noqa
from django.conf import settings
from horizon import exceptions
import six
__all__ = ('APIResourceWrapper', 'APIDictWrapper',
'get_service_from_catalog', 'url_for',)
class APIVersionManager(object):
    """Tracks the supported and active API versions for one service type.

    The active version is resolved lazily from the ``OPENSTACK_API_VERSIONS``
    Django setting (falling back to the preferred version) and cached until
    :meth:`clear_active_cache` is called.
    """

    SETTINGS_KEY = "OPENSTACK_API_VERSIONS"

    def __init__(self, service_type, preferred_version=None):
        self.service_type = service_type
        self.preferred = preferred_version
        self._active = None
        self.supported = {}
        # As a convenience, register a placeholder for the preferred version
        # so that APIs that have not been explicitly versioned yet (e.g. the
        # admin metadata_defs panel checking the active version) don't hit a
        # KeyError.
        if self.preferred:
            self.supported[self.preferred] = {"version": self.preferred}

    @property
    def active(self):
        if self._active is None:
            self.get_active_version()
        return self._active

    def load_supported_version(self, version, data):
        self.supported[version] = data

    def get_active_version(self):
        if self._active is not None:
            return self.supported[self._active]
        version_key = getattr(
            settings, self.SETTINGS_KEY, {}).get(self.service_type)
        if version_key is None:
            # TODO(gabriel): support API version discovery here; we'll leave
            # the setting in as a way of overriding the latest available
            # version.
            version_key = self.preferred
        # A dict lookup on `supported` follows, so the key's *type* matters;
        # give a clear error when a string was configured instead of a number.
        if isinstance(version_key, six.string_types):
            raise exceptions.ConfigurationError(
                'The version "%s" specified for the %s service should be '
                'either an integer or a float, not a string.' %
                (version_key, self.service_type))
        # Likewise fail helpfully when the configured version is unsupported.
        if version_key not in self.supported:
            choices = ", ".join(str(k) for k in six.iterkeys(self.supported))
            raise exceptions.ConfigurationError(
                '%s is not a supported API version for the %s service, '
                ' choices are: %s' % (version_key, self.service_type, choices))
        self._active = version_key
        return self.supported[self._active]

    def clear_active_cache(self):
        self._active = None
class APIResourceWrapper(object):
    """Simple wrapper for api objects.

    Define _attrs on the child class and pass in the
    api object as the only argument to the constructor.
    """

    _attrs = []
    _apiresource = None  # Make sure _apiresource is there even in __init__.

    def __init__(self, apiresource):
        self._apiresource = apiresource

    def __getattribute__(self, attr):
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:
            if attr in self._attrs:
                # __getattr__ won't find properties
                return getattr(self._apiresource, attr)
            raise

    def __repr__(self):
        present = dict((attr, getattr(self, attr))
                       for attr in self._attrs
                       if hasattr(self, attr))
        return "<%s: %s>" % (self.__class__.__name__, present)

    def to_dict(self):
        # Missing attributes on the wrapped resource are reported as None.
        return dict((attr, getattr(self._apiresource, attr, None))
                    for attr in self._attrs)
class APIDictWrapper(object):
    """Simple wrapper for api dictionaries.

    Some api calls return dictionaries.  This class provides identical
    behavior as APIResourceWrapper, except that it will also behave as a
    dictionary, in addition to attribute accesses.

    Attribute access is the preferred method of access, to be
    consistent with api resource objects from novaclient.
    """

    _apidict = {}  # Make sure _apidict is there even in __init__.

    def __init__(self, apidict):
        self._apidict = apidict

    def __getattribute__(self, attr):
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:
            if attr in self._apidict:
                return self._apidict[attr]
            raise

    def __getitem__(self, item):
        try:
            return getattr(self, item)
        except (AttributeError, TypeError) as exc:
            # Callers of the mapping interface expect a KeyError.
            raise KeyError(exc)

    def __contains__(self, item):
        try:
            return hasattr(self, item)
        except TypeError:
            # Non-string keys can't be attributes.
            return False

    def get(self, item, default=None):
        try:
            return getattr(self, item)
        except (AttributeError, TypeError):
            return default

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self._apidict)

    def __cmp__(self, other):
        # Python 2 comparison hook; note `cmp` does not exist on Python 3.
        if hasattr(other, '_apidict'):
            return cmp(self._apidict, other._apidict)
        return cmp(self._apidict, other)

    def to_dict(self):
        return self._apidict
class Quota(object):
    """A single named limit within a quota set."""

    def __init__(self, name, limit):
        self.name = name
        self.limit = limit

    def __repr__(self):
        return "<Quota: (%s, %s)>" % (self.name, self.limit)
class QuotaSet(Sequence):
    """Wrapper turning a client QuotaSet object into a list of Quotas.

    `QuotaSet` objects support a mix of `list` and `dict` methods; you can
    use the bracket notation (`qs["my_quota"] = 0`) to add new quota values,
    and use the `get` method to retrieve a specific quota, but otherwise it
    behaves much like a list or tuple, particularly in supporting iteration.
    """

    def __init__(self, apiresource=None):
        self.items = []
        if apiresource:
            if hasattr(apiresource, '_info'):
                pairs = apiresource._info.items()
            else:
                pairs = apiresource.items()
            for key, value in pairs:
                # The resource id is metadata, not a quota.
                if key == 'id':
                    continue
                self[key] = value

    def __setitem__(self, k, v):
        limit = int(v) if v is not None else v
        self.items.append(Quota(k, limit))

    def __getitem__(self, index):
        return self.items[index]

    def __len__(self):
        return len(self.items)

    def __repr__(self):
        return repr(self.items)

    def __add__(self, other):
        """Merge another QuotaSet into this one.

        Existing quotas are not overridden.
        """
        if not isinstance(other, QuotaSet):
            raise ValueError("Can only add QuotaSet to QuotaSet, "
                             "but received %s instead" % type(other))
        for item in other:
            if self.get(item.name).limit is None:
                self.items.append(item)
        return self

    def add(self, other):
        return self.__add__(other)

    def get(self, key, default=None):
        matches = [quota for quota in self.items if quota.name == key]
        return matches.pop() if matches else Quota(key, default)
def get_service_from_catalog(catalog, service_type):
    """Return the first catalog entry of the given type, or None."""
    if not catalog:
        return None
    for service in catalog:
        # Entries without a 'type' key are skipped.
        if 'type' in service and service['type'] == service_type:
            return service
    return None
def get_version_from_service(service):
    """Infer the Keystone catalog version from a service entry.

    V3 catalog endpoints carry an 'interface' key; V2 endpoints do not.
    Defaults to 2.0 when there is no endpoint to inspect.
    """
    endpoints = service.get('endpoints') if service else None
    if not endpoints:
        return 2.0
    return 3 if 'interface' in endpoints[0] else 2.0
# Mapping of V2 Catalog Endpoint_type to V3 Catalog Interfaces.
# V2 endpoints expose one URL per type key; V3 endpoints instead carry a
# single 'interface' field holding one of the values on the right (see
# get_url_for_service below).
ENDPOINT_TYPE_TO_INTERFACE = {
    'publicURL': 'public',
    'internalURL': 'internal',
    'adminURL': 'admin',
}
def get_url_for_service(service, region, endpoint_type):
    """Return the endpoint URL for *service* in *region*, or None.

    Works with both Keystone V2 ('publicURL'-style keys) and V3
    ('interface'/'url' keys) catalog entries.
    """
    if 'type' not in service:
        return None
    identity_version = get_version_from_service(service)
    service_endpoints = service.get('endpoints', [])
    available_endpoints = [endpoint for endpoint in service_endpoints
                           if region == _get_endpoint_region(endpoint)]
    # The identity service is assumed to be global: when no endpoint matches
    # the current region, fall back to considering every endpoint.
    if service['type'] == 'identity' and not available_endpoints:
        available_endpoints = list(service_endpoints)
    for endpoint in available_endpoints:
        try:
            if identity_version < 3:
                return endpoint.get(endpoint_type)
            wanted = ENDPOINT_TYPE_TO_INTERFACE.get(endpoint_type, '')
            if endpoint.get('interface') == wanted:
                return endpoint.get('url')
        except (IndexError, KeyError):
            # This endpoint doesn't match the requested type; try the next.
            pass
    return None
def url_for(request, service_type, endpoint_type=None, region=None):
    """Return the endpoint URL for a service type from the user's catalog.

    Falls back to the SECONDARY_ENDPOINT_TYPE setting when the primary
    endpoint type yields no URL; raises ServiceCatalogException when
    nothing matches.
    """
    endpoint_type = endpoint_type or getattr(settings,
                                             'OPENSTACK_ENDPOINT_TYPE',
                                             'publicURL')
    fallback_type = getattr(settings, 'SECONDARY_ENDPOINT_TYPE', None)
    service = get_service_from_catalog(request.user.service_catalog,
                                       service_type)
    if service:
        region = region or request.user.services_region
        url = get_url_for_service(service, region, endpoint_type)
        if not url and fallback_type:
            url = get_url_for_service(service, region, fallback_type)
        if url:
            return url
    raise exceptions.ServiceCatalogException(service_type)
def is_service_enabled(request, service_type):
    """Return True if *service_type* has an endpoint in the user's region.

    The identity service is treated as global, so its region is ignored.
    """
    service = get_service_from_catalog(request.user.service_catalog,
                                       service_type)
    # The 'type' check is invariant across endpoints, so it is hoisted out
    # of the loop (the original re-checked it per endpoint).
    if not service or 'type' not in service:
        return False
    region = request.user.services_region
    for endpoint in service.get('endpoints', []):
        if (service['type'] == 'identity'
                or _get_endpoint_region(endpoint) == region):
            return True
    return False
def _get_endpoint_region(endpoint):
"""Common function for getting the region from endpoint.
In Keystone V3, region has been deprecated in favor of
region_id.
This method provides a way to get region that works for
both Keystone V2 and V3.
"""
return endpoint.get('region_id') or endpoint.get('region')
|
{
"content_hash": "672e64aa80e4a4e9fdd4415838a781f6",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 79,
"avg_line_length": 33.6865671641791,
"alnum_prop": 0.5840496233938857,
"repo_name": "gerrive/horizon",
"id": "ea45eefbda94accc6a202e5031d1efa5cb270136",
"size": "12049",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91407"
},
{
"name": "HTML",
"bytes": "468841"
},
{
"name": "JavaScript",
"bytes": "1423635"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4820370"
},
{
"name": "Shell",
"bytes": "19004"
}
],
"symlink_target": ""
}
|
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on January, 24, 2013
@author: Sourabh Bajaj
@contact: sourabhbajaj@gatech.edu
@summary: Example tutorial code.
'''
# QSTK Imports
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.tsutil as tsu
import QSTK.qstkutil.DataAccess as da
# Third Party Imports
import datetime as dt
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def main():
    '''Read a portfolio CSV, fetch three years of Yahoo close prices via
    QSTK, and plot cumulative returns of the portfolio and its components
    to tutorial3.pdf.
    '''
    # Reading the portfolio.
    # dtype='S5,f4' -> structured rows of (5-char symbol, float allocation);
    # the header row is skipped both by skiprows and the '#' comment marker.
    na_portfolio = np.loadtxt('tutorial3portfolio.csv', dtype='S5,f4',
                        delimiter=',', comments="#", skiprows=1)
    print na_portfolio
    # Sorting the portfolio by symbol name
    na_portfolio = sorted(na_portfolio, key=lambda x: x[0])
    print na_portfolio
    # Create two list for symbol names and allocation
    ls_port_syms = []
    lf_port_alloc = []
    for port in na_portfolio:
        ls_port_syms.append(port[0])
        lf_port_alloc.append(port[1])
    # Creating an object of the dataaccess class with Yahoo as the source.
    c_dataobj = da.DataAccess('Yahoo')
    ls_all_syms = c_dataobj.get_all_symbols()
    # Bad symbols are symbols present in portfolio but not in all syms
    ls_bad_syms = list(set(ls_port_syms) - set(ls_all_syms))
    if len(ls_bad_syms) != 0:
        print "Portfolio contains bad symbols : ", ls_bad_syms
    # Drop bad symbols and their allocations (index shifts are safe because
    # each pop re-resolves the symbol's current index).
    for s_sym in ls_bad_syms:
        i_index = ls_port_syms.index(s_sym)
        ls_port_syms.pop(i_index)
        lf_port_alloc.pop(i_index)
    # Reading the historical data.
    dt_end = dt.datetime(2011, 1, 1)
    dt_start = dt_end - dt.timedelta(days=1095)  # Three years
    # We need closing prices so the timestamp should be hours=16.
    dt_timeofday = dt.timedelta(hours=16)
    # Get a list of trading days between the start and the end.
    ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt_timeofday)
    # Keys to be read from the data, it is good to read everything in one go.
    ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
    # Reading the data, now d_data is a dictionary with the keys above.
    # Timestamps and symbols are the ones that were specified before.
    ldf_data = c_dataobj.get_data(ldt_timestamps, ls_port_syms, ls_keys)
    d_data = dict(zip(ls_keys, ldf_data))
    # Copying close price into separate dataframe to find rets
    df_rets = d_data['close'].copy()
    # Filling the data: forward-fill first, then back-fill leading gaps,
    # then default any remaining holes to 1.0.
    df_rets = df_rets.fillna(method='ffill')
    df_rets = df_rets.fillna(method='bfill')
    df_rets = df_rets.fillna(1.0)
    # Numpy matrix of filled data values
    na_rets = df_rets.values
    # returnize0 works on ndarray and not dataframes.
    # NOTE: this converts prices to daily returns in place.
    tsu.returnize0(na_rets)
    # Estimate portfolio returns as the allocation-weighted sum of returns.
    na_portrets = np.sum(na_rets * lf_port_alloc, axis=1)
    na_port_total = np.cumprod(na_portrets + 1)
    na_component_total = np.cumprod(na_rets + 1, axis=0)
    # Plotting the results
    plt.clf()
    fig = plt.figure()
    fig.add_subplot(111)
    plt.plot(ldt_timestamps, na_component_total, alpha=0.4)
    plt.plot(ldt_timestamps, na_port_total)
    # NOTE(review): ls_names aliases ls_port_syms, so the append below also
    # mutates ls_port_syms; harmless here since it is no longer used.
    ls_names = ls_port_syms
    ls_names.append('Portfolio')
    plt.legend(ls_names)
    plt.ylabel('Cumulative Returns')
    plt.xlabel('Date')
    fig.autofmt_xdate(rotation=45)
    plt.savefig('tutorial3.pdf', format='pdf')
# Allow running this tutorial directly as a script.
if __name__ == '__main__':
    main()
|
{
"content_hash": "e77594169202b2a6cad9904e6e911854",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 77,
"avg_line_length": 32.757009345794394,
"alnum_prop": 0.6690442225392297,
"repo_name": "telefar/stockEye",
"id": "38d410b1c3bdc73aaae59c531c336313c5cff676",
"size": "3505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coursera-compinvest1-master/coursera-compinvest1-master/Examples/Basic/tutorial3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12814528"
},
{
"name": "Shell",
"bytes": "530"
},
{
"name": "TSQL",
"bytes": "1056"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import unittest
# Importing the TestCase subclasses into this module makes unittest discover
# and re-run the cross-entropy operator tests here (under the ngraph engine).
# The names are unused directly; discovery happens via the module namespace.
from paddle.fluid.tests.unittests.test_cross_entropy_op import TestCrossEntropyOp, TestCrossEntropyOp2, TestCrossEntropyOp3, TestCrossEntropyOp4, TestCrossEntropyOp5, TestCrossEntropyOp6, TestCrossEntropyOp7
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "fb753d75908f2082552d04837a308740",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 207,
"avg_line_length": 44.42857142857143,
"alnum_prop": 0.8038585209003215,
"repo_name": "chengduoZH/Paddle",
"id": "3057218a1d80deffe7eb3164c2350143fc38007d",
"size": "924",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/ngraph/test_cross_entropy_ngraph_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "32490"
},
{
"name": "C++",
"bytes": "10146609"
},
{
"name": "CMake",
"bytes": "291349"
},
{
"name": "Cuda",
"bytes": "1192566"
},
{
"name": "Dockerfile",
"bytes": "10002"
},
{
"name": "Python",
"bytes": "7124331"
},
{
"name": "Ruby",
"bytes": "353"
},
{
"name": "Shell",
"bytes": "200906"
}
],
"symlink_target": ""
}
|
from . import AWSObject, AWSProperty, PropsDictType, Tags
from .validators import boolean, double, integer
class ConnectAttachmentOptions(AWSProperty):
    """
    `ConnectAttachmentOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-networkmanager-connectattachment-connectattachmentoptions.html>`__
    """
    # CloudFormation property name -> (expected type, required?).
    props: PropsDictType = {
        "Protocol": (str, False),
    }
class ConnectAttachment(AWSObject):
    """
    `ConnectAttachment <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-connectattachment.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::ConnectAttachment"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "CoreNetworkId": (str, False),
        "EdgeLocation": (str, False),
        "Options": (ConnectAttachmentOptions, False),
        "Tags": (Tags, False),
        "TransportAttachmentId": (str, False),
    }
class BgpOptions(AWSProperty):
    """
    `BgpOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-networkmanager-connectpeer-bgpoptions.html>`__
    """
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "PeerAsn": (double, False),
    }
class ConnectPeer(AWSObject):
    """
    `ConnectPeer <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-connectpeer.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::ConnectPeer"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "BgpOptions": (BgpOptions, False),
        "ConnectAttachmentId": (str, False),
        "CoreNetworkAddress": (str, False),
        "InsideCidrBlocks": ([str], False),
        "PeerAddress": (str, False),
        "Tags": (Tags, False),
    }
class CoreNetwork(AWSObject):
    """
    `CoreNetwork <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-corenetwork.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::CoreNetwork"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "Description": (str, False),
        "GlobalNetworkId": (str, True),
        "PolicyDocument": (dict, False),
        "Tags": (Tags, False),
    }
class CustomerGatewayAssociation(AWSObject):
    """
    `CustomerGatewayAssociation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-customergatewayassociation.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::CustomerGatewayAssociation"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "CustomerGatewayArn": (str, True),
        "DeviceId": (str, True),
        "GlobalNetworkId": (str, True),
        "LinkId": (str, False),
    }
class Location(AWSProperty):
    """
    `Location <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-networkmanager-site-location.html>`__
    """
    # Property name -> (expected type, required?).  Shared by Device and Site.
    props: PropsDictType = {
        "Address": (str, False),
        "Latitude": (str, False),
        "Longitude": (str, False),
    }
class Device(AWSObject):
    """
    `Device <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-device.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::Device"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "Description": (str, False),
        "GlobalNetworkId": (str, True),
        "Location": (Location, False),
        "Model": (str, False),
        "SerialNumber": (str, False),
        "SiteId": (str, False),
        "Tags": (Tags, False),
        "Type": (str, False),
        "Vendor": (str, False),
    }
class GlobalNetwork(AWSObject):
    """
    `GlobalNetwork <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-globalnetwork.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::GlobalNetwork"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "Description": (str, False),
        "Tags": (Tags, False),
    }
class Bandwidth(AWSProperty):
    """
    `Bandwidth <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-networkmanager-link-bandwidth.html>`__
    """
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "DownloadSpeed": (integer, False),
        "UploadSpeed": (integer, False),
    }
class Link(AWSObject):
    """
    `Link <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-link.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::Link"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "Bandwidth": (Bandwidth, True),
        "Description": (str, False),
        "GlobalNetworkId": (str, True),
        "Provider": (str, False),
        "SiteId": (str, True),
        "Tags": (Tags, False),
        "Type": (str, False),
    }
class LinkAssociation(AWSObject):
    """
    `LinkAssociation <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-linkassociation.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::LinkAssociation"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "DeviceId": (str, True),
        "GlobalNetworkId": (str, True),
        "LinkId": (str, True),
    }
class Site(AWSObject):
    """
    `Site <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-site.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::Site"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "Description": (str, False),
        "GlobalNetworkId": (str, True),
        "Location": (Location, False),
        "Tags": (Tags, False),
    }
class SiteToSiteVpnAttachment(AWSObject):
    """
    `SiteToSiteVpnAttachment <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-sitetositevpnattachment.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::SiteToSiteVpnAttachment"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "CoreNetworkId": (str, False),
        "Tags": (Tags, False),
        "VpnConnectionArn": (str, False),
    }
class TransitGatewayRegistration(AWSObject):
    """
    `TransitGatewayRegistration <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-transitgatewayregistration.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::TransitGatewayRegistration"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "GlobalNetworkId": (str, True),
        "TransitGatewayArn": (str, True),
    }
class VpcOptions(AWSProperty):
    """
    `VpcOptions <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-networkmanager-vpcattachment-vpcoptions.html>`__
    """
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "Ipv6Support": (boolean, False),
    }
class VpcAttachment(AWSObject):
    """
    `VpcAttachment <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-networkmanager-vpcattachment.html>`__
    """
    # CloudFormation resource type handled by this class.
    resource_type = "AWS::NetworkManager::VpcAttachment"
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "CoreNetworkId": (str, False),
        "Options": (VpcOptions, False),
        "SubnetArns": ([str], False),
        "Tags": (Tags, False),
        "VpcArn": (str, False),
    }
class CoreNetworkEdge(AWSProperty):
    """
    `CoreNetworkEdge <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-networkmanager-corenetwork-corenetworkedge.html>`__
    """
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "Asn": (double, False),
        "EdgeLocation": (str, False),
        "InsideCidrBlocks": ([str], False),
    }
class CoreNetworkSegment(AWSProperty):
    """
    `CoreNetworkSegment <http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-networkmanager-corenetwork-corenetworksegment.html>`__
    """
    # Property name -> (expected type, required?).
    props: PropsDictType = {
        "EdgeLocations": ([str], False),
        "Name": (str, False),
        "SharedSegments": ([str], False),
    }
|
{
"content_hash": "d2d811557de1a653704c4730bce3d085",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 174,
"avg_line_length": 28.871698113207547,
"alnum_prop": 0.6437067050058816,
"repo_name": "cloudtools/troposphere",
"id": "b98490b7c77337455073733f2fd5985c923fa279",
"size": "7823",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "troposphere/networkmanager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "2754"
},
{
"name": "Python",
"bytes": "2305574"
},
{
"name": "Shell",
"bytes": "625"
}
],
"symlink_target": ""
}
|
import os
from os.path import join
import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
assert_raises)
import pytest
from numpy.random import (
Generator, MT19937, PCG64, PCG64DXSM, Philox, RandomState, SeedSequence,
SFC64, default_rng
)
from numpy.random._common import interface
# Detect optional FFI backends used by the cffi/ctypes interface tests.
try:
    import cffi  # noqa: F401
    MISSING_CFFI = False
except ImportError:
    MISSING_CFFI = True
try:
    import ctypes  # noqa: F401
    MISSING_CTYPES = False
except ImportError:
    # Bug fix: previously this branch set MISSING_CTYPES = False, so the
    # ctypes-dependent tests would still run (and fail) when ctypes is
    # unavailable.  The flag must be True when the import fails.
    MISSING_CTYPES = True
if sys.flags.optimize > 1:
    # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
    # cffi cannot succeed
    MISSING_CFFI = True
# Directory of this test file; used to locate the reference CSV data sets.
pwd = os.path.dirname(os.path.abspath(__file__))
def assert_state_equal(actual, target):
    """Recursively assert that two bit-generator state dicts are equal.

    Nested dicts are compared key-by-key and ndarray values are compared
    element-wise; everything else uses plain ``==``.
    """
    for key, value in actual.items():
        if isinstance(value, dict):
            assert_state_equal(value, target[key])
        elif isinstance(value, np.ndarray):
            assert_array_equal(value, target[key])
        else:
            assert value == target[key]
def uniform32_from_uint64(x):
    """Convert 64-bit draws to float32 values in [0, 1).

    Each 64-bit word yields two outputs: the low 32 bits first, then the
    high 32 bits, each mapped through the standard 23-bit mantissa scaling.
    """
    x = np.uint64(x)
    hi = np.array(x >> np.uint64(32), dtype=np.uint32)
    lo = np.array(x & np.uint64(0xffffffff), dtype=np.uint32)
    interleaved = np.column_stack([lo, hi]).ravel()
    scaled = (interleaved >> np.uint32(9)) * (1.0 / 2 ** 23)
    return scaled.astype(np.float32)
def uniform32_from_uint53(x):
    """Convert 53-bit draws (stored in uint64) to float32 values in [0, 1)."""
    shifted = np.uint64(x) >> np.uint64(16)
    low32 = np.uint32(shifted & np.uint64(0xffffffff))
    mantissa = low32 >> np.uint32(9)
    return (mantissa * (1.0 / 2 ** 23)).astype(np.float32)
def uniform32_from_uint32(x):
    """Map 32-bit draws to floats in [0, 1) using the top 23 bits."""
    scale = 1.0 / 2 ** 23
    return (x >> np.uint32(9)) * scale
def uniform32_from_uint(x, bits):
    """Dispatch raw draws to the float32 converter for the given bit width.

    Raises NotImplementedError for any unsupported width.
    """
    if bits == 64:
        return uniform32_from_uint64(x)
    if bits == 53:
        return uniform32_from_uint53(x)
    if bits == 32:
        return uniform32_from_uint32(x)
    raise NotImplementedError
def uniform_from_uint(x, bits):
    """Dispatch raw draws to the float64 converter for the given bit width.

    Bug fix / consistency: previously an unsupported ``bits`` value fell
    through and silently returned None; now it raises NotImplementedError,
    matching the behavior of ``uniform32_from_uint``.
    """
    if bits in (64, 63, 53):
        return uniform_from_uint64(x)
    elif bits == 32:
        return uniform_from_uint32(x)
    else:
        raise NotImplementedError
def uniform_from_uint64(x):
    """Map 64-bit draws to float64 values in [0, 1) via the top 53 bits."""
    scale = 1.0 / 9007199254740992.0  # 1 / 2**53
    return (x >> np.uint64(11)) * scale
def uniform_from_uint32(x):
    """Combine pairs of 32-bit draws (27 high bits + 26 bits) into one
    float64 in [0, 1), mirroring the classic double-from-two-uint32 recipe."""
    result = np.empty(len(x) // 2)
    for out_idx, in_idx in enumerate(range(0, len(x), 2)):
        hi = x[in_idx] >> 5
        lo = x[in_idx + 1] >> 6
        result[out_idx] = (hi * 67108864.0 + lo) / 9007199254740992.0
    return result
def uniform_from_dsfmt(x):
    """dSFMT produces doubles in [1, 2); reinterpret the bits and shift
    the result down into [0, 1)."""
    return x.view(np.float64) - 1.0
def gauss_from_uint(x, n, bits):
    """Reference polar (Marsaglia) Box-Muller transform.

    Converts raw integer draws ``x`` into ``n`` standard-normal deviates,
    reproducing the consumption order of the legacy C implementation so the
    result can be compared against ``RandomState.standard_normal``.
    """
    if bits in (64, 63):
        doubles = uniform_from_uint64(x)
    elif bits == 32:
        doubles = uniform_from_uint32(x)
    else:  # bits == 'dsfmt'
        doubles = uniform_from_dsfmt(x)
    gauss = []
    loc = 0
    x1 = x2 = 0.0
    while len(gauss) < n:
        r2 = 2
        # Rejection loop: draw points in [-1, 1)^2 until one lands strictly
        # inside the unit circle and away from the origin.
        while r2 >= 1.0 or r2 == 0.0:
            x1 = 2.0 * doubles[loc] - 1.0
            x2 = 2.0 * doubles[loc + 1] - 1.0
            r2 = x1 * x1 + x2 * x2
            loc += 2
        f = np.sqrt(-2.0 * np.log(r2) / r2)
        # Append order (x2's deviate first) matches the C implementation.
        gauss.append(f * x2)
        gauss.append(f * x1)
    return gauss[:n]
def test_seedsequence():
    """SeedSequence state round-trips, spawning works, and the abstract
    seed-sequence interfaces cannot be instantiated."""
    from numpy.random.bit_generator import (ISeedSequence,
                                            ISpawnableSeedSequence,
                                            SeedlessSeedSequence)
    original = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
    original.spawn(10)
    restored = SeedSequence(**original.state)
    assert_equal(original.state, restored.state)
    assert_equal(original.n_children_spawned, restored.n_children_spawned)
    # The interfaces are abstract and must refuse direct construction.
    assert_raises(TypeError, ISeedSequence)
    assert_raises(TypeError, ISpawnableSeedSequence)
    seedless = SeedlessSeedSequence()
    assert_raises(NotImplementedError, seedless.generate_state, 10)
    assert len(seedless.spawn(10)) == 10
class Base:
    # Shared scaffolding for the per-BitGenerator test classes below.  Each
    # subclass overrides setup_class to pick the generator and to load the
    # reference streams produced by the original C implementations.
    dtype = np.uint64
    # NOTE(review): data1 and data2 deliberately alias one shared dict here;
    # every subclass rebinds both in setup_class before any test runs.
    data2 = data1 = {}
    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64
        cls.bits = 64
        cls.dtype = np.uint64
        cls.seed_error_type = TypeError
        cls.invalid_init_types = []
        cls.invalid_init_values = []
    @classmethod
    def _read_csv(cls, filename):
        # Reference CSV format: the first line carries the seed values, each
        # remaining line carries one expected raw output in its last field.
        with open(filename) as csv:
            seed = csv.readline()
            seed = seed.split(',')
            seed = [int(s.strip(), 0) for s in seed[1:]]
            data = []
            for line in csv:
                data.append(int(line.split(',')[-1].strip(), 0))
            return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
    def test_raw(self):
        # Raw draws must match the reference stream for both seed sets.
        bit_generator = self.bit_generator(*self.data1['seed'])
        uints = bit_generator.random_raw(1000)
        assert_equal(uints, self.data1['data'])
        bit_generator = self.bit_generator(*self.data1['seed'])
        uints = bit_generator.random_raw()
        assert_equal(uints, self.data1['data'][0])
        bit_generator = self.bit_generator(*self.data2['seed'])
        uints = bit_generator.random_raw(1000)
        assert_equal(uints, self.data2['data'])
    def test_random_raw(self):
        # With output=False the generator advances but returns nothing.
        bit_generator = self.bit_generator(*self.data1['seed'])
        uints = bit_generator.random_raw(output=False)
        assert uints is None
        uints = bit_generator.random_raw(1000, output=False)
        assert uints is None
    def test_gauss_inv(self):
        # Legacy RandomState normals must match the polar Box-Muller
        # reference built from the same raw stream.
        n = 25
        rs = RandomState(self.bit_generator(*self.data1['seed']))
        gauss = rs.standard_normal(n)
        assert_allclose(gauss,
                        gauss_from_uint(self.data1['data'], n, self.bits))
        rs = RandomState(self.bit_generator(*self.data2['seed']))
        gauss = rs.standard_normal(25)
        assert_allclose(gauss,
                        gauss_from_uint(self.data2['data'], n, self.bits))
    def test_uniform_double(self):
        # Generator.random (float64) must match the 53-bit conversion of
        # the raw reference stream.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        vals = uniform_from_uint(self.data1['data'], self.bits)
        uniforms = rs.random(len(vals))
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float64)
        rs = Generator(self.bit_generator(*self.data2['seed']))
        vals = uniform_from_uint(self.data2['data'], self.bits)
        uniforms = rs.random(len(vals))
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float64)
    def test_uniform_float(self):
        # Generator.random(dtype=float32) must match the 23-bit conversion.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        vals = uniform32_from_uint(self.data1['data'], self.bits)
        uniforms = rs.random(len(vals), dtype=np.float32)
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float32)
        rs = Generator(self.bit_generator(*self.data2['seed']))
        vals = uniform32_from_uint(self.data2['data'], self.bits)
        uniforms = rs.random(len(vals), dtype=np.float32)
        assert_allclose(uniforms, vals)
        assert_equal(uniforms.dtype, np.float32)
    def test_repr(self):
        # repr() includes the class name and the instance id in hex.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        assert 'Generator' in repr(rs)
        assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs)
    def test_str(self):
        # str() names the generator but omits the instance id.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        assert 'Generator' in str(rs)
        assert str(self.bit_generator.__name__) in str(rs)
        assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs)
    def test_pickle(self):
        # Pickling must preserve state (a distinct but equal generator) and
        # SeedSequence must round-trip as well.
        import pickle
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        bitgen_pkl = pickle.dumps(bit_generator)
        reloaded = pickle.loads(bitgen_pkl)
        reloaded_state = reloaded.state
        assert_array_equal(Generator(bit_generator).standard_normal(1000),
                           Generator(reloaded).standard_normal(1000))
        assert bit_generator is not reloaded
        assert_state_equal(reloaded_state, state)
        ss = SeedSequence(100)
        aa = pickle.loads(pickle.dumps(ss))
        assert_equal(ss.state, aa.state)
    def test_invalid_state_type(self):
        # Assigning a non-dict state must raise TypeError.
        bit_generator = self.bit_generator(*self.data1['seed'])
        with pytest.raises(TypeError):
            bit_generator.state = {'1'}
    def test_invalid_state_value(self):
        # A state dict naming the wrong bit generator must be rejected.
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        state['bit_generator'] = 'otherBitGenerator'
        with pytest.raises(ValueError):
            bit_generator.state = state
    def test_invalid_init_type(self):
        # Constructor arguments of the wrong type raise TypeError.
        bit_generator = self.bit_generator
        for st in self.invalid_init_types:
            with pytest.raises(TypeError):
                bit_generator(*st)
    def test_invalid_init_values(self):
        # Out-of-range constructor arguments raise ValueError/OverflowError.
        bit_generator = self.bit_generator
        for st in self.invalid_init_values:
            with pytest.raises((ValueError, OverflowError)):
                bit_generator(*st)
    def test_benchmark(self):
        # The private _benchmark hook accepts 'uint64'-style and 'double'
        # modes and rejects unknown ones.
        bit_generator = self.bit_generator(*self.data1['seed'])
        bit_generator._benchmark(1)
        bit_generator._benchmark(1, 'double')
        with pytest.raises(ValueError):
            bit_generator._benchmark(1, 'int32')
    @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
    def test_cffi(self):
        # The cffi interface is created lazily and then cached.
        bit_generator = self.bit_generator(*self.data1['seed'])
        cffi_interface = bit_generator.cffi
        assert isinstance(cffi_interface, interface)
        other_cffi_interface = bit_generator.cffi
        assert other_cffi_interface is cffi_interface
    @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
    def test_ctypes(self):
        # The ctypes interface is created lazily and then cached.
        bit_generator = self.bit_generator(*self.data1['seed'])
        ctypes_interface = bit_generator.ctypes
        assert isinstance(ctypes_interface, interface)
        other_ctypes_interface = bit_generator.ctypes
        assert other_ctypes_interface is ctypes_interface
    def test_getstate(self):
        # __getstate__ must agree with the .state property.
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        alt_state = bit_generator.__getstate__()
        assert_state_equal(state, alt_state)
class TestPhilox(Base):
    # Philox reference streams; also verifies counter/key construction.
    @classmethod
    def setup_class(cls):
        cls.bit_generator = Philox
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(
            join(pwd, './data/philox-testset-1.csv'))
        cls.data2 = cls._read_csv(
            join(pwd, './data/philox-testset-2.csv'))
        cls.seed_error_type = TypeError
        cls.invalid_init_types = []
        cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]
    def test_set_key(self):
        # Building from an explicit counter/key pair must reproduce the
        # state obtained by ordinary seeding.
        bit_generator = self.bit_generator(*self.data1['seed'])
        state = bit_generator.state
        keyed = self.bit_generator(counter=state['state']['counter'],
                                   key=state['state']['key'])
        assert_state_equal(bit_generator.state, keyed.state)
class TestPCG64(Base):
    # PCG64 (XSL-RR) reference streams; also checks advance() arithmetic.
    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
        cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
        cls.seed_error_type = (ValueError, TypeError)
        cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
        cls.invalid_init_values = [(-1,)]
    def test_advance_symmetry(self):
        # advance() works modulo 2**128: a negative step and the equivalent
        # positive step (possibly plus whole periods) land on the same state.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        state = rs.bit_generator.state
        step = -0x9e3779b97f4a7c150000000000000000
        rs.bit_generator.advance(step)
        val_neg = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(2**128 + step)
        val_pos = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(10 * 2**128 + step)
        val_big = rs.integers(10)
        assert val_neg == val_pos
        assert val_big == val_pos
class TestPCG64DXSM(Base):
    # PCG64 DXSM-variant reference streams.  test_advance_symmetry is
    # intentionally identical to TestPCG64's: same modular-advance contract.
    @classmethod
    def setup_class(cls):
        cls.bit_generator = PCG64DXSM
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-1.csv'))
        cls.data2 = cls._read_csv(join(pwd, './data/pcg64dxsm-testset-2.csv'))
        cls.seed_error_type = (ValueError, TypeError)
        cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
        cls.invalid_init_values = [(-1,)]
    def test_advance_symmetry(self):
        rs = Generator(self.bit_generator(*self.data1['seed']))
        state = rs.bit_generator.state
        step = -0x9e3779b97f4a7c150000000000000000
        rs.bit_generator.advance(step)
        val_neg = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(2**128 + step)
        val_pos = rs.integers(10)
        rs.bit_generator.state = state
        rs.bit_generator.advance(10 * 2**128 + step)
        val_big = rs.integers(10)
        assert val_neg == val_pos
        assert val_big == val_pos
class TestMT19937(Base):
    # Mersenne Twister reference streams (32-bit outputs); also checks the
    # rejection of float seeds and the legacy tuple-form state.
    @classmethod
    def setup_class(cls):
        cls.bit_generator = MT19937
        cls.bits = 32
        cls.dtype = np.uint32
        cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
        cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
        cls.seed_error_type = ValueError
        cls.invalid_init_types = []
        cls.invalid_init_values = [(-1,)]
    def test_seed_float_array(self):
        # Seeds must be integers; any float (scalar, array, or list) raises.
        assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
        assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
        assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
        assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
        assert_raises(TypeError, self.bit_generator, [np.pi])
        assert_raises(TypeError, self.bit_generator, [0, np.pi])
    def test_state_tuple(self):
        # The legacy tuple state (name, key, pos) is accepted, with or
        # without the trailing (has_gauss, gauss) fields, and must restore
        # the exact stream position.
        rs = Generator(self.bit_generator(*self.data1['seed']))
        bit_generator = rs.bit_generator
        state = bit_generator.state
        desired = rs.integers(2 ** 16)
        tup = (state['bit_generator'], state['state']['key'],
               state['state']['pos'])
        bit_generator.state = tup
        actual = rs.integers(2 ** 16)
        assert_equal(actual, desired)
        tup = tup + (0, 0.0)
        bit_generator.state = tup
        actual = rs.integers(2 ** 16)
        assert_equal(actual, desired)
class TestSFC64(Base):
    # SFC64 reference streams; relies entirely on the Base test methods.
    @classmethod
    def setup_class(cls):
        cls.bit_generator = SFC64
        cls.bits = 64
        cls.dtype = np.uint64
        cls.data1 = cls._read_csv(
            join(pwd, './data/sfc64-testset-1.csv'))
        cls.data2 = cls._read_csv(
            join(pwd, './data/sfc64-testset-2.csv'))
        cls.seed_error_type = (ValueError, TypeError)
        cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
        cls.invalid_init_values = [(-1,)]
class TestDefaultRNG:
    """Behavior of the default_rng() convenience constructor."""
    def test_seed(self):
        # Every accepted seed form must yield a PCG64-backed Generator.
        for seed_args in [(), (None,), (1234,), ([1234, 5678],)]:
            gen = default_rng(*seed_args)
            assert isinstance(gen.bit_generator, PCG64)
    def test_passthrough(self):
        # A BitGenerator is wrapped; an existing Generator is returned as-is.
        bitgen = Philox()
        gen = default_rng(bitgen)
        assert gen.bit_generator is bitgen
        same_gen = default_rng(gen)
        assert same_gen is gen
        assert same_gen.bit_generator is bitgen
|
{
"content_hash": "f6251585b1207274b024afca9d8b710e",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 83,
"avg_line_length": 34.15673289183223,
"alnum_prop": 0.5971692625864409,
"repo_name": "ryfeus/lambda-packs",
"id": "29054b70b95a01227351dba02040aac7d82c2bb2",
"size": "15473",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Sklearn_arm/source/numpy/random/tests/test_direct.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
"""
When training a model, it's often useful to decay the
learning rate during training process, this is called
learning_rate_decay. There are many strategies to do
this, this module will provide some classical method.
User can also implement their own learning_rate_decay
strategy according to this module.
"""
import control_flow
import nn
import ops
import tensor
from ..initializer import init_on_cpu
__all__ = [
'exponential_decay', 'natural_exp_decay', 'inverse_time_decay',
'polynomial_decay', 'piecewise_decay', 'noam_decay'
]
def _decay_step_counter(begin=0):
    """Create (or fetch) the shared LR-decay step counter as float32.

    The counter starts at ``begin`` (zero by default, i.e. the first global
    step is zero) and is incremented by one per execution.
    """
    counter = nn.autoincreased_step_counter(
        counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1)
    return tensor.cast(counter, 'float32')
def noam_decay(d_model, warmup_steps):
    """
    Noam decay method.  Numpy equivalent:

    >>> import numpy as np
    >>> lr_value = np.power(d_model, -0.5) * np.min([
    >>>                         np.power(current_steps, -0.5),
    >>>                         np.power(warmup_steps, -1.5) * current_steps])

    See `Attention Is All You Need <https://arxiv.org/pdf/1706.03762.pdf>`_.

    Args:
        d_model(Variable): The dimensionality of input and output of model.
        warmup_steps(Variable): A super parameter.

    Returns:
        The decayed learning rate.
    """
    step = _decay_step_counter(1)
    with init_on_cpu():
        # Decreasing term dominates after warmup; linear term during warmup.
        decay_part = step**-0.5
        warmup_part = (warmup_steps**-1.5) * step
        lr_value = (d_model**-0.5) * ops.elementwise_min(decay_part,
                                                         warmup_part)
    return lr_value
def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """Applies exponential decay to the learning rate.

    ```python
    decayed_learning_rate = learning_rate *
            decay_rate ^ (global_step / decay_steps)
    ```

    Args:
        learning_rate: A scalar float32 value or a Variable. This
          will be the initial learning rate during training
        decay_steps: A Python `int32` number.
        decay_rate: A Python `float` number.
        staircase: Boolean. If set true, decay the learning rate every decay_steps.

    Returns:
        The decayed learning rate
    """
    step = _decay_step_counter()
    with init_on_cpu():
        exponent = step / decay_steps
        if staircase:
            # Discretize so the rate only changes once per decay_steps steps.
            exponent = ops.floor(exponent)
        decayed_lr = learning_rate * (decay_rate**exponent)
    return decayed_lr
def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """Applies natural exponential decay to the initial learning rate.
    >>> if not staircase:
    >>>     decayed_learning_rate = learning_rate * exp(- decay_rate * (global_step / decay_steps))
    >>> else:
    >>>     decayed_learning_rate = learning_rate * exp(- decay_rate * floor(global_step / decay_steps))
    Args:
        learning_rate: A scalar float32 value or a Variable. This
          will be the initial learning rate during training
        decay_steps: A Python `int32` number.
        decay_rate: A Python `float` number.
        staircase: Boolean. If set true, decay the learning rate every decay_steps.
    Returns:
        The decayed learning rate
    """
    global_step = _decay_step_counter()
    with init_on_cpu():
        div_res = global_step / decay_steps
        if staircase:
            # Discretize so the rate only changes once per decay_steps steps.
            div_res = ops.floor(div_res)
        decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res)
    return decayed_lr
def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
    """Applies inverse time decay to the initial learning rate.

    >>> if staircase:
    >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step))
    >>> else:
    >>>     decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step)

    Args:
        learning_rate: A scalar float32 value or a Variable. This
          will be the initial learning rate during training.
        decay_steps: A Python `int32` number.
        decay_rate: A Python `float` number.
        staircase: Boolean. If set true, decay the learning rate every decay_steps.

    Returns:
        The decayed learning rate
    """
    step = _decay_step_counter()
    with init_on_cpu():
        ratio = step / decay_steps
        if staircase:
            # Discretize so the rate only changes once per decay_steps steps.
            ratio = ops.floor(ratio)
        decayed_lr = learning_rate / (1 + decay_rate * ratio)
    return decayed_lr
def polynomial_decay(learning_rate,
                     decay_steps,
                     end_learning_rate=0.0001,
                     power=1.0,
                     cycle=False):
    """Applies polynomial decay to the initial learning rate.
    >>> if cycle:
    >>>     decay_steps = decay_steps * ceil(global_step / decay_steps)
    >>> else:
    >>>     global_step = min(global_step, decay_steps)
    >>> decayed_learning_rate = (learning_rate - end_learning_rate) *
    >>>                   (1 - global_step / decay_steps) ^ power +
    >>>                   end_learning_rate
    Args:
        learning_rate: A scalar float32 value or a Variable. This
          will be the initial learning rate during training
        decay_steps: A Python `int32` number.
        end_learning_rate: A Python `float` number.
        power: A Python `float` number
        cycle: Boolean. If set true, decay the learning rate every decay_steps.
    Returns:
        The decayed learning rate
    """
    global_step = _decay_step_counter()
    with init_on_cpu():
        if cycle:
            div_res = ops.ceil(global_step / decay_steps)
            zero_var = tensor.fill_constant(
                shape=[1], dtype='float32', value=0.0)
            one_var = tensor.fill_constant(
                shape=[1], dtype='float32', value=1.0)
            with control_flow.Switch() as switch:
                with switch.case(global_step == zero_var):
                    # ceil(0 / decay_steps) is 0; force the multiplier to 1
                    # so decay_steps is never scaled to zero at step 0.
                    tensor.assign(input=one_var, output=div_res)
            decay_steps = decay_steps * div_res
        else:
            decay_steps_var = tensor.fill_constant(
                shape=[1], dtype='float32', value=float(decay_steps))
            # Clamp the step so the rate bottoms out at end_learning_rate
            # instead of continuing past it.
            global_step = ops.elementwise_min(x=global_step, y=decay_steps_var)
        decayed_lr = (learning_rate - end_learning_rate) * \
            ((1 - global_step / decay_steps) ** power) + end_learning_rate
    return decayed_lr
def piecewise_decay(boundaries, values):
    """Applies piecewise decay to the initial learning rate.
    >>> boundaries = [10000, 20000]
    >>> values = [1.0, 0.5, 0.1]
    >>>
    >>> if step < 10000:
    >>>     learning_rate = 1.0
    >>> elif 10000 <= step < 20000:
    >>>     learning_rate = 0.5
    >>> else:
    >>>     learning_rate = 0.1
    """
    # values must have exactly one more entry than boundaries: one value per
    # interval, including the final open-ended interval.
    if len(values) - len(boundaries) != 1:
        raise ValueError("len(values) - len(boundaries) should be 1")
    global_step = _decay_step_counter()
    with init_on_cpu():
        lr = tensor.create_global_var(
            shape=[1],
            value=0.0,
            dtype='float32',
            persistable=True,
            name="learning_rate")
        # Switch picks the FIRST matching case, so testing
        # global_step < boundary[i] in increasing order selects values[i]
        # for the interval [boundary[i-1], boundary[i]).
        with control_flow.Switch() as switch:
            for i in range(len(boundaries)):
                boundary_val = tensor.fill_constant(
                    shape=[1], dtype='float32', value=float(boundaries[i]))
                value_var = tensor.fill_constant(
                    shape=[1], dtype='float32', value=float(values[i]))
                with switch.case(global_step < boundary_val):
                    tensor.assign(value_var, lr)
            last_value_var = tensor.fill_constant(
                shape=[1],
                dtype='float32',
                value=float(values[len(values) - 1]))
            # Default case: past the last boundary, use the final value.
            with switch.default():
                tensor.assign(last_value_var, lr)
    return lr
|
{
"content_hash": "8844a22cda9d825fb7679854ca0338a1",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 102,
"avg_line_length": 33.73529411764706,
"alnum_prop": 0.5939718520363682,
"repo_name": "Canpio/Paddle",
"id": "716cc7824eff0c56cc55a055310fa8b1913ac5e6",
"size": "8638",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/layers/learning_rate_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274629"
},
{
"name": "C++",
"bytes": "4761657"
},
{
"name": "CMake",
"bytes": "209462"
},
{
"name": "CSS",
"bytes": "21730"
},
{
"name": "Cuda",
"bytes": "738162"
},
{
"name": "Go",
"bytes": "99765"
},
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "JavaScript",
"bytes": "1025"
},
{
"name": "Perl",
"bytes": "11452"
},
{
"name": "Protocol Buffer",
"bytes": "54402"
},
{
"name": "Python",
"bytes": "1526791"
},
{
"name": "Shell",
"bytes": "136472"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """
    Refactoring migration: attaches help_text/verbose_name metadata to fields
    on Project, Snapshot and Value. Only field options change; no columns are
    added or removed.
    """

    dependencies = [
        ('projects', '0010_add_db_contraint'),
    ]

    operations = [
        migrations.AlterField(
            model_name='project',
            name='catalog',
            field=models.ForeignKey(help_text='The catalog which will be used for this project.', on_delete=django.db.models.deletion.CASCADE, related_name='+', to='questions.Catalog', verbose_name='Catalog'),
        ),
        migrations.AlterField(
            model_name='project',
            name='description',
            field=models.TextField(blank=True, help_text='A description for this project (optional).', verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='project',
            name='owner',
            field=models.ManyToManyField(help_text='The list of owners for this project.', to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
        ),
        migrations.AlterField(
            model_name='project',
            name='title',
            field=models.CharField(help_text='The title for this project.', max_length=256, verbose_name='Title'),
        ),
        migrations.AlterField(
            model_name='snapshot',
            name='description',
            field=models.TextField(blank=True, help_text='A description for this snapshot (optional).', verbose_name='Description'),
        ),
        migrations.AlterField(
            model_name='snapshot',
            name='project',
            field=models.ForeignKey(help_text='The project this snapshot belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='snapshots', to='projects.Project', verbose_name='Project'),
        ),
        migrations.AlterField(
            model_name='snapshot',
            name='title',
            field=models.CharField(help_text='The title for this snapshot.', max_length=256, verbose_name='Title'),
        ),
        migrations.AlterField(
            model_name='value',
            name='attribute',
            field=models.ForeignKey(blank=True, help_text='The attribute this value belongs to.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='values', to='domain.Attribute', verbose_name='Attribute'),
        ),
        migrations.AlterField(
            model_name='value',
            name='collection_index',
            field=models.IntegerField(default=0, help_text='The position of this value in an attribute collection.', verbose_name='Collection index'),
        ),
        migrations.AlterField(
            model_name='value',
            name='option',
            field=models.ForeignKey(blank=True, help_text='The option stored for this value.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='options.Option', verbose_name='Option'),
        ),
        migrations.AlterField(
            model_name='value',
            name='project',
            field=models.ForeignKey(help_text='The project this value belongs to.', on_delete=django.db.models.deletion.CASCADE, related_name='values', to='projects.Project', verbose_name='Project'),
        ),
        migrations.AlterField(
            model_name='value',
            name='set_index',
            field=models.IntegerField(default=0, help_text='The position of this value in an entity collection (i.e. in the question set)', verbose_name='Set index'),
        ),
        migrations.AlterField(
            model_name='value',
            name='snapshot',
            field=models.ForeignKey(blank=True, help_text='The snapshot this value belongs to.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='values', to='projects.Snapshot', verbose_name='Snapshot'),
        ),
        migrations.AlterField(
            model_name='value',
            name='text',
            field=models.TextField(blank=True, help_text='The string stored for this value.', null=True, verbose_name='Text'),
        ),
    ]
|
{
"content_hash": "008113ed1d12fdfa958b38c0323d901f",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 227,
"avg_line_length": 48.423529411764704,
"alnum_prop": 0.6209912536443148,
"repo_name": "DMPwerkzeug/DMPwerkzeug",
"id": "0a7ef9a7ca3b1b24613ed6b5d047fadae04c2934",
"size": "4186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdmo/projects/migrations/0011_refactoring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9735"
},
{
"name": "HTML",
"bytes": "126570"
},
{
"name": "JavaScript",
"bytes": "46177"
},
{
"name": "Python",
"bytes": "120676"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
#from Stream import *
from ListOperators import many_to_many
from PrintFunctions import print_streams_recent, print_stream_recent
import numpy as np
def f(two_arrays):
    """
    Given a pair of numeric arrays, return a two-element list:
    the element-wise hypotenuse and the element-wise ratio first/second.
    """
    a = two_arrays[0]
    b = two_arrays[1]
    return [np.hypot(a, b), a / b]
def example_1():
    """
    Build two stream arrays, derive hypotenuse and tangent streams from them
    via ``many_to_many`` with ``f``, then print the recent values of all four.

    NOTE(review): ``StreamArray`` is not defined in this module — it
    presumably comes from the commented-out ``from Stream import *`` above;
    as written this function raises NameError. Confirm and restore the import.
    """
    x = StreamArray('x')
    y = StreamArray('y')
    # many_to_many applies f to the 2 input streams and yields 2 output streams.
    list_of_two_stream_arrays = many_to_many(f, [x,y], 2)
    hypotenuses, tangents = list_of_two_stream_arrays
    hypotenuses.set_name('hypotenuse_stream')
    tangents.set_name('tangent_stream')
    # Feeding the inputs triggers computation of the derived streams.
    x.extend([3.0, 4.0])
    y.extend([4.0, 3.0])
    print_streams_recent([x, y,hypotenuses, tangents])
def main():
    """Entry point: run the single example."""
    example_1()

if __name__ == '__main__':
    main()
|
{
"content_hash": "67e37686e5620cb12770e1270e586db6",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 81,
"avg_line_length": 24.86111111111111,
"alnum_prop": 0.617877094972067,
"repo_name": "zatricion/Streams",
"id": "22a3989bee1de26352c7d4e8c634d2f64c8d4708",
"size": "895",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ExamplesNumpyArrayOperations/ExampleArrayManyToManyNoState.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7411"
},
{
"name": "Python",
"bytes": "72224"
}
],
"symlink_target": ""
}
|
""" Git Branch Trail model """
from django.db import models
class GitBranchTrailEntry(models.Model):
    """One commit's position in the ordered trail of a branch within a git project."""
    project = models.ForeignKey('gitrepo.GitProjectEntry', related_name='git_trail_project')
    branch = models.ForeignKey('gitrepo.GitBranchEntry', related_name='git_trail_branch')
    commit = models.ForeignKey('gitrepo.GitCommitEntry', related_name='git_trail_commit')
    # Position of this commit within the branch trail (semantics of 0 not
    # established here — TODO confirm whether 0 is the newest or oldest entry).
    order = models.IntegerField(default=0)
    created_at = models.DateTimeField(auto_now_add=True, editable=False)
    updated_at = models.DateTimeField(auto_now=True, editable=False)

    # NOTE(review): __unicode__ is the Python 2 / old-Django convention; under
    # Python 3 this method is never called by str(). Confirm target runtime.
    def __unicode__(self):
        return u'Trail - project:{0}, name:{1}, commit:{2}, order:{3}'.format(
            self.project.id,
            self.branch.name,
            self.commit.commit_hash,
            self.order
        )
|
{
"content_hash": "faa4a05d62c786a535dd29eb2d2edfc5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 92,
"avg_line_length": 41.3,
"alnum_prop": 0.6670702179176755,
"repo_name": "imvu/bluesteel",
"id": "37dd504f29a9520bb9aa92352b955692b09b57f8",
"size": "826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/logic/gitrepo/models/GitBranchTrailModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16828"
},
{
"name": "HTML",
"bytes": "119014"
},
{
"name": "JavaScript",
"bytes": "36015"
},
{
"name": "Python",
"bytes": "1220104"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """
    South migration: renames the SpecificSponsorsPlugin M2M from
    ``sponsors_to_display`` (cmsplugin_* table) to ``sponsors``
    (sponsors_* table) by dropping the old join table and creating the new one.
    NOTE: data in the old join table is not copied — rows are lost on forwards.
    """

    def forwards(self, orm):
        # Removing M2M table for field sponsors_to_display on 'SpecificSponsorsPlugin'
        db.delete_table('sponsors_specificsponsorsplugin_sponsors_to_display')

        # Adding M2M table for field sponsors on 'SpecificSponsorsPlugin'
        db.create_table('sponsors_specificsponsorsplugin_sponsors', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('specificsponsorsplugin', models.ForeignKey(orm['sponsors.specificsponsorsplugin'], null=False)),
            ('sponsor', models.ForeignKey(orm['sponsors.sponsor'], null=False))
        ))
        db.create_unique('sponsors_specificsponsorsplugin_sponsors', ['specificsponsorsplugin_id', 'sponsor_id'])

    def backwards(self, orm):
        # Adding M2M table for field sponsors_to_display on 'SpecificSponsorsPlugin'
        db.create_table('cmsplugin_specificsponsorsplugin_sponsors_to_display', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('specificsponsorsplugin', models.ForeignKey(orm['sponsors.specificsponsorsplugin'], null=False)),
            ('sponsor', models.ForeignKey(orm['sponsors.sponsor'], null=False))
        ))
        db.create_unique('cmsplugin_specificsponsorsplugin_sponsors_to_display', ['specificsponsorsplugin_id', 'sponsor_id'])

        # Removing M2M table for field sponsors on 'SpecificSponsorsPlugin'
        db.delete_table('sponsors_specificsponsorsplugin_sponsors')

    # Frozen ORM snapshot used by South to resolve orm[...] lookups above.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
            'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        'sponsors.specificsponsorsplugin': {
            'Meta': {'object_name': 'SpecificSponsorsPlugin', 'db_table': "'cmsplugin_specificsponsorsplugin'", '_ormbases': ['cms.CMSPlugin']},
            'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'sponsors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sponsors.Sponsor']", 'symmetrical': 'False'})
        },
        'sponsors.sponsor': {
            'Meta': {'object_name': 'Sponsor'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['sponsors']
|
{
"content_hash": "665cf47fd06dcaeca926429b8215fda4",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 189,
"avg_line_length": 64.41891891891892,
"alnum_prop": 0.6079295154185022,
"repo_name": "frinat/fribourg-natation.ch",
"id": "24e4441de0db0f76c5366162ff1de8cba0946f5a",
"size": "4785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "frinat/sponsors/migrations/0002_auto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "34907"
},
{
"name": "JavaScript",
"bytes": "709"
},
{
"name": "Python",
"bytes": "65739"
}
],
"symlink_target": ""
}
|
import logging
import logging.config
def init_logging(file=None, file_level=logging.DEBUG, stdout=True):
    """
    Configure the root logger via ``logging.config.dictConfig``.

    :param file: optional path; when given, a FileHandler writing timestamped
                 records is attached to the root logger.
    :param file_level: log level for the file handler (default DEBUG).
    :param stdout: when True, attach a DEBUG-level StreamHandler on sys.stdout.
    """
    handlers = {
        # Always defined; only attached to the root logger when stdout=True.
        'consoleHandler': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'consoleFormatter',
            'stream': 'ext://sys.stdout',
        },
    }
    root_handlers = []
    if file is not None:
        handlers['fileHandler'] = {
            'level': file_level,
            'class': 'logging.FileHandler',
            'formatter': 'fileFormatter',
            'filename': file,
        }
        root_handlers.append('fileHandler')
    if stdout:
        root_handlers.append('consoleHandler')

    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'consoleFormatter': {
                'format': '%(levelname)s %(message)s'
            },
            'fileFormatter': {
                'format': '%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s %(message)s'
            },
        },
        'handlers': handlers,
        'loggers': {
            '': {
                'handlers': root_handlers,
                'level': 'DEBUG',
            }
        }
    })
|
{
"content_hash": "3847cac7b492af565821249bec51fa34",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 91,
"avg_line_length": 29.951219512195124,
"alnum_prop": 0.4576547231270358,
"repo_name": "thekot/cam_to_webm",
"id": "1b4c88ffdff3c593349e2637a18ec78d6ccf2037",
"size": "1228",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/init_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13603"
},
{
"name": "Shell",
"bytes": "2171"
}
],
"symlink_target": ""
}
|
from flask import Blueprint

# Blueprint is mounted at the application root.
url_prefix = '/'

# Central blueprint for the app's main pages; views attach their routes to it.
main = Blueprint('main', __name__, url_prefix=url_prefix,
                 template_folder='templates')

# Imported last so the view module can import `main` and register its routes.
from .views import *
|
{
"content_hash": "5b356438900cc902fdbce15756f8295f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 57,
"avg_line_length": 24.285714285714285,
"alnum_prop": 0.6470588235294118,
"repo_name": "pjryan126/flask-docker",
"id": "8a3245cd9745d5edeefd2af3a01111613acff9fb",
"size": "170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/app/blueprints/main/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7149"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """
    Introduces the TeamPairing model and re-points Pairing at it: the
    team/round foreign keys move off Pairing, which instead gains a required
    ``team_pairing`` FK.
    """

    dependencies = [
        ('tournament', '0005_auto_20160717_2320'),
    ]

    operations = [
        migrations.CreateModel(
            name='TeamPairing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
                ('white_points', models.PositiveIntegerField(default=0)),
                ('black_points', models.PositiveIntegerField(default=0)),
                ('black_team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pairings_as_black', to='tournament.Team')),
                ('round', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tournament.Round')),
                ('white_team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='pairings_as_white', to='tournament.Team')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RemoveField(
            model_name='pairing',
            name='black_team',
        ),
        migrations.RemoveField(
            model_name='pairing',
            name='round',
        ),
        migrations.RemoveField(
            model_name='pairing',
            name='white_team',
        ),
        migrations.AddField(
            model_name='pairing',
            name='team_pairing',
            # preserve_default=False: the FK is required, so existing Pairing
            # rows need a value supplied at migrate time.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to='tournament.TeamPairing'),
            preserve_default=False,
        ),
    ]
|
{
"content_hash": "0dc66018e8fb5607a56d143834fad38b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 151,
"avg_line_length": 39.52173913043478,
"alnum_prop": 0.5764576457645765,
"repo_name": "cyanfish/heltour",
"id": "be54dec1d58da5e6f01ed4a78315f564ce97ebcd",
"size": "1892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heltour/tournament/migrations/0006_auto_20160718_2011.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13951"
},
{
"name": "HTML",
"bytes": "310481"
},
{
"name": "JavaScript",
"bytes": "26784"
},
{
"name": "Python",
"bytes": "902629"
},
{
"name": "SCSS",
"bytes": "32099"
},
{
"name": "Shell",
"bytes": "4551"
}
],
"symlink_target": ""
}
|
''' Parameters are one of the core concepts of Luigi.
All Parameters sit on :class:`~luigi.task.Task` classes.
See :ref:`Parameter` for more info on how to define parameters.
'''
import abc
import datetime
import warnings
import json
from json import JSONEncoder
from collections import OrderedDict, Mapping
import operator
import functools
from ast import literal_eval
try:
from ConfigParser import NoOptionError, NoSectionError
except ImportError:
from configparser import NoOptionError, NoSectionError
from luigi import task_register
from luigi import six
from luigi import configuration
from luigi.cmdline_parser import CmdlineParser
# Sentinel meaning "no value supplied" — distinct from None, which is a
# legitimate parameter value.
_no_value = object()
class ParameterException(Exception):
    """
    Base class for every parameter-related error raised by this module.
    """
class MissingParameterException(ParameterException):
    """
    Raised when a required Parameter has no value from any source.
    """
class UnknownParameterException(ParameterException):
    """
    Raised when a Parameter is supplied that the task does not declare.
    """
class DuplicateParameterException(ParameterException):
    """
    Raised when the same Parameter is specified more than once.
    """
class Parameter(object):
    """
    An untyped Parameter

    Parameters are objects set on the Task class level to make it possible to parameterize tasks.
    For instance:

    .. code:: python

        class MyTask(luigi.Task):
            foo = luigi.Parameter()

        class RequiringTask(luigi.Task):
            def requires(self):
                return MyTask(foo="hello")

            def run(self):
                print(self.requires().foo)  # prints "hello"

    This makes it possible to instantiate multiple tasks, eg ``MyTask(foo='bar')`` and
    ``MyTask(foo='baz')``. The task will then have the ``foo`` attribute set appropriately.

    When a task is instantiated, it will first use any argument as the value of the parameter, eg.
    if you instantiate ``a = TaskA(x=44)`` then ``a.x == 44``. When the value is not provided, the
    value will be resolved in this order of falling priority:

    * Any value provided on the command line:

      - To the root task (eg. ``--param xyz``)
      - Then to the class, using the qualified task name syntax (eg. ``--TaskA-param xyz``).

    * With ``[TASK_NAME]>PARAM_NAME: <serialized value>`` syntax. See :ref:`ParamConfigIngestion`
    * Any default value set using the ``default`` flag.

    There are subclasses of ``Parameter`` that define what type the parameter has. This is not
    enforced within Python, but are used for command line interaction.

    Parameter objects may be reused, but you must then set the ``positional=False`` flag.
    """
    _counter = 0  # non-atomically increasing counter used for ordering parameters.

    def __init__(self, default=_no_value, is_global=False, significant=True, description=None,
                 config_path=None, positional=True, always_in_help=False, batch_method=None):
        """
        :param default: the default value for this parameter. This should match the type of the
                        Parameter, i.e. ``datetime.date`` for ``DateParameter`` or ``int`` for
                        ``IntParameter``. By default, no default is stored and
                        the value must be specified at runtime.
        :param bool significant: specify ``False`` if the parameter should not be treated as part of
                                 the unique identifier for a Task. An insignificant Parameter might
                                 also be used to specify a password or other sensitive information
                                 that should not be made public via the scheduler. Default:
                                 ``True``.
        :param str description: A human-readable string describing the purpose of this Parameter.
                                For command-line invocations, this will be used as the `help` string
                                shown to users. Default: ``None``.
        :param dict config_path: a dictionary with entries ``section`` and ``name``
                                 specifying a config file entry from which to read the
                                 default value for this parameter. DEPRECATED.
                                 Default: ``None``.
        :param bool positional: If true, you can set the argument as a
                                positional argument. It's true by default but we recommend
                                ``positional=False`` for abstract base classes and similar cases.
        :param bool always_in_help: For the --help option in the command line
                                    parsing. Set true to always show in --help.
        :param function(iterable[A])->A batch_method: Method to combine an iterable of parsed
                                                      parameter values into a single value. Used
                                                      when receiving batched parameter lists from
                                                      the scheduler. See :ref:`batch_method`
        """
        self._default = default
        self._batch_method = batch_method
        if is_global:
            # Legacy flag: is_global used to exist; now it only implies non-positional.
            warnings.warn("is_global support is removed. Assuming positional=False",
                          DeprecationWarning,
                          stacklevel=2)
            positional = False
        self.significant = significant  # Whether different values for this parameter will differentiate otherwise equal tasks
        self.positional = positional
        self.description = description
        self.always_in_help = always_in_help
        if config_path is not None and ('section' not in config_path or 'name' not in config_path):
            raise ParameterException('config_path must be a hash containing entries for section and name')
        self._config_path = config_path
        self._counter = Parameter._counter  # We need to keep track of this to get the order right (see Task class)
        Parameter._counter += 1

    def _get_value_from_config(self, section, name):
        """Loads the default from the config. Returns _no_value if it doesn't exist"""
        conf = configuration.get_config()
        try:
            value = conf.get(section, name)
        except (NoSectionError, NoOptionError):
            return _no_value
        return self.parse(value)

    def _get_value(self, task_name, param_name):
        # First non-_no_value candidate from _value_iterator wins; emit the
        # deprecation warning (if any) attached to that candidate's source.
        for value, warn in self._value_iterator(task_name, param_name):
            if value != _no_value:
                if warn:
                    warnings.warn(warn, DeprecationWarning)
                return value
        return _no_value

    def _value_iterator(self, task_name, param_name):
        """
        Yield the parameter values, with optional deprecation warning as second tuple value.

        The parameter value will be whatever non-_no_value that is yielded first.
        """
        cp_parser = CmdlineParser.get_instance()
        if cp_parser:
            # Command line takes highest priority.
            dest = self._parser_global_dest(param_name, task_name)
            found = getattr(cp_parser.known_args, dest, None)
            yield (self._parse_or_no_value(found), None)
        # Then the [task_name] section of the config, preferring underscores.
        yield (self._get_value_from_config(task_name, param_name), None)
        yield (self._get_value_from_config(task_name, param_name.replace('_', '-')),
               'Configuration [{}] {} (with dashes) should be avoided. Please use underscores.'.format(
                   task_name, param_name))
        if self._config_path:
            # Deprecated explicit config_path location.
            yield (self._get_value_from_config(self._config_path['section'], self._config_path['name']),
                   'The use of the configuration [{}] {} is deprecated. Please use [{}] {}'.format(
                       self._config_path['section'], self._config_path['name'], task_name, param_name))
        # Finally the constructor default (which may itself be _no_value).
        yield (self._default, None)

    def has_task_value(self, task_name, param_name):
        # True when some source (command line, config, or default) supplies a value.
        return self._get_value(task_name, param_name) != _no_value

    def task_value(self, task_name, param_name):
        # Resolved and normalized value for the given task/param; raises
        # MissingParameterException when no source supplies one.
        value = self._get_value(task_name, param_name)
        if value == _no_value:
            raise MissingParameterException("No default specified")
        else:
            return self.normalize(value)

    def _is_batchable(self):
        # Batching is enabled by supplying batch_method to the constructor.
        return self._batch_method is not None

    def parse(self, x):
        """
        Parse an individual value from the input.

        The default implementation is the identity function, but subclasses should override
        this method for specialized parsing.

        :param str x: the value to parse.
        :return: the parsed value.
        """
        return x  # default impl

    def _parse_list(self, xs):
        """
        Parse a list of values from the scheduler.

        Only possible if this is_batchable() is True. This will combine the list into a single
        parameter value using batch method. This should never need to be overridden.

        :param xs: list of values to parse and combine
        :return: the combined parsed values
        """
        if not self._is_batchable():
            raise NotImplementedError('No batch method found')
        elif not xs:
            raise ValueError('Empty parameter list passed to parse_list')
        else:
            return self._batch_method(map(self.parse, xs))

    def serialize(self, x):
        """
        Opposite of :py:meth:`parse`.

        Converts the value ``x`` to a string.

        :param x: the value to serialize.
        """
        if not isinstance(x, six.string_types) and self.__class__ == Parameter:
            # Untyped Parameter holding a non-string: round-tripping through
            # str() may not reproduce the original value, so warn.
            warnings.warn("Parameter {0} is not of type string.".format(str(x)))
        return str(x)

    def normalize(self, x):
        """
        Given a parsed parameter value, normalizes it.

        The value can either be the result of parse(), the default value or
        arguments passed into the task's constructor by instantiation.

        This is very implementation defined, but can be used to validate/clamp
        valid values. For example, if you wanted to only accept even integers,
        and "correct" odd values to the nearest integer, you can implement
        normalize as ``x // 2 * 2``.
        """
        return x  # default impl

    def next_in_enumeration(self, _value):
        """
        If your Parameter type has an enumerable ordering of values. You can
        choose to override this method. This method is used by the
        :py:mod:`luigi.execution_summary` module for pretty printing
        purposes. Enabling it to pretty print tasks like ``MyTask(num=1),
        MyTask(num=2), MyTask(num=3)`` to ``MyTask(num=1..3)``.

        :param value: The value
        :return: The next value, like "value + 1". Or ``None`` if there's no enumerable ordering.
        """
        return None

    def _parse_or_no_value(self, x):
        # Empty/None command-line values mean "not provided".
        if not x:
            return _no_value
        else:
            return self.parse(x)

    @staticmethod
    def _parser_global_dest(param_name, task_name):
        # argparse `dest` name used for the fully-qualified --TaskName-param option.
        return task_name + '_' + param_name

    @staticmethod
    def _parser_action():
        # argparse action for this parameter type; BoolParameter overrides
        # this to return 'store_true'.
        return "store"
# Default alignment origin for the date/datetime interval parameters below.
_UNIX_EPOCH = datetime.datetime.utcfromtimestamp(0)
class _DateParameterBase(Parameter):
    """
    Base class Parameter for date (not datetime).
    """

    def __init__(self, interval=1, start=None, **kwargs):
        # interval: step/clamp size in whole units (days/months/years per
        # subclass); start: alignment origin, defaulting to the Unix epoch date.
        super(_DateParameterBase, self).__init__(**kwargs)
        self.interval = interval
        self.start = start if start is not None else _UNIX_EPOCH.date()

    @abc.abstractproperty
    def date_format(self):
        """
        Override me with a :py:meth:`~datetime.date.strftime` string.
        """
        pass

    def parse(self, s):
        """
        Parses a date string formatted like ``YYYY-MM-DD``.
        """
        return datetime.datetime.strptime(s, self.date_format).date()

    def serialize(self, dt):
        """
        Converts the date to a string using the :py:attr:`~_DateParameterBase.date_format`.
        """
        if dt is None:
            return str(dt)  # yields "None"; keeps serialization total
        return dt.strftime(self.date_format)
class DateParameter(_DateParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.date`.

    A DateParameter is a Date string formatted ``YYYY-MM-DD``. For example, ``2013-07-10`` specifies
    July 10, 2013.

    DateParameters are 90% of the time used to be interpolated into file system paths or the like.
    Here is a gentle reminder of how to interpolate date parameters into strings:

    .. code:: python

        class MyTask(luigi.Task):
            date = luigi.DateParameter()

            def run(self):
                templated_path = "/my/path/to/my/dataset/{date:%Y/%m/%d}/"
                instantiated_path = templated_path.format(date=self.date)
                # print(instantiated_path) --> /my/path/to/my/dataset/2016/06/09/
                # ... use instantiated_path ...

    To set this parameter to default to the current day. You can write code like this:

    .. code:: python

        import datetime

        class MyTask(luigi.Task):
            date = luigi.DateParameter(default=datetime.date.today())
    """
    date_format = '%Y-%m-%d'

    def next_in_enumeration(self, value):
        # Step forward by one interval worth of days.
        step = datetime.timedelta(days=self.interval)
        return value + step

    def normalize(self, value):
        # Clamp down to the nearest interval boundary measured from self.start.
        if value is None:
            return None
        # A datetime is first truncated to its date component.
        value = value.date() if isinstance(value, datetime.datetime) else value
        days_past_boundary = (value - self.start).days % self.interval
        return value - datetime.timedelta(days=days_past_boundary)
class MonthParameter(DateParameter):
    """
    Parameter whose value is a :py:class:`~datetime.date`, specified to the month
    (day of :py:class:`~datetime.date` is "rounded" to first of the month).

    A MonthParameter is a Date string formatted ``YYYY-MM``. For example, ``2013-07`` specifies
    July of 2013.
    """
    date_format = '%Y-%m'

    def _add_months(self, date, months):
        """
        Return the first day of the month ``months`` months after ``date``.

        Timedeltas cannot express "one month" (months vary in length), so the
        arithmetic is done on a zero-based month index instead.
        """
        month_index = date.year * 12 + (date.month - 1) + months
        return datetime.date(year=month_index // 12, month=month_index % 12 + 1, day=1)

    def next_in_enumeration(self, value):
        return self._add_months(value, self.interval)

    def normalize(self, value):
        # Clamp to the nearest interval boundary in whole months from self.start.
        if value is None:
            return None
        elapsed = (value.year - self.start.year) * 12 + (value.month - self.start.month)
        aligned = elapsed - elapsed % self.interval
        return self._add_months(self.start, aligned)
class YearParameter(DateParameter):
    """
    Parameter whose value is a :py:class:`~datetime.date`, specified to the year
    (day and month of :py:class:`~datetime.date` is "rounded" to first day of the year).

    A YearParameter is a Date string formatted ``YYYY``.
    """
    date_format = '%Y'

    def next_in_enumeration(self, value):
        return value.replace(year=value.year + self.interval)

    def normalize(self, value):
        # Clamp to January 1st of the nearest interval boundary year.
        if value is None:
            return None
        years_past_boundary = (value.year - self.start.year) % self.interval
        return datetime.date(year=value.year - years_past_boundary, month=1, day=1)
class _DatetimeParameterBase(Parameter):
    """
    Base class Parameter for datetime
    """

    def __init__(self, interval=1, start=None, **kwargs):
        # interval: clamp size in multiples of the subclass's _timedelta;
        # start: alignment origin, defaulting to the Unix epoch.
        super(_DatetimeParameterBase, self).__init__(**kwargs)
        self.interval = interval
        self.start = start if start is not None else _UNIX_EPOCH

    @abc.abstractproperty
    def date_format(self):
        """
        Override me with a :py:meth:`~datetime.date.strftime` string.
        """
        pass

    @abc.abstractproperty
    def _timedelta(self):
        """
        How to move one interval of this type forward (i.e. not counting self.interval).
        """
        pass

    def parse(self, s):
        """
        Parses a string to a :py:class:`~datetime.datetime`.
        """
        return datetime.datetime.strptime(s, self.date_format)

    def serialize(self, dt):
        """
        Converts the date to a string using the :py:attr:`~_DatetimeParameterBase.date_format`.
        """
        if dt is None:
            return str(dt)  # yields "None"; keeps serialization total
        return dt.strftime(self.date_format)

    def normalize(self, dt):
        """
        Clamp dt to every Nth :py:attr:`~_DatetimeParameterBase.interval` starting at
        :py:attr:`~_DatetimeParameterBase.start`.
        """
        if dt is None:
            return None

        dt = dt.replace(microsecond=0)  # remove microseconds, to avoid float rounding issues.
        delta = (dt - self.start).total_seconds()
        granularity = (self._timedelta * self.interval).total_seconds()
        return dt - datetime.timedelta(seconds=delta % granularity)

    def next_in_enumeration(self, value):
        return value + self._timedelta * self.interval
class DateHourParameter(_DatetimeParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the hour.

    A DateHourParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date and time specified to the hour. For example, ``2013-07-10T19`` specifies July 10, 2013 at
    19:00.
    """

    date_format = '%Y-%m-%dT%H'  # ISO 8601 is to use 'T'
    _timedelta = datetime.timedelta(hours=1)  # one enumeration step = one hour
class DateMinuteParameter(_DatetimeParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the minute.

    A DateMinuteParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date and time specified to the minute. For example, ``2013-07-10T1907`` specifies July 10, 2013 at
    19:07.

    The interval parameter can be used to clamp this parameter to every N minutes, instead of every minute.
    """

    date_format = '%Y-%m-%dT%H%M'
    _timedelta = datetime.timedelta(minutes=1)
    deprecated_date_format = '%Y-%m-%dT%HH%M'

    def parse(self, s):
        """
        Parse ``s``, first trying the deprecated ``...THHMM``-with-'H' form
        (warning when it matches), then falling back to the current format.
        """
        try:
            parsed = datetime.datetime.strptime(s, self.deprecated_date_format)
        except ValueError:
            return super(DateMinuteParameter, self).parse(s)
        warnings.warn(
            'Using "H" between hours and minutes is deprecated, omit it instead.',
            DeprecationWarning,
            stacklevel=2
        )
        return parsed
class DateSecondParameter(_DatetimeParameterBase):
    """
    Parameter whose value is a :py:class:`~datetime.datetime` specified to the second.

    A DateSecondParameter is a `ISO 8601 <http://en.wikipedia.org/wiki/ISO_8601>`_ formatted
    date and time specified to the second. For example, ``2013-07-10T190738`` specifies July 10, 2013 at
    19:07:38.

    The interval parameter can be used to clamp this parameter to every N seconds, instead of every second.
    """

    date_format = '%Y-%m-%dT%H%M%S'  # compact ISO 8601, no separators within the time
    _timedelta = datetime.timedelta(seconds=1)  # one enumeration step = one second
class IntParameter(Parameter):
    """
    Parameter whose value is an ``int``.
    """

    def parse(self, s):
        """
        Parses an ``int`` from the string using ``int()``.
        """
        return int(s)

    def next_in_enumeration(self, value):
        """Integers enumerate in steps of 1 (used for pretty-printing ranges)."""
        return value + 1
class FloatParameter(Parameter):
    """
    Parameter whose value is a ``float``.
    """

    def parse(self, s):
        """
        Parses a ``float`` from the string using ``float()``.
        """
        return float(s)
class BoolParameter(Parameter):
    """
    A Parameter whose value is a ``bool``. This parameter have an implicit
    default value of ``False``.
    """

    def __init__(self, *args, **kwargs):
        super(BoolParameter, self).__init__(*args, **kwargs)
        # An unspecified default becomes False instead of "value required".
        if self._default == _no_value:
            self._default = False

    def parse(self, s):
        """
        Parses a ``bool`` from the string, matching 'true' or 'false' ignoring case.
        """
        lowered = str(s).lower()
        return {'true': True, 'false': False}[lowered]

    def normalize(self, value):
        # coerce anything truthy to True
        if value is None:
            return None
        return bool(value)

    @staticmethod
    def _parser_action():
        return 'store_true'
class BooleanParameter(BoolParameter):
    """
    DEPRECATED. Use :py:class:`~BoolParameter`
    """
    def __init__(self, *args, **kwargs):
        # Thin backwards-compatibility shim: warn, then behave exactly like
        # BoolParameter.
        warnings.warn(
            'BooleanParameter is deprecated, use BoolParameter instead',
            DeprecationWarning,
            stacklevel=2
        )
        super(BooleanParameter, self).__init__(*args, **kwargs)
class DateIntervalParameter(Parameter):
    """
    A Parameter whose value is a :py:class:`~luigi.date_interval.DateInterval`.

    Date Intervals are specified using the ISO 8601 date notation for dates
    (eg. "2015-11-04"), months (eg. "2015-05"), years (eg. "2015"), or weeks
    (eg. "2015-W35"). In addition, it also supports arbitrary date intervals
    provided as two dates separated with a dash (eg. "2015-11-04-2015-12-04").
    """

    def parse(self, s):
        """
        Parse a :py:class:`~luigi.date_interval.DateInterval` from ``s``.

        See :py:mod:`luigi.date_interval` for details on the supported forms.

        :raises ValueError: when no interval flavour recognises the string.
        """
        # TODO: can we use xml.utils.iso8601 or something similar?
        from luigi import date_interval as d
        # Try each interval flavour in order; the first parser that
        # recognises the string wins.
        for interval_cls in (d.Year, d.Month, d.Week, d.Date, d.Custom):
            parsed = interval_cls.parse(s)
            if parsed:
                return parsed
        raise ValueError('Invalid date interval - could not be parsed')
class TimeDeltaParameter(Parameter):
    """
    Class that maps to timedelta using strings in any of the following forms:
    * ``n {w[eek[s]]|d[ay[s]]|h[our[s]]|m[inute[s]|s[second[s]]}`` (e.g. "1 week 2 days" or "1 h")
    Note: multiple arguments must be supplied in longest to shortest unit order
    * ISO 8601 duration ``PnDTnHnMnS`` (each field optional, years and months not supported)
    * ISO 8601 duration ``PnW``
    See https://en.wikipedia.org/wiki/ISO_8601#Durations
    """
    def _apply_regex(self, regex, input):
        # Match ``input`` against ``regex`` whose named groups correspond to
        # datetime.timedelta keyword arguments. Returns None when nothing
        # matched with a non-zero value, so the caller can try another format.
        import re
        re_match = re.match(regex, input)
        if re_match:
            kwargs = {}
            has_val = False
            for k, v in six.iteritems(re_match.groupdict(default="0")):
                val = int(v)
                has_val = has_val or val != 0
                kwargs[k] = val
            if has_val:
                return datetime.timedelta(**kwargs)
    def _parseIso8601(self, input):
        # Build and apply a regex for ISO 8601 durations, e.g. "P2DT3H4M" or "P1W".
        def field(key):
            # e.g. field("days") -> r"(?P<days>\d+)D"
            return r"(?P<%s>\d+)%s" % (key, key[0].upper())
        def optional_field(key):
            return "(%s)?" % field(key)
        # A little loose: ISO 8601 does not allow weeks in combination with other fields, but this regex does (as does python timedelta)
        regex = "P(%s|%s(T%s)?)" % (field("weeks"), optional_field("days"), "".join([optional_field(key) for key in ["hours", "minutes", "seconds"]]))
        return self._apply_regex(regex, input)
    def _parseSimple(self, input):
        # Build and apply a regex for the human-readable form,
        # e.g. "1 week 2 days" or "1 h". Units must appear longest-first.
        keys = ["weeks", "days", "hours", "minutes", "seconds"]
        # Give the digits a regex group name from the keys, then look for text with the first letter of the key,
        # optionally followed by the rest of the word, with final char (the "s") optional
        regex = "".join([r"((?P<%s>\d+) ?%s(%s)?(%s)? ?)?" % (k, k[0], k[1:-1], k[-1]) for k in keys])
        return self._apply_regex(regex, input)
    def parse(self, input):
        """
        Parses a time delta from the input.
        See :py:class:`TimeDeltaParameter` for details on supported formats.
        """
        # Prefer the strict ISO 8601 form; fall back to the simple form.
        result = self._parseIso8601(input)
        if not result:
            result = self._parseSimple(input)
        if result:
            return result
        else:
            raise ParameterException("Invalid time delta - could not parse %s" % input)
class TaskParameter(Parameter):
    """
    A parameter that takes another luigi task class.

    When used programmatically, the parameter should be specified directly with
    the :py:class:`luigi.task.Task` (sub) class, e.g.
    ``MyMetaTask(my_task_param=my_tasks.MyTask)``. On the command line you
    specify the :py:attr:`luigi.task.Task.task_family`:

    .. code-block:: console

        $ luigi --module my_tasks MyMetaTask --my_task_param my_namespace.MyTask

    Where ``my_namespace.MyTask`` is defined in the ``my_tasks`` python module.
    Once the :py:class:`luigi.task.Task` is instantiated, the value is always a
    task class (never a string).
    """

    def parse(self, input):
        """
        Look up the task class registered under the family name ``input``
        via the :class:`~luigi.task_register.Register`.
        """
        return task_register.Register.get_task_cls(input)

    def serialize(self, cls):
        """
        Converts the :py:class:`luigi.task.Task` (sub) class to its family name.
        """
        return cls.task_family
class EnumParameter(Parameter):
    """
    A parameter whose value is an :class:`~enum.Enum`.

    In the task definition, use

    .. code-block:: python

        class Model(enum.Enum):
            Honda = 1
            Volvo = 2

        class MyTask(luigi.Task):
            my_param = luigi.EnumParameter(enum=Model)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --my-param Honda
    """

    def __init__(self, *args, **kwargs):
        try:
            self._enum = kwargs.pop('enum')
        except KeyError:
            raise ParameterException('An enum class must be specified.')
        super(EnumParameter, self).__init__(*args, **kwargs)

    def parse(self, s):
        """Map the enum member *name* ``s`` back to the member itself."""
        try:
            return self._enum[s]
        except KeyError:
            raise ValueError('Invalid enum value - could not be parsed')

    def serialize(self, e):
        """Serialize an enum member as its name (the inverse of :meth:`parse`)."""
        return e.name
class FrozenOrderedDict(Mapping):
    """
    An immutable, hashable wrapper around an ordered dictionary implementing
    the complete :py:class:`collections.Mapping` interface. It can be used as
    a drop-in replacement for dictionaries where immutability and ordering
    are desired.
    """

    def __init__(self, *args, **kwargs):
        self.__store = OrderedDict(*args, **kwargs)
        self.__cached_hash = None  # computed lazily on first hash()

    def __getitem__(self, key):
        return self.__store[key]

    def __iter__(self):
        return iter(self.__store)

    def __len__(self):
        return len(self.__store)

    def __repr__(self):
        return '<FrozenOrderedDict %s>' % repr(self.__store)

    def __hash__(self):
        # XOR of the item hashes; cached because the mapping never changes
        # after construction.
        if self.__cached_hash is None:
            item_hashes = map(hash, self.items())
            self.__cached_hash = functools.reduce(operator.xor, item_hashes, 0)
        return self.__cached_hash

    def get_wrapped(self):
        """Return the underlying OrderedDict (treat it as read-only)."""
        return self.__store
class DictParameter(Parameter):
    """
    Parameter whose value is a ``dict``.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
            tags = luigi.DictParameter()

            def run(self):
                logging.info("Find server with role: %s", self.tags['role'])
                server = aws.ec2.find_my_resource(self.tags)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --tags '{"role": "web", "env": "staging"}'

    It can be used to define dynamic parameters, when you do not know the exact
    list of your parameters (e.g. a list of tags constructed outside Luigi), or
    you have a complex parameter containing logically related values (like a
    database connection config).
    """

    class DictParamEncoder(JSONEncoder):
        """
        JSON encoder for :py:class:`~DictParameter`, which makes :py:class:`~FrozenOrderedDict` JSON serializable.
        """
        def default(self, obj):
            # Unwrap the immutable mapping so the stock encoder can handle it.
            if isinstance(obj, FrozenOrderedDict):
                return obj.get_wrapped()
            return json.JSONEncoder.default(self, obj)

    def normalize(self, value):
        """
        Freeze ``value`` into a :class:`FrozenOrderedDict` so the parameter is hashable.
        """
        return FrozenOrderedDict(value)

    def parse(self, s):
        """
        Parse an immutable and ordered ``dict`` from a JSON string.

        An immutable dict is needed both for hashability and to preserve the
        parse structure: plain ``dict`` traversal order is undefined, which
        would yield varying string representations — and hence varying task
        ids, since the task id hashes the parameters' JSON representation.

        :param s: string to be parsed
        """
        return json.loads(s, object_pairs_hook=FrozenOrderedDict)

    def serialize(self, x):
        """Serialize ``x`` back to JSON, unwrapping FrozenOrderedDict values."""
        return json.dumps(x, cls=DictParameter.DictParamEncoder)
class ListParameter(Parameter):
    """
    Parameter whose value is a ``list``.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
            grades = luigi.ListParameter()

            def run(self):
                sum = 0
                for element in self.grades:
                    sum += element
                avg = sum / len(self.grades)

    At the command line, pass a JSON array:

    .. code-block:: console

        $ luigi --module my_tasks MyTask --grades '[100,70]'
    """

    def normalize(self, x):
        """
        Freeze the list into a tuple so the parameter value is hashable.

        :param x: the value to normalize.
        :return: the normalized (hashable/immutable) value.
        """
        return tuple(x)

    def parse(self, x):
        """
        Decode a JSON array string into a ``list``.

        :param str x: the value to parse.
        :return: the parsed value.
        """
        return list(json.loads(x))

    def serialize(self, x):
        """
        Opposite of :py:meth:`parse`: dump the value ``x`` to a JSON string.

        :param x: the value to serialize.
        """
        return json.dumps(x)
class TupleParameter(Parameter):
    """
    Parameter whose value is a ``tuple`` or ``tuple`` of tuples.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
            book_locations = luigi.TupleParameter()

            def run(self):
                for location in self.book_locations:
                    print("Go to page %d, line %d" % (location[0], location[1]))

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --book_locations '((12,3),(4,15),(52,1))'
    """

    def parse(self, x):
        """
        Parse a tuple-of-tuples from either JSON or a Python tuple literal.

        :param str x: the value to parse.
        :return: the parsed value.
        """
        # json.dumps(((1, 2), (3, 4))) produces '[[1, 2], [3, 4]]', but values
        # from a config file or CLI may instead look like '((1,2),(3,4))',
        # which json cannot decode (ValueError). Try JSON first, then fall
        # back to a safe literal evaluation.
        try:
            decoded = json.loads(x)
        except ValueError:
            return literal_eval(x)  # if this raises, let that error be raised
        return tuple(tuple(item) for item in decoded)

    def serialize(self, x):
        """
        Opposite of :py:meth:`parse`: dump the value ``x`` to a JSON string.

        :param x: the value to serialize.
        """
        return json.dumps(x)
class NumericalParameter(Parameter):
    """
    Parameter whose value is a number of the specified type, e.g. ``int`` or
    ``float`` and in the range specified.
    In the task definition, use
    .. code-block:: python
        class MyTask(luigi.Task):
            my_param_1 = luigi.NumericalParameter(
                var_type=int, min_value=-3, max_value=7) # -3 <= my_param_1 < 7
            my_param_2 = luigi.NumericalParameter(
                var_type=int, min_value=-3, max_value=7, left_op=operator.lt, right_op=operator.le) # -3 < my_param_2 <= 7
    At the command line, use
    .. code-block:: console
        $ luigi --module my_tasks MyTask --my-param-1 -3 --my-param-2 -2
    """
    def __init__(self, left_op=operator.le, right_op=operator.lt, *args, **kwargs):
        """
        :param function var_type: The type of the input variable, e.g. int or float.
        :param min_value: The minimum value permissible in the accepted values
                          range.  May be inclusive or exclusive based on left_op parameter.
                          This should be the same type as var_type.
        :param max_value: The maximum value permissible in the accepted values
                          range.  May be inclusive or exclusive based on right_op parameter.
                          This should be the same type as var_type.
        :param function left_op: The comparison operator for the left-most comparison in
                                 the expression ``min_value left_op value right_op value``.
                                 This operator should generally be either
                                 ``operator.lt`` or ``operator.le``.
                                 Default: ``operator.le``.
        :param function right_op: The comparison operator for the right-most comparison in
                                  the expression ``min_value left_op value right_op value``.
                                  This operator should generally be either
                                  ``operator.lt`` or ``operator.le``.
                                  Default: ``operator.lt``.
        """
        # The three mandatory keyword arguments are popped (not just read) so
        # the remaining kwargs can be forwarded untouched to Parameter below.
        if "var_type" not in kwargs:
            raise ParameterException("var_type must be specified")
        self._var_type = kwargs.pop("var_type")
        if "min_value" not in kwargs:
            raise ParameterException("min_value must be specified")
        self._min_value = kwargs.pop("min_value")
        if "max_value" not in kwargs:
            raise ParameterException("max_value must be specified")
        self._max_value = kwargs.pop("max_value")
        self._left_op = left_op
        self._right_op = right_op
        # Human-readable interval, using [ ] for inclusive and ( ) for
        # exclusive endpoints; reused in error messages and the description.
        self._permitted_range = (
            "{var_type} in {left_endpoint}{min_value}, {max_value}{right_endpoint}".format(
                var_type=self._var_type.__name__,
                min_value=self._min_value, max_value=self._max_value,
                left_endpoint="[" if left_op == operator.le else "(",
                right_endpoint=")" if right_op == operator.lt else "]"))
        super(NumericalParameter, self).__init__(*args, **kwargs)
        # Append the permitted range to any user-supplied description so it
        # shows up in --help output.
        if self.description:
            self.description += " "
        else:
            self.description = ""
        self.description += "permitted values: " + self._permitted_range
    def parse(self, s):
        # Convert first, then range-check with the configured operators.
        value = self._var_type(s)
        if (self._left_op(self._min_value, value) and self._right_op(value, self._max_value)):
            return value
        else:
            raise ValueError(
                "{s} is not in the set of {permitted_range}".format(
                    s=s, permitted_range=self._permitted_range))
class ChoiceParameter(Parameter):
    """
    A parameter which takes two values:

    1. an instance of :class:`~collections.Iterable` and
    2. the class of the variables to convert to.

    In the task definition, use

    .. code-block:: python

        class MyTask(luigi.Task):
            my_param = luigi.ChoiceParameter(choices=[0.1, 0.2, 0.3], var_type=float)

    At the command line, use

    .. code-block:: console

        $ luigi --module my_tasks MyTask --my-param 0.1

    Consider using :class:`~luigi.EnumParameter` for a typed, structured
    alternative. This class can perform the same role when all choices are the
    same type and transparency of parameter value on the command line is
    desired.
    """

    def __init__(self, var_type=str, *args, **kwargs):
        """
        :param function var_type: The type of the input variable, e.g. str, int,
                                  float, etc.
                                  Default: str
        :param choices: An iterable, all of whose elements are of `var_type` to
                        restrict parameter choices to.
        """
        if "choices" not in kwargs:
            raise ParameterException("A choices iterable must be specified")
        self._choices = set(kwargs.pop("choices"))
        self._var_type = var_type
        # Every choice must be exactly var_type (no subclasses).
        assert all(type(choice) is self._var_type for choice in self._choices), "Invalid type in choices"
        super(ChoiceParameter, self).__init__(*args, **kwargs)
        # Append the choice list to any user-supplied description for --help.
        prefix = self.description + " " if self.description else ""
        listing = ", ".join(str(choice) for choice in self._choices)
        self.description = prefix + "Choices: {" + listing + "}"

    def parse(self, s):
        """Convert ``s`` with var_type and verify it is one of the choices."""
        candidate = self._var_type(s)
        if candidate not in self._choices:
            raise ValueError("{s} is not a valid choice from {choices}".format(
                s=s, choices=self._choices))
        return candidate
|
{
"content_hash": "4fcb5dc7dfcd069ce4f1797603e0c35b",
"timestamp": "",
"source": "github",
"line_count": 1107,
"max_line_length": 150,
"avg_line_length": 34.63414634146341,
"alnum_prop": 0.5992175273865414,
"repo_name": "Viktor-Evst/fixed-luigi",
"id": "fb4d30a8e0e00c30606e3e4f825e770393d9884d",
"size": "38944",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "luigi/parameter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "36680"
},
{
"name": "JavaScript",
"bytes": "84223"
},
{
"name": "Python",
"bytes": "1625261"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
}
|
"""Abstraction of the underlying virtualization API."""
import sys
from nova import flags
from nova import log as logging
from nova import utils
from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall
from nova.virt import hyperv
from nova.virt import vmwareapi_conn
from nova.virt import xenapi_conn
from nova.virt.libvirt import connection as libvirt_conn
LOG = logging.getLogger("nova.virt.connection")
FLAGS = flags.FLAGS
def get_connection(read_only=False):
    """
    Returns an object representing the connection to a virtualization
    platform.

    This could be :mod:`nova.virt.fake.FakeConnection` in test mode,
    a connection to KVM, QEMU, or UML via :mod:`libvirt_conn`, or a connection
    to XenServer or Xen Cloud Platform via :mod:`xenapi`.

    Any object returned here must conform to the interface documented by
    :mod:`FakeConnection`.

    **Related flags**

    :connection_type: Selects the virtualization mechanism; one of
                      fake, libvirt, xenapi, hyperv, vmwareapi.
    """
    # TODO(termie): maybe lazy load after initial check for permissions
    # TODO(termie): check whether we can be disconnected
    factories = {
        'fake': fake.get_connection,
        'libvirt': libvirt_conn.get_connection,
        'xenapi': xenapi_conn.get_connection,
        'hyperv': hyperv.get_connection,
        'vmwareapi': vmwareapi_conn.get_connection,
    }
    conn_type = FLAGS.connection_type
    factory = factories.get(conn_type)
    if factory is None:
        raise Exception('Unknown connection type "%s"' % conn_type)
    conn = factory(read_only)
    if conn is None:
        LOG.error(_('Failed to open connection to the hypervisor'))
        sys.exit(1)
    return utils.check_isinstance(conn, driver.ComputeDriver)
|
{
"content_hash": "a74fd0ceb7381ab7b9494a9115541fc0",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 32.435483870967744,
"alnum_prop": 0.6578816509199403,
"repo_name": "salv-orlando/MyRepo",
"id": "6875ccdd9ed584a6f2664c93a7223dfd32425c28",
"size": "2830",
"binary": false,
"copies": "1",
"ref": "refs/heads/bp/xenapi-security-groups",
"path": "nova/virt/connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "4477933"
},
{
"name": "Shell",
"bytes": "34174"
}
],
"symlink_target": ""
}
|
"""Module for example of listener."""
import sys
import os
import time
from cyber_py import cyber
from cyber.proto.unit_test_pb2 import ChatterBenchmark
def test_client_class():
    """
    Client send request

    Example client: creates a cyber node, then sends one ChatterBenchmark
    request per second to the "server_01" service forever, printing each
    response. (Python 2 syntax — uses print statements.)
    """
    node = cyber.Node("client_node")
    # Both request and response use the ChatterBenchmark message type.
    client = node.create_client(
        "server_01", ChatterBenchmark, ChatterBenchmark)
    req = ChatterBenchmark()
    req.content = "clt:Hello service!"
    req.seq = 0
    count = 0
    while not cyber.is_shutdown():
        time.sleep(1)
        count = count + 1
        # seq increments each round so the server can see request ordering.
        req.seq = count
        print "-" * 80
        # send_request blocks until the service replies.
        response = client.send_request(req)
        print "get Response [ ", response, " ]"
if __name__ == '__main__':
    cyber.init()
    test_client_class()
    cyber.shutdown()
|
{
"content_hash": "cbfb3a3b438540d8669ce51028a9fb82",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 56,
"avg_line_length": 23.6875,
"alnum_prop": 0.6068601583113457,
"repo_name": "wanglei828/apollo",
"id": "500b6684a04844da661672fd7026373456446e29",
"size": "1540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyber/python/examples/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1922"
},
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "C",
"bytes": "22662"
},
{
"name": "C++",
"bytes": "17378263"
},
{
"name": "CMake",
"bytes": "3600"
},
{
"name": "CSS",
"bytes": "40785"
},
{
"name": "Cuda",
"bytes": "97324"
},
{
"name": "Dockerfile",
"bytes": "11960"
},
{
"name": "GLSL",
"bytes": "7000"
},
{
"name": "HTML",
"bytes": "21068"
},
{
"name": "JavaScript",
"bytes": "364183"
},
{
"name": "Makefile",
"bytes": "6626"
},
{
"name": "Python",
"bytes": "1902086"
},
{
"name": "Shell",
"bytes": "302902"
},
{
"name": "Smarty",
"bytes": "33258"
}
],
"symlink_target": ""
}
|
"""
Django settings for mptt_424 project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k&it&s7f)e7^*@0ri$t94nip25)0c_2i69k45trt2lf+bvkqc2'
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG and the hard-coded SECRET_KEY above are only acceptable
# because this looks like a local test project for django-mptt — confirm it is
# never deployed as-is.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'mptt',
    'mptt_424_app',
]
MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mptt_424.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            # Template-level debug mirrors the global DEBUG flag above.
            'debug': True,
        },
    },
]
WSGI_APPLICATION = 'mptt_424.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "ac15c6022e7d8ea416b6f1f3d3f69b77",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 91,
"avg_line_length": 26.071428571428573,
"alnum_prop": 0.680365296803653,
"repo_name": "Wtower/django-mptt-424",
"id": "75acdb61a37a8e20896316ab96ca2f16c7883c8f",
"size": "3285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mptt_424/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2137"
},
{
"name": "Python",
"bytes": "10075"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, print_function, unicode_literals
import time
class Clock(object):
    """ Hybrid logical timestamp generator.

    A timestamp packs a 48-bit millisecond wall-clock component ``l`` and a
    16-bit logical counter ``c`` into a single integer::

        timestamp = (l << 16) | c
    """

    def __init__(self, timestamp=None):
        """
        :param timestamp: optional packed timestamp to resume from; when
            omitted the clock initialises itself from "now" via :meth:`tick`.
        """
        if timestamp:
            self.l = (timestamp >> 16) & 0xffffffffffff
            self.c = timestamp & 0xffff
        else:
            self.l = 0
            self.c = 0
            self.tick()

    def update(self, rt=None):
        """ Updates the clock with a received timestamp from another node.

        :param rt: received timestamp
        """
        ml = (rt >> 16) & 0xffffffffffff
        mc = rt & 0xffff
        old_l = self.l
        # Current wall clock in milliseconds, rounded to nearest.
        pt = int(round((time.time() + 0.0005) * 1000.0))
        self.l = max(old_l, ml, pt)
        if self.l == old_l == ml:
            self.c = max(self.c, mc) + 1
        elif self.l == old_l:
            self.c += 1
        elif self.l == ml:
            self.c = mc + 1
        else:
            self.c = 0

    def tick(self):
        """ Updates and tick the clock for local or send events.

        :return: the updated timestamp
        """
        # local or send event
        old_l = self.l
        pt = int(round((time.time() + 0.0005) * 1000.0))
        self.l = max(old_l, pt)
        if self.l == old_l:
            # Wall clock has not advanced: bump the logical counter instead.
            self.c += 1
        else:
            self.c = 0
        return self.timestamp()

    def timestamp(self):
        """ Gets the current timestamp without updating counter.

        :return: the timestamp
        """
        return (self.l << 16) | self.c

    def seconds(self):
        """ Gets the value compatible with time.time() function.

        :return: the float value represent seconds from epoc
        """
        return (self.l/1000.0) + (self.c/65536.0)

    def __hash__(self):
        return self.timestamp()

    def _compare(self, other):
        # Lexicographic (l, c) comparison shared by __cmp__ (Python 2) and
        # the rich comparison methods (Python 3).
        if self.l != other.l:
            return 1 if self.l > other.l else -1
        if self.c != other.c:
            return 1 if self.c > other.c else -1
        return 0

    def __cmp__(self, other):
        # Python 2 only; ignored by Python 3.
        return self._compare(other)

    # Bug fix: Python 3 ignores __cmp__, so ==, <, etc. raised TypeError
    # despite this module's from __future__ 2/3-compat intent. Provide the
    # rich comparisons explicitly, delegating to the same ordering.
    def __eq__(self, other):
        if not isinstance(other, Clock):
            return NotImplemented
        return self._compare(other) == 0

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result

    def __lt__(self, other):
        if not isinstance(other, Clock):
            return NotImplemented
        return self._compare(other) < 0

    def __le__(self, other):
        if not isinstance(other, Clock):
            return NotImplemented
        return self._compare(other) <= 0

    def __gt__(self, other):
        if not isinstance(other, Clock):
            return NotImplemented
        return self._compare(other) > 0

    def __ge__(self, other):
        if not isinstance(other, Clock):
            return NotImplemented
        return self._compare(other) >= 0

    def __repr__(self):
        return "Clock[l=%r, c=%r]" % (self.l, self.c)

    @staticmethod
    def timestamp_to_secs(ts):
        """ Converts a timestamp to seconds.

        :param ts: the clock's timestamp.
        :return: float seconds (wall ms plus fractional counter part).
        """
        l = (ts >> 16) & 0xffffffffffff
        c = ts & 0xffff
        return (l/1000.0) + (c/65536.0)


# Module-level singleton clock shared by the package.
clock = Clock()

__all__ = ['Clock', 'clock']
|
{
"content_hash": "c2bdbb76e41f9c68cc533248bb809a73",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 74,
"avg_line_length": 24.95098039215686,
"alnum_prop": 0.48762278978388995,
"repo_name": "nickchen-mitac/fork",
"id": "18d07aefd374304bd598accb2c1013d2d6ed33d9",
"size": "2569",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/ava/util/clock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10442"
},
{
"name": "HTML",
"bytes": "11410"
},
{
"name": "JavaScript",
"bytes": "25325"
},
{
"name": "Python",
"bytes": "445788"
},
{
"name": "Shell",
"bytes": "65"
}
],
"symlink_target": ""
}
|
"""Exceptions that can be thrown from Wapi. Every one of them will
generate a response matching the type of the exception."""
from django.http import HttpResponse
class ApiError(RuntimeError):
    """Generic API Error.

    Base class for all Wapi exceptions: each subclass pins an HTTP status
    code and a default message used when none is supplied.
    """
    default_msg = 'API Error'
    status_code = 400

    def __init__(self, message=None):
        RuntimeError.__init__(self, message or self.__class__.default_msg)
        # Bug fix: BaseException.message was removed in Python 3, so
        # get_message() raised AttributeError there. Store the message
        # explicitly (harmless on Python 2, where it shadows the legacy
        # attribute with the same value).
        self.message = message or self.__class__.default_msg

    def get_message(self):
        """Returns the message contained in the response, some
        exceptions override it"""
        return self.message

    def get_response(self):
        """Returns an HttpResponse using the code defined in the class"""
        response = HttpResponse(self.get_message())
        response.status_code = self.__class__.status_code
        return response
class ApiLoginRequired(ApiError):
    """The requested action requires a valid user"""
    default_msg = 'Authentication required'
    # 401 tells the client to authenticate and retry.
    status_code = 401
class ApiBadRequest(ApiError):
    """Bad parameters present in the request"""
    default_msg = 'Bad request'
    # Same 400 as the base class; kept explicit for clarity.
    status_code = 400
class ApiMissingParam(ApiBadRequest):
    """A required parameter was omitted.

    When ``param`` is given, the response message names the missing
    parameter instead of the generic default.
    """
    default_msg = 'Missing parameter'

    def __init__(self, message=None, param=None):
        ApiBadRequest.__init__(self, message)
        if param:
            self.message = 'Missing required parameter "%s"' % param
        else:
            # Fix: ensure .message always exists — Python 3 removed
            # BaseException.message, which get_message() reads.
            self.message = message or self.default_msg
class ApiInvalidParam(ApiBadRequest):
    """A parameter had a bad value.

    When both ``param`` and ``value`` are given, the response message names
    the offending parameter and the rejected value.
    """
    default_msg = 'Parameter with invalid value'

    def __init__(self, message=None, param=None, value=None):
        ApiBadRequest.__init__(self, message)
        if param and value:
            # Bug fix: the original interpolated (param, value) into a format
            # string whose placeholders are ordered value-first, producing
            # e.g. 'Value "age" for parameter "-3" is not valid'.
            self.message = 'Value "%s" for parameter "%s" is not valid' % \
                (value, param)
class ApiForbidden(ApiError):
    """The user hasn't permission to perform the requested action"""
    # 403: authenticated but not authorised.
    status_code = 403
    default_msg = 'Permission denied'
class ApiEmpty(ApiError):
    """No data to return"""
    # Deliberately a 200 with an empty body — not an error to the client.
    status_code = 200
    default_msg = ''
|
{
"content_hash": "2aa029095a2e363ef4c306716f302a02",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 75,
"avg_line_length": 31.328125,
"alnum_prop": 0.6528678304239401,
"repo_name": "fiam/wapi",
"id": "2ef02507e5806914beca662a86677100c7c54bb6",
"size": "3148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73656"
},
{
"name": "Shell",
"bytes": "1749"
}
],
"symlink_target": ""
}
|
from motor import Motor
try:
    from gpio_manager import GPIOManager as GPIOManager
    DEBUG = 0
except Exception:
    # Real GPIO stack unavailable (e.g. developing off the target board):
    # fall back to the simulator. Fix: the original bare ``except:`` also
    # swallowed KeyboardInterrupt/SystemExit; ``except Exception`` keeps the
    # broad fallback (hardware GPIO libs may raise RuntimeError off-device —
    # TODO confirm) without hiding interpreter-exit signals.
    from gpio_simulation import GPIOSimulation as GPIOManager
    DEBUG = 1
# Sensor mounting radius passed to Motor (units per project convention —
# presumably metres; confirm against Motor's implementation).
sensor_radius = 0.1
# Number of magnets on the rotor; with the sensor this defines ticks/revolution.
num_magnets = 6
motor = Motor(sensor_radius, num_magnets)
# Assigned in main(); read by __tick_occurred__.
gpio_manager = None
def main():
    """Wire the GPIO layer (real or simulated) to the motor and start both."""
    global gpio_manager
    factory = debug_mode_function[DEBUG]
    gpio_manager = factory()
    gpio_manager.input_callback = __tick_occurred__
    motor.run()
    gpio_manager.start()
def run():
    """Create a GPIO manager bound to the real hardware pins.

    Pin 11 is the sensor input; pins 16 and 17 drive the coil outputs.
    """
    return GPIOManager(motor, [16, 17], 11)
def simulate():
    # Simulation backend needs no physical pins — only the motor model.
    return GPIOManager(motor)
def __tick_occurred__():
    # Input-edge callback: notify the motor model, then advance the coils.
    motor.tick_occurred()
    gpio_manager.toggle_coil_state()
# Maps the DEBUG flag (0 = real hardware, 1 = simulation) to the
# corresponding GPIO-manager factory; consulted once in main().
debug_mode_function = {
    0: run,
    1: simulate
}
if (__name__ == '__main__'):
    main()
|
{
"content_hash": "445c312b10cbebf1d463a63223c3f9e7",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 61,
"avg_line_length": 15.692307692307692,
"alnum_prop": 0.6507352941176471,
"repo_name": "swm93/brushless-dc-motor",
"id": "d71e0ffabbe591c7e898ed6f7c118294b4e3dee4",
"size": "911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brushless_dc_motor/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14907"
}
],
"symlink_target": ""
}
|
import os
from distutils.core import setup
from setuptools import find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    Fix: the original left the file handle open (``open(...).read()``);
    a context manager guarantees it is closed on every interpreter.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as fh:
        return fh.read()
# Packaging metadata for the django-twitsocket distribution.
setup(
    name='django-twitsocket',
    version='0.1',
    author=u'Bruno Renie, Gautier Hayoun',
    author_email='bruno@renie.fr',
    packages=find_packages(),
    include_package_data=True,
    url='http://github.com/brutasse/django-twitsocket',
    license='BSD',
    description='A twitter wall / live stream for your conference / event / topic of interest, as a Django reusable app.',
    long_description=read('README.rst'),
    zip_safe=False,
)
|
{
"content_hash": "acdeb7d5bf5b7454163d5f1cac4421c7",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 122,
"avg_line_length": 29.857142857142858,
"alnum_prop": 0.69377990430622,
"repo_name": "apapillon/django-twitsocket",
"id": "3282b58962ed60e3044e79fb737211292f780e68",
"size": "651",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "6086"
},
{
"name": "JavaScript",
"bytes": "29633"
},
{
"name": "Python",
"bytes": "16348"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.