# (dataset-export artifact, preserved as a comment: "text stringlengths 4 1.02M | meta dict |")
from __future__ import unicode_literals
import gettext
import os
from datetime import datetime, timedelta
from importlib import import_module
from unittest import skipIf
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.db.models import CharField, DateField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import six, translation
from . import models
from .widgetadmin import site as widget_admin_site
try:
import pytz
except ImportError:
pytz = None
class TestDataMixin(object):
    """
    Create shared fixture data once per test class: a superuser, a regular
    staff user, and one Car owned by each.
    """
    @classmethod
    def setUpTestData(cls):
        # Superuser used to log into the admin in most tests.
        cls.u1 = User.objects.create(
            pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
            is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
            date_joined=datetime(2007, 5, 30, 13, 20, 10)
        )
        # Staff (non-super) user.
        cls.u2 = User.objects.create(
            pk=101, username='testser', first_name='Add', last_name='User', email='auser@example.com',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=False,
            is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
            date_joined=datetime(2007, 5, 30, 13, 20, 10)
        )
        # One car per user, used by the FK widget filtering tests.
        models.Car.objects.create(id=1, owner=cls.u1, make='Volkswagen', model='Passat')
        models.Car.objects.create(id=2, owner=cls.u2, make='BMW', model='M3')
class SeleniumDataMixin(object):
    """Create the admin superuser before each Selenium test."""
    def setUp(self):
        self.u1 = User.objects.create(
            pk=100, username='super', first_name='Super', last_name='User', email='super@example.com',
            password='sha1$995a3$6011485ea3834267d719b4c801409b8b1ddd0158', is_active=True, is_superuser=True,
            is_staff=True, last_login=datetime(2007, 5, 30, 13, 20, 10),
            date_joined=datetime(2007, 5, 30, 13, 20, 10)
        )
class AdminFormfieldForDBFieldTests(SimpleTestCase):
    """
    Tests for correct behavior of ModelAdmin.formfield_for_dbfield
    """
    def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
        """
        Helper to call formfield_for_dbfield for a given model and field name
        and verify that the returned formfield is appropriate.

        Any keyword arguments are set as attributes on a throwaway
        ModelAdmin subclass before the formfield is built. Returns the
        formfield so callers can make further assertions on it.
        """
        # Override any settings on the model admin
        class MyModelAdmin(admin.ModelAdmin):
            pass
        for k, v in admin_overrides.items():
            setattr(MyModelAdmin, k, v)
        # Construct the admin, and ask it for a formfield
        ma = MyModelAdmin(model, admin.site)
        ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
        # "unwrap" the widget wrapper, if needed
        if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
            widget = ff.widget.widget
        else:
            widget = ff.widget
        # Check that we got a field of the right type
        self.assertTrue(
            isinstance(widget, widgetclass),
            "Wrong widget for %s.%s: expected %s, got %s" % (
                # `model` is a model *class*, so use its own name;
                # model.__class__.__name__ would name the metaclass
                # (ModelBase) and make failure messages useless.
                model.__name__,
                fieldname,
                widgetclass,
                type(widget),
            )
        )
        # Return the formfield so that other tests can continue
        return ff
    def test_DateField(self):
        self.assertFormfield(models.Event, 'start_date', widgets.AdminDateWidget)
    def test_DateTimeField(self):
        self.assertFormfield(models.Member, 'birthdate', widgets.AdminSplitDateTime)
    def test_TimeField(self):
        self.assertFormfield(models.Event, 'start_time', widgets.AdminTimeWidget)
    def test_TextField(self):
        self.assertFormfield(models.Event, 'description', widgets.AdminTextareaWidget)
    def test_URLField(self):
        self.assertFormfield(models.Event, 'link', widgets.AdminURLFieldWidget)
    def test_IntegerField(self):
        self.assertFormfield(models.Event, 'min_age', widgets.AdminIntegerFieldWidget)
    def test_CharField(self):
        self.assertFormfield(models.Member, 'name', widgets.AdminTextInputWidget)
    def test_EmailField(self):
        self.assertFormfield(models.Member, 'email', widgets.AdminEmailInputWidget)
    def test_FileField(self):
        self.assertFormfield(models.Album, 'cover_art', widgets.AdminFileWidget)
    def test_ForeignKey(self):
        self.assertFormfield(models.Event, 'main_band', forms.Select)
    def test_raw_id_ForeignKey(self):
        self.assertFormfield(models.Event, 'main_band', widgets.ForeignKeyRawIdWidget,
                             raw_id_fields=['main_band'])
    def test_radio_fields_ForeignKey(self):
        ff = self.assertFormfield(models.Event, 'main_band', widgets.AdminRadioSelect,
                                  radio_fields={'main_band': admin.VERTICAL})
        self.assertEqual(ff.empty_label, None)
    def test_many_to_many(self):
        self.assertFormfield(models.Band, 'members', forms.SelectMultiple)
    def test_raw_id_many_to_many(self):
        self.assertFormfield(models.Band, 'members', widgets.ManyToManyRawIdWidget,
                             raw_id_fields=['members'])
    def test_filtered_many_to_many(self):
        self.assertFormfield(models.Band, 'members', widgets.FilteredSelectMultiple,
                             filter_vertical=['members'])
    def test_formfield_overrides(self):
        self.assertFormfield(models.Event, 'start_date', forms.TextInput,
                             formfield_overrides={DateField: {'widget': forms.TextInput}})
    def test_formfield_overrides_widget_instances(self):
        """
        Test that widget instances in formfield_overrides are not shared between
        different fields. (#19423)
        """
        class BandAdmin(admin.ModelAdmin):
            formfield_overrides = {
                CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
            }
        ma = BandAdmin(models.Band, admin.site)
        f1 = ma.formfield_for_dbfield(models.Band._meta.get_field('name'), request=None)
        f2 = ma.formfield_for_dbfield(models.Band._meta.get_field('style'), request=None)
        # Distinct widget instances; each keeps its own field's maxlength
        # while sharing the overridden 'size' attribute.
        self.assertNotEqual(f1.widget, f2.widget)
        self.assertEqual(f1.widget.attrs['maxlength'], '100')
        self.assertEqual(f2.widget.attrs['maxlength'], '20')
        self.assertEqual(f2.widget.attrs['size'], '10')
    def test_field_with_choices(self):
        self.assertFormfield(models.Member, 'gender', forms.Select)
    def test_choices_with_radio_fields(self):
        self.assertFormfield(models.Member, 'gender', widgets.AdminRadioSelect,
                             radio_fields={'gender': admin.VERTICAL})
    def test_inheritance(self):
        self.assertFormfield(models.Album, 'backside_art', widgets.AdminFileWidget)
    def test_m2m_widgets(self):
        """m2m fields help text as it applies to admin app (#9321)."""
        class AdvisorAdmin(admin.ModelAdmin):
            filter_vertical = ['companies']
        self.assertFormfield(models.Advisor, 'companies', widgets.FilteredSelectMultiple,
                             filter_vertical=['companies'])
        ma = AdvisorAdmin(models.Advisor, admin.site)
        f = ma.formfield_for_dbfield(models.Advisor._meta.get_field('companies'), request=None)
        self.assertEqual(
            six.text_type(f.help_text),
            'Hold down "Control", or "Command" on a Mac, to select more than one.'
        )
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
    """formfield_for_dbfield behavior that depends on the current request."""
    def test_filter_choices_by_request_user(self):
        """
        Ensure the user can only see their own cars in the foreign key dropdown.
        """
        self.client.login(username="super", password="secret")
        response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
        # The superuser owns the Passat (fixture u1) but not the BMW (u2's).
        self.assertNotContains(response, "BMW M3")
        self.assertContains(response, "Volkswagen Passat")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
    """FK widget behavior on the admin changelist page."""
    def setUp(self):
        self.client.login(username="super", password="secret")
    def test_changelist_ForeignKey(self):
        # The page should contain a link to add a related User
        # (presumably via the editable FK widget — verify against template).
        response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
        self.assertContains(response, '/auth/user/add/')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
    """Validation behavior of raw-id FK inputs and lookup URL parameters."""
    def setUp(self):
        self.client.login(username="super", password="secret")
    def test_nonexistent_target_id(self):
        # Create a band, remember its pk, then delete it so the pk dangles.
        band = models.Band.objects.create(name='Bogey Blues')
        pk = band.pk
        band.delete()
        post_data = {
            "main_band": '%s' % pk,
        }
        # Try posting with a non-existent pk in a raw id field: this
        # should result in an error message, not a server exception.
        response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
        self.assertContains(response,
            'Select a valid choice. That choice is not one of the available choices.')
    def test_invalid_target_id(self):
        # Non-numeric, quote-containing, and negative values must all be
        # rejected gracefully.
        for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
            # This should result in an error message, not a server exception.
            response = self.client.post(reverse('admin:admin_widgets_event_add'),
                {"main_band": test_str})
            self.assertContains(response,
                'Select a valid choice. That choice is not one of the available choices.')
    def test_url_params_from_lookup_dict_any_iterable(self):
        # Tuple and list values for an __in lookup produce identical params.
        lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
        lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
        self.assertEqual(lookup1, {'color__in': 'red,blue'})
        self.assertEqual(lookup1, lookup2)
    def test_url_params_from_lookup_dict_callable(self):
        # A callable lookup value is invoked; result equals passing its
        # return value directly.
        def my_callable():
            return 'works'
        lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
        lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
        self.assertEqual(lookup1, lookup2)
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
    """Rendering of FilteredSelectMultiple in horizontal and stacked modes."""
    def test_render(self):
        # Backslash in verbose_name to ensure it is JavaScript escaped.
        w = widgets.FilteredSelectMultiple('test\\', False)
        self.assertHTMLEqual(
            w.render('test', 'test'),
            '<select multiple="multiple" name="test" class="selectfilter" '
            'data-field-name="test\\" data-is-stacked="0">\n</select>'
        )
    def test_stacked_render(self):
        # Backslash in verbose_name to ensure it is JavaScript escaped.
        w = widgets.FilteredSelectMultiple('test\\', True)
        self.assertHTMLEqual(
            w.render('test', 'test'),
            '<select multiple="multiple" name="test" class="selectfilterstacked" '
            'data-field-name="test\\" data-is-stacked="1">\n</select>'
        )
class AdminDateWidgetTest(SimpleTestCase):
    """Rendering of AdminDateWidget, including custom attrs."""
    def test_attrs(self):
        """
        Ensure that user-supplied attrs are used.
        Refs #12073.
        """
        w = widgets.AdminDateWidget()
        self.assertHTMLEqual(
            w.render('test', datetime(2007, 12, 1, 9, 30)),
            '<input value="2007-12-01" type="text" class="vDateField" name="test" size="10" />',
        )
        # pass attrs to widget
        w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
        self.assertHTMLEqual(
            w.render('test', datetime(2007, 12, 1, 9, 30)),
            '<input value="2007-12-01" type="text" class="myDateField" name="test" size="20" />',
        )
class AdminTimeWidgetTest(SimpleTestCase):
    """Rendering of AdminTimeWidget, including custom attrs."""
    def test_attrs(self):
        """
        Ensure that user-supplied attrs are used.
        Refs #12073.
        """
        w = widgets.AdminTimeWidget()
        self.assertHTMLEqual(
            w.render('test', datetime(2007, 12, 1, 9, 30)),
            '<input value="09:30:00" type="text" class="vTimeField" name="test" size="8" />',
        )
        # pass attrs to widget
        w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
        self.assertHTMLEqual(
            w.render('test', datetime(2007, 12, 1, 9, 30)),
            '<input value="09:30:00" type="text" class="myTimeField" name="test" size="20" />',
        )
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
    """Rendering and localization of the admin split date/time widget."""
    def test_render(self):
        w = widgets.AdminSplitDateTime()
        self.assertHTMLEqual(
            w.render('test', datetime(2007, 12, 1, 9, 30)),
            '<p class="datetime">'
            'Date: <input value="2007-12-01" type="text" class="vDateField" '
            'name="test_0" size="10" /><br />'
            'Time: <input value="09:30:00" type="text" class="vTimeField" '
            'name="test_1" size="8" /></p>'
        )
    def test_localization(self):
        # German (Austria) locale: localized labels and dd.mm.yyyy date format.
        w = widgets.AdminSplitDateTime()
        with self.settings(USE_L10N=True), translation.override('de-at'):
            w.is_localized = True
            self.assertHTMLEqual(
                w.render('test', datetime(2007, 12, 1, 9, 30)),
                '<p class="datetime">'
                'Datum: <input value="01.12.2007" type="text" '
                # Fixed: a space was missing between the name="test_0" and
                # size="10" attributes in this expected-HTML fragment.
                'class="vDateField" name="test_0" size="10" /><br />'
                'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
                'name="test_1" size="8" /></p>'
            )
class AdminURLWidgetTest(SimpleTestCase):
    """Rendering of AdminURLFieldWidget: plain, IDN, and XSS-quoting cases."""
    def test_render(self):
        w = widgets.AdminURLFieldWidget()
        self.assertHTMLEqual(
            w.render('test', ''),
            '<input class="vURLField" name="test" type="url" />'
        )
        self.assertHTMLEqual(
            w.render('test', 'http://example.com'),
            '<p class="url">Currently:<a href="http://example.com">'
            'http://example.com</a><br />'
            'Change:<input class="vURLField" name="test" type="url" '
            'value="http://example.com" /></p>'
        )
    def test_render_idn(self):
        # IDN hosts are punycoded in the href but shown verbatim as text.
        w = widgets.AdminURLFieldWidget()
        self.assertHTMLEqual(
            w.render('test', 'http://example-äüö.com'),
            '<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
            'http://example-äüö.com</a><br />'
            'Change:<input class="vURLField" name="test" type="url" '
            'value="http://example-äüö.com" /></p>'
        )
    def test_render_quoting(self):
        # WARNING: Don't use assertHTMLEqual in that testcase!
        # assertHTMLEqual will get rid of some escapes which are tested here!
        w = widgets.AdminURLFieldWidget()
        self.assertEqual(
            w.render('test', 'http://example.com/<sometag>some text</sometag>'),
            '<p class="url">Currently: '
            '<a href="http://example.com/%3Csometag%3Esome%20text%3C/sometag%3E">'
            'http://example.com/&lt;sometag&gt;some text&lt;/sometag&gt;</a><br />'
            'Change: <input class="vURLField" name="test" type="url" '
            'value="http://example.com/&lt;sometag&gt;some text&lt;/sometag&gt;" /></p>'
        )
        self.assertEqual(
            w.render('test', 'http://example-äüö.com/<sometag>some text</sometag>'),
            '<p class="url">Currently: '
            '<a href="http://xn--example--7za4pnc.com/%3Csometag%3Esome%20text%3C/sometag%3E">'
            'http://example-äüö.com/&lt;sometag&gt;some text&lt;/sometag&gt;</a><br />'
            'Change: <input class="vURLField" name="test" type="url" '
            'value="http://example-äüö.com/&lt;sometag&gt;some text&lt;/sometag&gt;" /></p>'
        )
        self.assertEqual(
            w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"'),
            '<p class="url">Currently: '
            '<a href="http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22">'
            'http://www.example.com/%C3%A4&quot;&gt;&lt;script&gt;'
            'alert(&quot;XSS!&quot;)&lt;/script&gt;&quot;</a><br />'
            'Change: <input class="vURLField" name="test" type="url" '
            'value="http://www.example.com/%C3%A4&quot;&gt;&lt;script&gt;'
            'alert(&quot;XSS!&quot;)&lt;/script&gt;&quot;" /></p>'
        )
@override_settings(
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls',
)
class AdminFileWidgetTests(TestDataMixin, TestCase):
    """Rendering of AdminFileWidget against a stored album cover file."""
    @classmethod
    def setUpTestData(cls):
        super(AdminFileWidgetTests, cls).setUpTestData()
        band = models.Band.objects.create(name='Linkin Park')
        # Backslash in the stored name exercises path rendering.
        cls.album = band.album_set.create(
            name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
        )
    def test_render(self):
        w = widgets.AdminFileWidget()
        self.assertHTMLEqual(
            w.render('test', self.album.cover_art),
            '<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
            # Fixed: '\h' is an invalid escape sequence (SyntaxWarning on
            # modern Pythons); '\\h' has the identical runtime value.
            'hybrid_theory.jpg">albums\\hybrid_theory.jpg</a> '
            '<span class="clearable-file-input">'
            '<input type="checkbox" name="test-clear" id="test-clear_id" /> '
            '<label for="test-clear_id">Clear</label></span><br />'
            'Change: <input type="file" name="test" /></p>' % {
                'STORAGE_URL': default_storage.url(''),
            },
        )
        # A freshly uploaded file renders as a bare input (no "Currently").
        self.assertHTMLEqual(
            w.render('test', SimpleUploadedFile('test', b'content')),
            '<input type="file" name="test" />',
        )
    def test_readonly_fields(self):
        """
        File widgets should render as a link when they're marked "read only."
        """
        self.client.login(username="super", password="secret")
        response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
        self.assertContains(
            response,
            '<p><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
            # Fixed: escaped the backslash here too (same invalid '\h' escape).
            'albums\\hybrid_theory.jpg</a></p>' % {'STORAGE_URL': default_storage.url('')},
            html=True,
        )
        self.assertNotContains(
            response,
            '<input type="file" name="cover_art" id="id_cover_art" />',
            html=True,
        )
        # On the add form there is no current file, so the cell is empty.
        response = self.client.get(reverse('admin:admin_widgets_album_add'))
        self.assertContains(
            response,
            '<p></p>',
            html=True,
        )
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(TestCase):
    """Rendering of ForeignKeyRawIdWidget in various relation setups."""
    def test_render(self):
        band = models.Band.objects.create(name='Linkin Park')
        band.album_set.create(
            name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
        )
        rel = models.Album._meta.get_field('band').remote_field
        w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
        # Registered related model: input, magnifying-glass lookup link,
        # and the current object's label.
        self.assertHTMLEqual(
            w.render('test', band.pk, attrs={}),
            '<input type="text" name="test" value="%(bandpk)s" '
            'class="vForeignKeyRawIdAdminField" />'
            '<a href="/admin_widgets/band/?_to_field=id" class="related-lookup" '
            'id="lookup_id_test" title="Lookup"></a>&nbsp;<strong>Linkin Park</strong>'
            % {'bandpk': band.pk}
        )
    def test_relations_to_non_primary_key(self):
        # Check that ForeignKeyRawIdWidget works with fields which aren't
        # related to the model's primary key.
        apple = models.Inventory.objects.create(barcode=86, name='Apple')
        models.Inventory.objects.create(barcode=22, name='Pear')
        core = models.Inventory.objects.create(
            barcode=87, name='Core', parent=apple
        )
        rel = models.Inventory._meta.get_field('parent').remote_field
        w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
        # The lookup link targets the related field (barcode), not the pk.
        self.assertHTMLEqual(
            w.render('test', core.parent_id, attrs={}),
            '<input type="text" name="test" value="86" '
            'class="vForeignKeyRawIdAdminField" />'
            '<a href="/admin_widgets/inventory/?_to_field=barcode" '
            'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
            '&nbsp;<strong>Apple</strong>'
        )
    def test_fk_related_model_not_in_admin(self):
        # FK to a model not registered with admin site. Raw ID widget should
        # have no magnifying glass link. See #16542
        big_honeycomb = models.Honeycomb.objects.create(location='Old tree')
        big_honeycomb.bee_set.create()
        rel = models.Bee._meta.get_field('honeycomb').remote_field
        w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
        self.assertHTMLEqual(
            w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
            '<input type="text" name="honeycomb_widget" value="%(hcombpk)s" />'
            '&nbsp;<strong>Honeycomb object</strong>'
            % {'hcombpk': big_honeycomb.pk}
        )
    def test_fk_to_self_model_not_in_admin(self):
        # FK to self, not registered with admin site. Raw ID widget should have
        # no magnifying glass link. See #16542
        subject1 = models.Individual.objects.create(name='Subject #1')
        models.Individual.objects.create(name='Child', parent=subject1)
        rel = models.Individual._meta.get_field('parent').remote_field
        w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
        self.assertHTMLEqual(
            w.render('individual_widget', subject1.pk, attrs={}),
            '<input type="text" name="individual_widget" value="%(subj1pk)s" />'
            '&nbsp;<strong>Individual object</strong>'
            % {'subj1pk': subject1.pk}
        )
    def test_proper_manager_for_label_lookup(self):
        # see #9258
        rel = models.Inventory._meta.get_field('parent').remote_field
        w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
        # 'Hidden' is excluded by the default manager; the widget label
        # lookup must still find it (it uses the base manager).
        hidden = models.Inventory.objects.create(
            barcode=93, name='Hidden', hidden=True
        )
        child_of_hidden = models.Inventory.objects.create(
            barcode=94, name='Child of hidden', parent=hidden
        )
        self.assertHTMLEqual(
            w.render('test', child_of_hidden.parent_id, attrs={}),
            '<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField" />'
            '<a href="/admin_widgets/inventory/?_to_field=barcode" '
            'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
            '&nbsp;<strong>Hidden</strong>'
        )
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(TestCase):
    """Rendering of ManyToManyRawIdWidget (comma-separated pk list)."""
    def test_render(self):
        band = models.Band.objects.create(name='Linkin Park')
        m1 = models.Member.objects.create(name='Chester')
        m2 = models.Member.objects.create(name='Mike')
        band.members.add(m1, m2)
        rel = models.Band._meta.get_field('members').remote_field
        w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
        # Multiple values render comma-joined in one text input.
        self.assertHTMLEqual(
            w.render('test', [m1.pk, m2.pk], attrs={}), (
                '<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField" />'
                '<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
            ) % dict(m1pk=m1.pk, m2pk=m2.pk)
        )
        self.assertHTMLEqual(
            w.render('test', [m1.pk]), (
                '<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
                '<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
            ) % dict(m1pk=m1.pk)
        )
    def test_m2m_related_model_not_in_admin(self):
        # M2M relationship with model not registered with admin site. Raw ID
        # widget should have no magnifying glass link. See #16542
        consultor1 = models.Advisor.objects.create(name='Rockstar Techie')
        c1 = models.Company.objects.create(name='Doodle')
        c2 = models.Company.objects.create(name='Pear')
        consultor1.companies.add(c1, c2)
        rel = models.Advisor._meta.get_field('companies').remote_field
        w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
        self.assertHTMLEqual(
            w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
            '<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s" />' % {'c1pk': c1.pk, 'c2pk': c2.pk}
        )
        self.assertHTMLEqual(
            w.render('company_widget2', [c1.pk]),
            '<input type="text" name="company_widget2" value="%(c1pk)s" />' % {'c1pk': c1.pk}
        )
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
    """Capability flags exposed by RelatedFieldWidgetWrapper."""
    def test_no_can_add_related(self):
        parent_rel = models.Individual._meta.get_field('parent').remote_field
        # Used to fail with a name error.
        wrapper = widgets.RelatedFieldWidgetWrapper(
            widgets.AdminRadioSelect(), parent_rel, widget_admin_site)
        self.assertFalse(wrapper.can_add_related)
    def test_select_multiple_widget_cant_change_delete_related(self):
        # Multi-select widgets must not offer change/delete shortcuts,
        # even when explicitly requested.
        parent_rel = models.Individual._meta.get_field('parent').remote_field
        wrapper = widgets.RelatedFieldWidgetWrapper(
            forms.SelectMultiple(),
            parent_rel,
            widget_admin_site,
            can_add_related=True,
            can_change_related=True,
            can_delete_related=True,
        )
        self.assertTrue(wrapper.can_add_related)
        self.assertFalse(wrapper.can_change_related)
        self.assertFalse(wrapper.can_delete_related)
    def test_on_delete_cascade_rel_cant_delete_related(self):
        # A cascading relation keeps add/change but drops the delete shortcut.
        soulmate_rel = models.Individual._meta.get_field('soulmate').remote_field
        wrapper = widgets.RelatedFieldWidgetWrapper(
            forms.Select(),
            soulmate_rel,
            widget_admin_site,
            can_add_related=True,
            can_change_related=True,
            can_delete_related=True,
        )
        self.assertTrue(wrapper.can_add_related)
        self.assertTrue(wrapper.can_change_related)
        self.assertFalse(wrapper.can_delete_related)
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
    """Browser tests for the admin calendar/clock picker widgets (Firefox)."""
    available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
    def test_show_hide_date_time_picker_widgets(self):
        """
        Ensure that pressing the ESC key closes the date and time picker
        widgets.
        Refs #17064.
        """
        from selenium.webdriver.common.keys import Keys
        self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has a date and time picker widgets
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_widgets_member_add')))
        # First, with the date picker widget ---------------------------------
        # Check that the date picker is hidden
        self.assertEqual(
            self.get_css_value('#calendarbox0', 'display'), 'none')
        # Click the calendar icon
        self.selenium.find_element_by_id('calendarlink0').click()
        # Check that the date picker is visible
        self.assertEqual(
            self.get_css_value('#calendarbox0', 'display'), 'block')
        # Press the ESC key
        self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
        # Check that the date picker is hidden again
        self.assertEqual(
            self.get_css_value('#calendarbox0', 'display'), 'none')
        # Then, with the time picker widget ----------------------------------
        # Check that the time picker is hidden
        self.assertEqual(
            self.get_css_value('#clockbox0', 'display'), 'none')
        # Click the time icon
        self.selenium.find_element_by_id('clocklink0').click()
        # Check that the time picker is visible
        self.assertEqual(
            self.get_css_value('#clockbox0', 'display'), 'block')
        self.assertEqual(
            [
                x.text for x in
                self.selenium.find_elements_by_xpath("//ul[@class='timelist']/li/a")
            ],
            ['Now', 'Midnight', '6 a.m.', 'Noon', '6 p.m.']
        )
        # Press the ESC key
        self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
        # Check that the time picker is hidden again
        self.assertEqual(
            self.get_css_value('#clockbox0', 'display'), 'none')
    def test_calendar_nonday_class(self):
        """
        Ensure cells that are not days of the month have the `nonday` CSS class.
        Refs #4574.
        """
        self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has a date and time picker widgets
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_widgets_member_add')))
        # fill in the birth date.
        self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
        # Click the calendar icon
        self.selenium.find_element_by_id('calendarlink0').click()
        # get all the tds within the calendar
        calendar0 = self.selenium.find_element_by_id('calendarin0')
        tds = calendar0.find_elements_by_tag_name('td')
        # make sure the first and last 6 cells have class nonday
        for td in tds[:6] + tds[-6:]:
            self.assertEqual(td.get_attribute('class'), 'nonday')
    def test_calendar_selected_class(self):
        """
        Ensure cell for the day in the input has the `selected` CSS class.
        Refs #4574.
        """
        self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has a date and time picker widgets
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_widgets_member_add')))
        # fill in the birth date.
        self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
        # Click the calendar icon
        self.selenium.find_element_by_id('calendarlink0').click()
        # get all the tds within the calendar
        calendar0 = self.selenium.find_element_by_id('calendarin0')
        tds = calendar0.find_elements_by_tag_name('td')
        # verify the selected cell
        # (June 2013 starts on a Saturday, so index 6 is day 1 — TODO confirm
        # against the calendar markup.)
        selected = tds[6]
        self.assertEqual(selected.get_attribute('class'), 'selected')
        self.assertEqual(selected.text, '1')
    def test_calendar_no_selected_class(self):
        """
        Ensure no cells are given the selected class when the field is empty.
        Refs #4574.
        """
        self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has a date and time picker widgets
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_widgets_member_add')))
        # Click the calendar icon
        self.selenium.find_element_by_id('calendarlink0').click()
        # get all the tds within the calendar
        calendar0 = self.selenium.find_element_by_id('calendarin0')
        tds = calendar0.find_elements_by_tag_name('td')
        # verify there are no cells with the selected class
        selected = [td for td in tds if td.get_attribute('class') == 'selected']
        self.assertEqual(len(selected), 0)
    def test_calendar_show_date_from_input(self):
        """
        Ensure that the calendar show the date from the input field for every
        locale supported by django.
        """
        self.admin_login(username='super', password='secret', login_url='/')
        # Enter test data
        member = models.Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
        # Get month name translations for every locale
        month_string = 'May'
        path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
        for language_code, language_name in settings.LANGUAGES:
            try:
                catalog = gettext.translation('djangojs', path, [language_code])
            except IOError:
                # No JS catalog for this locale; fall back to English below.
                continue
            if month_string in catalog._catalog:
                month_name = catalog._catalog[month_string]
            else:
                month_name = month_string
            # Get the expected caption
            may_translation = month_name
            expected_caption = '{0:s} {1:d}'.format(may_translation.upper(), 1984)
            # Test with every locale
            with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
                # Open a page that has a date picker widget
                self.selenium.get('{}{}'.format(self.live_server_url,
                    reverse('admin:admin_widgets_member_change', args=(member.pk,))))
                # Click on the calendar icon
                self.selenium.find_element_by_id('calendarlink0').click()
                # Make sure that the right month and year are displayed
                self.wait_for_text('#calendarin0 caption', expected_caption)
class DateTimePickerSeleniumChromeTests(DateTimePickerSeleniumFirefoxTests):
    # Re-run the inherited picker tests with the Chrome webdriver.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerSeleniumIETests(DateTimePickerSeleniumFirefoxTests):
    # Re-run the inherited picker tests with the Internet Explorer webdriver.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@skipIf(pytz is None, "this test requires pytz")
@override_settings(TIME_ZONE='Asia/Singapore')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='admin_widgets.urls')
class DateTimePickerShortcutsSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
    """Browser tests for the "Today"/"Now" picker shortcuts across time zones."""
    available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
    def test_date_time_picker_shortcuts(self):
        """
        Ensure that date/time/datetime picker shortcuts work in the current time zone.
        Refs #20663.
        This test case is fairly tricky, it relies on selenium still running the browser
        in the default time zone "America/Chicago" despite `override_settings` changing
        the time zone to "Asia/Singapore".
        """
        self.admin_login(username='super', password='secret', login_url='/')
        error_margin = timedelta(seconds=10)
        # If we are neighbouring a DST, we add an hour of error margin.
        tz = pytz.timezone('America/Chicago')
        utc_now = datetime.now(pytz.utc)
        tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
        tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
        if tz_yesterday != tz_tomorrow:
            error_margin += timedelta(hours=1)
        now = datetime.now()
        self.selenium.get('%s%s' % (self.live_server_url,
            reverse('admin:admin_widgets_member_add')))
        self.selenium.find_element_by_id('id_name').send_keys('test')
        # Click on the "today" and "now" shortcuts.
        shortcuts = self.selenium.find_elements_by_css_selector(
            '.field-birthdate .datetimeshortcuts')
        for shortcut in shortcuts:
            shortcut.find_element_by_tag_name('a').click()
        # Check that there is a time zone mismatch warning.
        # Warning: This would effectively fail if the TIME_ZONE defined in the
        # settings has the same UTC offset as "Asia/Singapore" because the
        # mismatch warning would be rightfully missing from the page.
        self.selenium.find_elements_by_css_selector(
            '.field-birthdate .timezonewarning')
        # Submit the form.
        self.selenium.find_element_by_tag_name('form').submit()
        self.wait_page_loaded()
        # Make sure that "now" in javascript is within 10 seconds
        # from "now" on the server side.
        member = models.Member.objects.get(name='test')
        self.assertGreater(member.birthdate, now - error_margin)
        self.assertLess(member.birthdate, now + error_margin)
class DateTimePickerShortcutsSeleniumChromeTests(DateTimePickerShortcutsSeleniumFirefoxTests):
    # Re-run the inherited shortcut tests with the Chrome webdriver.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerShortcutsSeleniumIETests(DateTimePickerShortcutsSeleniumFirefoxTests):
    # Re-run the inherited shortcut tests with the Internet Explorer webdriver.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
# The above tests run with Asia/Singapore, which is on the positive side of
# UTC. Here we test with a timezone on the negative side.
@override_settings(TIME_ZONE='US/Eastern')
class DateTimePickerAltTimezoneSeleniumFirefoxTests(DateTimePickerShortcutsSeleniumFirefoxTests):
    # Inherits all shortcut tests; only TIME_ZONE (set by the decorator
    # above) differs from the parent class.
    pass
class DateTimePickerAltTimezoneSeleniumChromeTests(DateTimePickerAltTimezoneSeleniumFirefoxTests):
    # Re-run the alternate-timezone tests with the Chrome webdriver.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class DateTimePickerAltTimezoneSeleniumIETests(DateTimePickerAltTimezoneSeleniumFirefoxTests):
    # Re-run the alternate-timezone tests with the Internet Explorer webdriver.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='admin_widgets.urls')
class HorizontalVerticalFilterSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
    """Browser tests for the filter_horizontal / filter_vertical admin
    widgets (the two-pane chooser used for many-to-many fields)."""
    available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    def setUp(self):
        """Create the students and the school every test operates on."""
        super(HorizontalVerticalFilterSeleniumFirefoxTests, self).setUp()
        self.lisa = models.Student.objects.create(name='Lisa')
        self.john = models.Student.objects.create(name='John')
        self.bob = models.Student.objects.create(name='Bob')
        self.peter = models.Student.objects.create(name='Peter')
        self.jenny = models.Student.objects.create(name='Jenny')
        self.jason = models.Student.objects.create(name='Jason')
        self.cliff = models.Student.objects.create(name='Cliff')
        self.arthur = models.Student.objects.create(name='Arthur')
        self.school = models.School.objects.create(name='School of Awesome')

    def assertActiveButtons(self, mode, field_name, choose, remove,
                            choose_all=None, remove_all=None):
        """Assert the active/inactive state of the widget's action links.

        The "choose all"/"remove all" links only exist in horizontal
        mode, so ``choose_all``/``remove_all`` are only checked there.
        """
        choose_link = '#id_%s_add_link' % field_name
        choose_all_link = '#id_%s_add_all_link' % field_name
        remove_link = '#id_%s_remove_link' % field_name
        remove_all_link = '#id_%s_remove_all_link' % field_name
        self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
        self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
        if mode == 'horizontal':
            self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
            self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)

    def execute_basic_operations(self, mode, field_name):
        """Drive choose/remove interactions on *field_name*'s widget and
        assert option positions and button states after every step.

        Leaves Arthur, Cliff, Jason and John selected at the end.
        """
        from_box = '#id_%s_from' % field_name
        to_box = '#id_%s_to' % field_name
        choose_link = 'id_%s_add_link' % field_name
        choose_all_link = 'id_%s_add_all_link' % field_name
        remove_link = 'id_%s_remove_link' % field_name
        remove_all_link = 'id_%s_remove_all_link' % field_name

        # Initial positions ---------------------------------------------------
        self.assertSelectOptions(from_box,
                                 [str(self.arthur.id), str(self.bob.id),
                                  str(self.cliff.id), str(self.jason.id),
                                  str(self.jenny.id), str(self.john.id)])
        self.assertSelectOptions(to_box,
                                 [str(self.lisa.id), str(self.peter.id)])
        self.assertActiveButtons(mode, field_name, False, False, True, True)

        # Click 'Choose all' --------------------------------------------------
        if mode == 'horizontal':
            self.selenium.find_element_by_id(choose_all_link).click()
        elif mode == 'vertical':
            # There's no 'Choose all' button in vertical mode, so individually
            # select all options and click 'Choose'.
            for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
                option.click()
            self.selenium.find_element_by_id(choose_link).click()
        self.assertSelectOptions(from_box, [])
        self.assertSelectOptions(to_box,
                                 [str(self.lisa.id), str(self.peter.id),
                                  str(self.arthur.id), str(self.bob.id),
                                  str(self.cliff.id), str(self.jason.id),
                                  str(self.jenny.id), str(self.john.id)])
        self.assertActiveButtons(mode, field_name, False, False, False, True)

        # Click 'Remove all' --------------------------------------------------
        if mode == 'horizontal':
            self.selenium.find_element_by_id(remove_all_link).click()
        elif mode == 'vertical':
            # There's no 'Remove all' button in vertical mode, so individually
            # select all options and click 'Remove'.
            for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
                option.click()
            self.selenium.find_element_by_id(remove_link).click()
        self.assertSelectOptions(from_box,
                                 [str(self.lisa.id), str(self.peter.id),
                                  str(self.arthur.id), str(self.bob.id),
                                  str(self.cliff.id), str(self.jason.id),
                                  str(self.jenny.id), str(self.john.id)])
        self.assertSelectOptions(to_box, [])
        self.assertActiveButtons(mode, field_name, False, False, True, False)

        # Choose some options ------------------------------------------------
        from_lisa_select_option = self.get_select_option(from_box, str(self.lisa.id))

        # Check the title attribute is there for tool tips: ticket #20821
        self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))

        from_lisa_select_option.click()
        self.get_select_option(from_box, str(self.jason.id)).click()
        self.get_select_option(from_box, str(self.bob.id)).click()
        self.get_select_option(from_box, str(self.john.id)).click()
        self.assertActiveButtons(mode, field_name, True, False, True, False)
        self.selenium.find_element_by_id(choose_link).click()
        self.assertActiveButtons(mode, field_name, False, False, True, True)

        self.assertSelectOptions(from_box,
                                 [str(self.peter.id), str(self.arthur.id),
                                  str(self.cliff.id), str(self.jenny.id)])
        self.assertSelectOptions(to_box,
                                 [str(self.lisa.id), str(self.bob.id),
                                  str(self.jason.id), str(self.john.id)])

        # Check the tooltip is still there after moving: ticket #20821
        to_lisa_select_option = self.get_select_option(to_box, str(self.lisa.id))
        self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))

        # Remove some options -------------------------------------------------
        self.get_select_option(to_box, str(self.lisa.id)).click()
        self.get_select_option(to_box, str(self.bob.id)).click()
        self.assertActiveButtons(mode, field_name, False, True, True, True)
        self.selenium.find_element_by_id(remove_link).click()
        self.assertActiveButtons(mode, field_name, False, False, True, True)

        self.assertSelectOptions(from_box,
                                 [str(self.peter.id), str(self.arthur.id),
                                  str(self.cliff.id), str(self.jenny.id),
                                  str(self.lisa.id), str(self.bob.id)])
        self.assertSelectOptions(to_box,
                                 [str(self.jason.id), str(self.john.id)])

        # Choose some more options --------------------------------------------
        self.get_select_option(from_box, str(self.arthur.id)).click()
        self.get_select_option(from_box, str(self.cliff.id)).click()
        self.selenium.find_element_by_id(choose_link).click()

        self.assertSelectOptions(from_box,
                                 [str(self.peter.id), str(self.jenny.id),
                                  str(self.lisa.id), str(self.bob.id)])
        self.assertSelectOptions(to_box,
                                 [str(self.jason.id), str(self.john.id),
                                  str(self.arthur.id), str(self.cliff.id)])

    def test_basic(self):
        """Exercise both widget modes on one form and verify the saved M2M."""
        self.school.students.set([self.lisa, self.peter])
        self.school.alumni.set([self.lisa, self.peter])

        self.admin_login(username='super', password='secret', login_url='/')
        self.selenium.get('%s%s' % (
            self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))

        self.wait_page_loaded()
        self.execute_basic_operations('vertical', 'students')
        self.execute_basic_operations('horizontal', 'alumni')

        # Save and check that everything is properly stored in the database ---
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.wait_page_loaded()
        self.school = models.School.objects.get(id=self.school.id)  # Reload from database
        self.assertEqual(list(self.school.students.all()),
                         [self.arthur, self.cliff, self.jason, self.john])
        self.assertEqual(list(self.school.alumni.all()),
                         [self.arthur, self.cliff, self.jason, self.john])

    def test_filter(self):
        """
        Ensure that typing in the search box filters out options displayed in
        the 'from' box.
        """
        from selenium.webdriver.common.keys import Keys

        self.school.students.set([self.lisa, self.peter])
        self.school.alumni.set([self.lisa, self.peter])

        self.admin_login(username='super', password='secret', login_url='/')
        self.selenium.get(
            '%s%s' % (self.live_server_url, reverse('admin:admin_widgets_school_change', args=(self.school.id,))))

        for field_name in ['students', 'alumni']:
            from_box = '#id_%s_from' % field_name
            to_box = '#id_%s_to' % field_name
            choose_link = '#id_%s_add_link' % field_name
            remove_link = '#id_%s_remove_link' % field_name
            input = self.selenium.find_element_by_css_selector('#id_%s_input' % field_name)

            # Initial values
            self.assertSelectOptions(from_box,
                                     [str(self.arthur.id), str(self.bob.id),
                                      str(self.cliff.id), str(self.jason.id),
                                      str(self.jenny.id), str(self.john.id)])

            # Typing in some characters filters out non-matching options
            input.send_keys('a')
            self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
            input.send_keys('R')
            self.assertSelectOptions(from_box, [str(self.arthur.id)])

            # Clearing the text box makes the other options reappear
            input.send_keys([Keys.BACK_SPACE])
            self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
            input.send_keys([Keys.BACK_SPACE])
            self.assertSelectOptions(from_box,
                                     [str(self.arthur.id), str(self.bob.id),
                                      str(self.cliff.id), str(self.jason.id),
                                      str(self.jenny.id), str(self.john.id)])

            # -----------------------------------------------------------------
            # Check that choosing a filtered option sends it properly to the
            # 'to' box.
            input.send_keys('a')
            self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
            self.get_select_option(from_box, str(self.jason.id)).click()
            self.selenium.find_element_by_css_selector(choose_link).click()
            self.assertSelectOptions(from_box, [str(self.arthur.id)])
            self.assertSelectOptions(to_box,
                                     [str(self.lisa.id), str(self.peter.id),
                                      str(self.jason.id)])

            self.get_select_option(to_box, str(self.lisa.id)).click()
            self.selenium.find_element_by_css_selector(remove_link).click()
            self.assertSelectOptions(from_box,
                                     [str(self.arthur.id), str(self.lisa.id)])
            self.assertSelectOptions(to_box,
                                     [str(self.peter.id), str(self.jason.id)])

            input.send_keys([Keys.BACK_SPACE])  # Clear text box
            self.assertSelectOptions(from_box,
                                     [str(self.arthur.id), str(self.bob.id),
                                      str(self.cliff.id), str(self.jenny.id),
                                      str(self.john.id), str(self.lisa.id)])
            self.assertSelectOptions(to_box,
                                     [str(self.peter.id), str(self.jason.id)])

            # -----------------------------------------------------------------
            # Check that pressing enter on a filtered option sends it properly
            # to the 'to' box.
            self.get_select_option(to_box, str(self.jason.id)).click()
            self.selenium.find_element_by_css_selector(remove_link).click()
            input.send_keys('ja')
            self.assertSelectOptions(from_box, [str(self.jason.id)])
            input.send_keys([Keys.ENTER])
            self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
            input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])

        # Save and check that everything is properly stored in the database ---
        self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
        self.wait_page_loaded()
        self.school = models.School.objects.get(id=self.school.id)  # Reload from database
        self.assertEqual(list(self.school.students.all()),
                         [self.jason, self.peter])
        self.assertEqual(list(self.school.alumni.all()),
                         [self.jason, self.peter])
class HorizontalVerticalFilterSeleniumChromeTests(HorizontalVerticalFilterSeleniumFirefoxTests):
    # Re-run the inherited filter-widget tests against Chrome.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class HorizontalVerticalFilterSeleniumIETests(HorizontalVerticalFilterSeleniumFirefoxTests):
    # Re-run the inherited filter-widget tests against Internet Explorer.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='admin_widgets.urls')
class AdminRawIdWidgetSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
    """Browser tests for raw-id widget lookup popups on ForeignKey and
    many-to-many fields."""
    available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    def setUp(self):
        super(AdminRawIdWidgetSeleniumFirefoxTests, self).setUp()
        models.Band.objects.create(id=42, name='Bogey Blues')
        models.Band.objects.create(id=98, name='Green Potatoes')

    def _pick_band_in_popup(self, main_window, field_id, link_text, band_pk):
        """Open the raw-id lookup popup for *field_id*, click the band named
        *link_text* (whose change URL must contain /band/<band_pk>/), and
        switch back to *main_window*."""
        self.selenium.find_element_by_id('lookup_%s' % field_id).click()
        self.wait_for_popup()
        self.selenium.switch_to.window(field_id)
        link = self.selenium.find_element_by_link_text(link_text)
        self.assertIn('/band/%s/' % band_pk, link.get_attribute('href'))
        link.click()
        self.selenium.switch_to.window(main_window)

    def test_ForeignKey(self):
        self.admin_login(username='super', password='secret', login_url='/')
        self.selenium.get(
            '%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
        main_window = self.selenium.current_window_handle

        # No value has been selected yet.
        self.assertEqual(
            self.selenium.find_element_by_id('id_main_band').get_attribute('value'),
            '')

        # Pick a band through the popup; the field then holds its id.
        self._pick_band_in_popup(main_window, 'id_main_band', 'Bogey Blues', 42)
        self.wait_for_value('#id_main_band', '42')

        # Picking another band replaces the previous value.
        self._pick_band_in_popup(main_window, 'id_main_band', 'Green Potatoes', 98)
        self.wait_for_value('#id_main_band', '98')

    def test_many_to_many(self):
        self.admin_login(username='super', password='secret', login_url='/')
        self.selenium.get(
            '%s%s' % (self.live_server_url, reverse('admin:admin_widgets_event_add')))
        main_window = self.selenium.current_window_handle

        # No value has been selected yet.
        self.assertEqual(
            self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'),
            '')

        # For a many-to-many raw-id field the selected ids accumulate as a
        # comma-separated list instead of replacing each other.
        self._pick_band_in_popup(main_window, 'id_supporting_bands', 'Bogey Blues', 42)
        self.wait_for_value('#id_supporting_bands', '42')

        self._pick_band_in_popup(main_window, 'id_supporting_bands', 'Green Potatoes', 98)
        self.wait_for_value('#id_supporting_bands', '42,98')
class AdminRawIdWidgetSeleniumChromeTests(AdminRawIdWidgetSeleniumFirefoxTests):
    # Re-run the inherited raw-id widget tests against Chrome.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class AdminRawIdWidgetSeleniumIETests(AdminRawIdWidgetSeleniumFirefoxTests):
    # Re-run the inherited raw-id widget tests against Internet Explorer.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetSeleniumFirefoxTests(SeleniumDataMixin, AdminSeleniumWebDriverTestCase):
    """Browser tests for the related-object add/change popup buttons."""
    available_apps = ['admin_widgets'] + AdminSeleniumWebDriverTestCase.available_apps
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    def test_ForeignKey_using_to_field(self):
        """Add, then edit, a related User (FK with to_field) via popups and
        check the select widget and the saved Profile track the changes."""
        self.admin_login(username='super', password='secret', login_url='/')
        self.selenium.get('%s%s' % (
            self.live_server_url,
            reverse('admin:admin_widgets_profile_add')))

        main_window = self.selenium.current_window_handle
        # Click the Add User button to add new
        self.selenium.find_element_by_id('add_id_user').click()
        self.wait_for_popup()
        self.selenium.switch_to.window('id_user')
        password_field = self.selenium.find_element_by_id('id_password')
        password_field.send_keys('password')

        username_field = self.selenium.find_element_by_id('id_username')
        username_value = 'newuser'
        username_field.send_keys(username_value)

        save_button_css_selector = '.submit-row > input[type=submit]'
        self.selenium.find_element_by_css_selector(save_button_css_selector).click()
        self.selenium.switch_to.window(main_window)
        # The field now contains the new user
        self.wait_for('#id_user option[value="newuser"]')

        # Click the Change User button to change it
        self.selenium.find_element_by_id('change_id_user').click()
        self.wait_for_popup()
        self.selenium.switch_to.window('id_user')

        username_field = self.selenium.find_element_by_id('id_username')
        username_value = 'changednewuser'
        username_field.clear()
        username_field.send_keys(username_value)

        save_button_css_selector = '.submit-row > input[type=submit]'
        self.selenium.find_element_by_css_selector(save_button_css_selector).click()
        self.selenium.switch_to.window(main_window)
        # Wait up to 2 seconds for the new option to show up after clicking save in the popup.
        self.selenium.implicitly_wait(2)
        self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
        self.selenium.implicitly_wait(0)

        # Go ahead and submit the form to make sure it works
        self.selenium.find_element_by_css_selector(save_button_css_selector).click()
        self.wait_for_text('li.success', 'The profile "changednewuser" was added successfully.')
        profiles = models.Profile.objects.all()
        self.assertEqual(len(profiles), 1)
        self.assertEqual(profiles[0].user.username, username_value)
class RelatedFieldWidgetSeleniumChromeTests(RelatedFieldWidgetSeleniumFirefoxTests):
    # Re-run the inherited related-field popup tests against Chrome.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class RelatedFieldWidgetSeleniumIETests(RelatedFieldWidgetSeleniumFirefoxTests):
    # Re-run the inherited related-field popup tests against Internet Explorer.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| {
"content_hash": "6fb7f273095bf2c0568bf4f5caf641a4",
"timestamp": "",
"source": "github",
"line_count": 1302,
"max_line_length": 119,
"avg_line_length": 44.90399385560676,
"alnum_prop": 0.6206961429915334,
"repo_name": "dydek/django",
"id": "46f2d2a76e51c0dc54b1b991eb138e04d2b605d8",
"size": "58514",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/admin_widgets/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52294"
},
{
"name": "HTML",
"bytes": "170412"
},
{
"name": "JavaScript",
"bytes": "255202"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11355784"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import logging
import getopt
import sys
import couchapp.commands as commands
from couchapp.errors import AppError, CommandLineError
from couchapp.config import Config
logger = logging.getLogger(__name__)
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""
    # NOTE(review): the stdlib ships logging.NullHandler since Python 2.7;
    # presumably kept for compatibility with older interpreters — confirm.
    def emit(self, record):
        # Intentionally do nothing.
        pass
def set_logging(level=2):
    """Attach a stream handler to the 'couchapp' logger.

    The stdlib logging level is ``level * 10`` (so the default of 2
    maps to INFO).
    """
    log_format = r"%(asctime)s [%(levelname)s] %(message)s"
    date_format = r"%Y-%m-%d %H:%M:%S"

    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(log_format, date_format))

    couchapp_logger = logging.getLogger('couchapp')
    couchapp_logger.setLevel(level * 10)
    couchapp_logger.addHandler(stream_handler)
def set_logging_level(level=2):
    """Adjust the 'couchapp' logger's level (stdlib level = level * 10)."""
    logging.getLogger('couchapp').setLevel(level * 10)
def run():
    """Console entry point: dispatch the CLI args and exit with the result."""
    sys.exit(dispatch(sys.argv[1:]))
def dispatch(args):
    """Set up logging, run *args*, and map failures to a -1 return code.

    Known error types are logged at an appropriate level instead of
    propagating; unexpected exceptions are logged with their traceback.
    On success, returns whatever the dispatched command returned.
    """
    set_logging()
    try:
        return _dispatch(args)
    # Consistent lazy %-style logging args throughout (the original mixed
    # eager '%'-interpolation and str.format).
    except AppError as e:
        logger.error("couchapp error: %s", e)
    except CommandLineError as e:
        logger.error("command line error: %s", e)
    except KeyboardInterrupt:
        logger.info("keyboard interrupt")
    except Exception as e:
        # Unexpected failure: keep the traceback for debugging.
        import traceback
        logger.critical("%s\n\n%s", e, traceback.format_exc())
    return -1
def _dispatch(args):
    """Resolve the sub-command named in *args* and invoke it.

    Handles --help/--version shortcuts, maps verbosity flags to a
    logging level, and raises CommandLineError for unknown commands.
    """
    conf = Config()
    # update commands
    for mod in conf.extensions:
        cmdtable = getattr(mod, 'cmdtable', {})
        commands.table.update(cmdtable)

    cmd, globalopts, opts, args = _parse(args)

    if globalopts["help"]:
        del globalopts["help"]
        return commands.usage(conf, *args, **globalopts)
    elif globalopts["version"]:
        del globalopts["version"]
        return commands.version(conf, *args, **globalopts)

    # Map CLI verbosity flags to the level scheme used by set_logging_level
    # (level * 10 => stdlib level).
    verbose = 2
    if globalopts["debug"]:
        verbose = 1
        import restkit
        restkit.set_logging("debug")
    elif globalopts["verbose"]:
        verbose = 1
    elif globalopts["quiet"]:
        verbose = 0
    set_logging_level(verbose)

    if cmd not in commands.table:
        raise CommandLineError('Unknown command: "{0}"'.format(cmd))

    fun = commands.table[cmd][0]
    # Some commands must run inside a couchapp directory and receive it
    # as an extra positional argument.
    if cmd in commands.incouchapp:
        return fun(conf, conf.app_dir, *args, **opts)

    return fun(conf, *args, **opts)
def _parse(args):
    """Split raw CLI *args* into (cmd, globalopts, cmdoptions, args).

    Global options are parsed first; the first remaining argument names
    the command (defaulting to "help"), whose own option table is then
    parsed.  getopt errors are re-raised as CommandLineError.
    """
    options = {}
    cmdoptions = {}
    try:
        args = parseopts(args, commands.globalopts, options)
    except getopt.GetoptError as e:
        raise CommandLineError(str(e))

    if args:
        cmd, args = args[0], args[1:]
        if cmd in commands.table:
            cmdopts = list(commands.table[cmd][1])
        else:
            cmdopts = []
    else:
        cmd = "help"
        cmdopts = list(commands.table[cmd][1])

    # Re-offer the global options (with their already-parsed values as
    # defaults) so they may also appear after the command name.
    for opt in commands.globalopts:
        cmdopts.append((opt[0], opt[1], options[opt[1]], opt[3]))

    try:
        args = parseopts(args, cmdopts, cmdoptions)
    except getopt.GetoptError as e:
        raise CommandLineError((cmd, e))

    # Move global options back out of the command options.  Iterate over a
    # snapshot: deleting from a dict while iterating its live keys() view
    # raises RuntimeError on Python 3.
    for opt in list(cmdoptions):
        if opt in options:
            options[opt] = cmdoptions[opt]
            del cmdoptions[opt]

    return cmd, options, cmdoptions, args
def parseopts(args, options, state):
    """Parse *args* against an option table, filling *state* in place.

    ``options`` is a sequence of ``(short, name, default, comment)``
    rows.  The type of ``default`` determines handling: int -> value is
    int()-coerced; str -> stored as-is; list -> values accumulate; and
    None or bool -> the option is a flag set to True when present.
    Returns the positional arguments left after option parsing.
    Raises getopt.GetoptError for unknown/malformed options.
    """
    namelist = []
    shortlist = ''
    argmap = {}
    defmap = {}

    for short, name, default, comment in options:
        oname = name
        name = name.replace('-', '_')
        argmap['-' + short] = argmap['--' + oname] = name
        defmap[name] = default

        # Copy list defaults so the shared option table is never mutated.
        if isinstance(default, list):
            state[name] = default[:]
        else:
            state[name] = default

        # Flags have a None/True/False default; anything else takes an
        # argument (getopt ':' / '=' markers).
        if not (default is None or default is True or default is False):
            if short:
                short += ':'
            if oname:
                oname += '='
        if short:
            shortlist += short
        if name:
            namelist.append(oname)

    opts, args = getopt.getopt(args, shortlist, namelist)

    for opt, val in opts:
        name = argmap[opt]
        default = defmap[name]
        # Exact type checks on purpose: bool is a subclass of int but
        # must be treated as a flag, not an integer option.
        if type(default) is int:
            state[name] = int(val)
        elif type(default) is str:
            state[name] = val
        elif type(default) is list:
            state[name].append(val)
        elif default is None or type(default) is bool:
            state[name] = True

    return args
| {
"content_hash": "39d9ca7533d5289d5729f4f2f2ef1495",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 72,
"avg_line_length": 25.47093023255814,
"alnum_prop": 0.5843414745491897,
"repo_name": "flimzy/couchapp",
"id": "4258814db31d21406ee08cff7c5ace71d0e20532",
"size": "4516",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "couchapp/dispatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "262"
},
{
"name": "CSS",
"bytes": "887"
},
{
"name": "HTML",
"bytes": "4849"
},
{
"name": "Inno Setup",
"bytes": "1834"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Python",
"bytes": "145970"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
"""
check_schedule.py: check correct scheduled show title information
"""
__author__ = "Kentaro Sasaki"
__copyright__ = "Copyright 2014 Kentaro Sasaki"
import datetime
import json
import logging
def _load_json(file_name):
logging.debug("Open json file: %s" % file_name)
with open(file_name) as fp:
return json.load(fp)
def _check_weekday(weekday, now, check = False):
"""
"""
if int(weekday) == now.isoweekday():
check = True
logging.debug("Check correct weekday flag: %s" % check)
return check
def _check_showtime(showtime, now, check = False):
"""
"""
if showtime == now.strftime("%H:%M"):
check = True
logging.debug("Check correct showtime flag: %s" % check)
return check
def check_target(programs_json, program=None):
    """Return the scheduled program matching the current weekday/time.

    Scans the JSON schedule file *programs_json*; the last entry whose
    weekday and showtime both match ``datetime.now()`` wins.  Returns
    *program* (default None) when nothing matches.
    """
    now = datetime.datetime.now()
    logging.debug("Now is %s", now)
    # Iterate entries directly instead of indexing by range(len(...)).
    for entry in _load_json(programs_json):
        if (_check_weekday(entry["weekday"], now)
                and _check_showtime(entry["showtime"], now)):
            program = entry
    return program
| {
"content_hash": "76d0dce8bc26d8d557e97268f44c52bb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 23.229166666666668,
"alnum_prop": 0.662780269058296,
"repo_name": "kentarosasaki/agqr-rec",
"id": "98148ebf8594caf910be21bcc9795f9c41c3f79a",
"size": "1661",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reclib/check_schedule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12020"
}
],
"symlink_target": ""
} |
import os,sys

# Make the project root importable so `core` can be found when this
# script is run directly from its bin/ directory.
BASEDIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(BASEDIR)

from core import client

if __name__ == "__main__":
    # Script entry point: start the FTP client.
    client.run()
| {
"content_hash": "a1188017a8156d2265625e36b837c08e",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 69,
"avg_line_length": 29.5,
"alnum_prop": 0.6779661016949152,
"repo_name": "jianbosky/ftp_server_client",
"id": "8a27da6aaa89bce51034192ff8fbf49736aca071",
"size": "222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ftp_client/bin/ftp_client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30975"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
from django.utils.timezone import now
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djangobmf.conf import settings
from djangobmf.models import BMFModel
from .serializers import GoalSerializer
from .serializers import TaskSerializer
from .workflows import GoalWorkflow
from .workflows import TaskWorkflow
from .permissions import GoalFilter
from .permissions import TaskFilter
from math import floor
class GoalManager(models.Manager):
    """Default manager for goals; always joins the related project."""

    def get_queryset(self):
        base_qs = super(GoalManager, self).get_queryset()
        return base_qs.select_related('project')
@python_2_unicode_compatible
class AbstractGoal(BMFModel):
    """A project goal: a titled, optionally project-bound collection of
    tasks, with per-state progress percentages via get_states()."""
    summary = models.CharField(_("Title"), max_length=255, null=True, blank=False, )
    description = models.TextField(_("Description"), null=True, blank=True, )
    project = models.ForeignKey(  # TODO: make optional
        settings.CONTRIB_PROJECT, null=True, blank=True, on_delete=models.CASCADE,
    )
    referee = models.ForeignKey(
        settings.CONTRIB_EMPLOYEE, null=True, blank=True, on_delete=models.SET_NULL,
        related_name="+"
    )
    team = models.ForeignKey(
        settings.CONTRIB_TEAM, null=True, blank=True, on_delete=models.SET_NULL,
    )
    employees = models.ManyToManyField(
        settings.CONTRIB_EMPLOYEE, blank=True,
        related_name="employees"
    )
    # Managed by the workflow, not edited directly.
    completed = models.BooleanField(_("Completed"), default=False, editable=False)

    objects = GoalManager()

    class Meta(BMFModel.Meta):  # only needed for abstract models
        verbose_name = _('Goal')
        verbose_name_plural = _('Goals')
        ordering = ['project__name', 'summary']
        abstract = True
        permissions = (
            ('can_manage', 'Can manage all goals'),
        )
        swappable = "BMF_CONTRIB_GOAL"

    class BMFMeta:
        has_logging = False
        workflow = GoalWorkflow
        can_clone = True
        serializer = GoalSerializer
        filter_queryset = GoalFilter

    def bmfget_customer(self):
        # Customer is derived from the project, if any.
        if self.project:
            return self.project.customer
        return None

    def bmfget_project(self):
        return self.project

    @staticmethod
    def bmfrelated_project_queryset(qs):
        # Only goals that are still open are listed on related projects.
        return qs.filter(completed=False)

    def __str__(self):
        return '%s' % (self.summary)

    def get_states(self):
        """Return the share of this goal's tasks per state bucket.

        With no active tasks the zero floats are returned unchanged;
        otherwise each bucket becomes a '%4.2f' percentage string,
        floored to two decimals.
        """
        active_states = 0
        states = {
            "hold": 0.,
            "review": 0.,
            "done": 0.,
            "todo": 0.,
        }
        # Count tasks grouped by workflow state; each state feeds at most
        # one bucket, and every counted state contributes to the total.
        for state, count in self.task_set.values_list('state').annotate(count=models.Count('state')).order_by():
            if state in ["new", "open", ]:
                active_states += count
            if state in ["hold", ]:
                states["hold"] += count
                active_states += count
            if state in ["todo", "started"]:
                states["todo"] += count
                active_states += count
            if state in ["review", ]:
                states["review"] += count
                active_states += count
            if state in ["finished", ]:
                states["done"] += count
                active_states += count

        if active_states == 0:
            return states

        states['hold'] = '%4.2f' % (floor(10000 * states["hold"] / active_states) / 100)
        states['done'] = '%4.2f' % (floor(10000 * states["done"] / active_states) / 100)
        states['todo'] = '%4.2f' % (floor(10000 * states["todo"] / active_states) / 100)
        states['review'] = '%4.2f' % (floor(10000 * states["review"] / active_states) / 100)

        return states
class BaseGoal(AbstractGoal):
    """Abstract base kept as the anchor point for swappable goal models."""
    class Meta(AbstractGoal.Meta):
        abstract = True
class Goal(BaseGoal):
    """Default concrete goal model (used unless BMF_CONTRIB_GOAL is swapped)."""
    pass
class TaskManager(models.Manager):
    """Default manager for tasks: due-dated tasks first, then by summary."""

    def get_queryset(self):
        qs = super(TaskManager, self).get_queryset()
        # NOTE(review): due_count appears to push tasks without a due date
        # to the end of the ordering — confirm against the schema.
        qs = qs.annotate(due_count=models.Count('due_date'))
        qs = qs.order_by('-due_count', 'due_date', 'summary')
        return qs.select_related('goal', 'project')
@python_2_unicode_compatible
class AbstractTask(BMFModel):
    """A single work item, optionally attached to a goal and/or project."""
    summary = models.CharField(_("Title"), max_length=255, null=True, blank=False)
    description = models.TextField(_("Description"), null=True, blank=True)
    due_date = models.DateField(_('Due date'), null=True, blank=True)
    project = models.ForeignKey(  # TODO: make optional
        settings.CONTRIB_PROJECT, null=True, blank=True, on_delete=models.CASCADE,
    )
    employee = models.ForeignKey(
        settings.CONTRIB_EMPLOYEE, null=True, blank=True, on_delete=models.SET_NULL,
    )
    # Set by the workflow, not editable in forms.
    in_charge = models.ForeignKey(
        settings.CONTRIB_EMPLOYEE, null=True, blank=True, on_delete=models.SET_NULL,
        related_name="+", editable=False,
    )
    goal = models.ForeignKey(settings.CONTRIB_GOAL, null=True, blank=True, on_delete=models.CASCADE)
    # Managed by the workflow, not edited directly.
    completed = models.BooleanField(_("Completed"), default=False, editable=False)

    objects = TaskManager()

    class Meta(BMFModel.Meta):  # only needed for abstract models
        verbose_name = _('Task')
        verbose_name_plural = _('Tasks')
        ordering = ['due_date', 'summary']
        abstract = True
        swappable = "BMF_CONTRIB_TASK"

    class BMFMeta:
        workflow = TaskWorkflow
        serializer = TaskSerializer
        has_files = True
        has_comments = True
        filter_queryset = TaskFilter

    def __str__(self):
        if self.goal:
            return '[%s] #%s: %s' % (self.goal, self.pk, self.summary)
        else:
            return '#%s: %s' % (self.pk, self.summary)

    def clean(self):
        # overwrite the project with the goals project
        if self.goal:
            self.project = self.goal.project

    def get_project_queryset(self, qs):
        # When a goal is set, only its tasks are valid choices.
        if self.goal:
            return qs.filter(goal=self.goal)
        else:
            return qs

    def get_goal_queryset(self, qs):
        # When a project is set, only its goals are valid choices.
        if self.project:
            return qs.filter(project=self.project)
        else:
            return qs

    def due_days(self):
        """Days until the due date; 0 when overdue, None without a due date."""
        if self.due_date:
            time = now().date()
            if time >= self.due_date:
                return 0
            return (self.due_date - time).days
class BaseTask(AbstractTask):
    """Abstract base kept as the anchor point for swappable task models."""
    class Meta(AbstractTask.Meta):
        abstract = True
class Task(BaseTask):
    """Default concrete task model (used unless BMF_CONTRIB_TASK is swapped)."""
    pass
| {
"content_hash": "2aa30d262a53ce849541042cbcca2ab3",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 112,
"avg_line_length": 29.472972972972972,
"alnum_prop": 0.5986550511997555,
"repo_name": "django-bmf/django-bmf",
"id": "5700599e6ee3effc718d167611726a61ac1bd2e5",
"size": "6591",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "djangobmf/contrib/task/models.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11420"
},
{
"name": "CoffeeScript",
"bytes": "3197"
},
{
"name": "HTML",
"bytes": "117091"
},
{
"name": "JavaScript",
"bytes": "80435"
},
{
"name": "Python",
"bytes": "774167"
},
{
"name": "Shell",
"bytes": "736"
}
],
"symlink_target": ""
} |
import logging
import sys
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.views.decorators.http import require_http_methods
from search.db.query import Query
@require_http_methods(["GET"])
def index(request):
    """Search landing page and search endpoint.

    With a ``q`` parameter: verify cookie support, run the search, and
    return the results as JSON.  Without ``q``: set the test cookie and
    render the search page.
    """
    user_query = request.GET.get("q", "")
    if user_query:
        # Check cookie support *before* touching request.COOKIES: with
        # cookies disabled, request.COOKIES["sessionid"] would raise
        # KeyError, and the original also ran the whole search (and wrote
        # session info) before ever reaching this check.
        if not request.session.test_cookie_worked():
            return HttpResponse("Please enable cookies to use this search engine")
        session_id = request.COOKIES["sessionid"]
        q = Query()
        search_results = q.search_from_user_query(user_query)
        q.write_session_info(session_id, user_query)
        q.close()
        return JsonResponse(
            {
                "resultList": [
                    {
                        "articleName": article,
                        # De-duplicate while keeping JSON-serializable lists.
                        "authors": list(set(authors)),
                        "datasets": list(set(datasets)),
                    }
                    for article, authors, datasets in search_results
                ]
            }
        )
    # First visit without a query: arm the test cookie so a subsequent
    # search request can verify cookie support.
    request.session.set_test_cookie()
    return render(request, "search/index.html")
| {
"content_hash": "3c54138c93234cc3ae394b248d7ffc48",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 28.58139534883721,
"alnum_prop": 0.531326281529699,
"repo_name": "duosproject/duos-web",
"id": "3f1a322c42bb69c5211956c6022a06648684ae2b",
"size": "1254",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "633"
},
{
"name": "HTML",
"bytes": "1820"
},
{
"name": "JavaScript",
"bytes": "5666"
},
{
"name": "Python",
"bytes": "7325"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from future.utils import iteritems
from mock import Mock
from unittest import TestCase
from sqlalchemy import Boolean, Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import as_declarative, declared_attr
from sqlalchemy.orm import backref, relation
from fixtureupper.register import UpperRegister
@as_declarative()
class _Base(object):
    """Declarative base: table name is the lowercased class name."""
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()

    # Surrogate primary key shared by every model below.
    id = Column(Integer, primary_key=True)
class A(_Base):
    # A -> B: many-to-one; the plain backref exposes B.a as a list.
    b_id = Column(Integer, ForeignKey('b.id'))
    b = relation('B', backref='a')
    # A -> C: backref declared with uselist=False, so C.a is one-to-one.
    c_id = Column(Integer, ForeignKey('c.id'))
    c = relation('C', backref=backref('a', uselist=False))
    # A -> D: both ends declared explicitly via back_populates (see D.a).
    d_id = Column(Integer, ForeignKey('d.id'))
    d = relation('D', back_populates='a')
    # A -> E: one-to-many; the foreign key lives on E (E.a_id).
    e = relation('E', backref='a')
class B(_Base):
    # Target of A.b; the backref on A adds a list-valued B.a.
    pass
class C(_Base):
    # Target of A.c; the uselist=False backref on A makes C.a scalar.
    pass
class D(_Base):
    # Mirror side of A.d; back_populates requires declaring both ends.
    a = relation('A', back_populates='d')
class E(_Base):
    # Holds the foreign key for the one-to-many A.e relation.
    a_id = Column(Integer, ForeignKey('a.id'))
class BaseTestMockFixtureUpper(TestCase):
    # Shared setUp: build a fresh fixture-upper class bound to model A
    # through the 'SqlAlchemyModel' register, so tests don't share state.
    def setUp(self):
        class MockFixtureUpper(UpperRegister('SqlAlchemyModel')):
            model = A
        self.MockFixtureUpper = MockFixtureUpper
class TestSetRelations(BaseTestMockFixtureUpper):
    """Exercise ``set_relation`` across every relation style on model A.

    Covers many-to-one (A-B), one-to-one (A-C), ``back_populates`` (A-D)
    and one-to-many (A-E), setting the relation from either side and
    asserting that the correct foreign-key column is filled in.
    """

    def test_a_b(self):
        a = A(id=1)
        b = B(id=2)
        self.MockFixtureUpper().set_relation(a, b, 'b')
        self.assertEqual(a.b_id, 2)

    def test_b_a(self):
        a = [A(id=1), A(id=2)]
        b = B(id=3)
        self.MockFixtureUpper().set_relation(b, a, 'a')
        self.assertEqual(a[0].b_id, 3)
        self.assertEqual(a[1].b_id, 3)

    def test_a_c(self):
        a = A(id=1)
        c = C(id=2)
        self.MockFixtureUpper().set_relation(a, c, 'c')
        self.assertEqual(a.c_id, 2)

    def test_c_a(self):
        a = A(id=1)
        c = C(id=2)
        self.MockFixtureUpper().set_relation(c, a, 'a')
        self.assertEqual(a.c_id, 2)

    def test_a_d(self):
        a = A(id=1)
        d = D(id=2)
        self.MockFixtureUpper().set_relation(a, d, 'd')
        self.assertEqual(a.d_id, 2)

    def test_d_a(self):
        a = [A(id=1), A(id=2)]
        d = D(id=3)
        self.MockFixtureUpper().set_relation(d, a, 'a')
        self.assertEqual(a[0].d_id, 3)
        self.assertEqual(a[1].d_id, 3)

    def test_a_e(self):
        a = A(id=1)
        e = [E(id=2), E(id=3)]
        self.MockFixtureUpper().set_relation(a, e, 'e')
        self.assertEqual(e[0].a_id, 1)
        self.assertEqual(e[1].a_id, 1)

    # BUG FIX: this test was also named ``test_a_e``, silently shadowing
    # the one above so it never ran. Renamed to match test_b_a/test_c_a/
    # test_d_a (relation set from the E side).
    def test_e_a(self):
        a = A(id=1)
        e = E(id=2)
        self.MockFixtureUpper().set_relation(e, a, 'a')
        self.assertEqual(e.a_id, 1)

    def test_a_e_with_no_id(self):
        a = A()
        e = E()
        self.MockFixtureUpper().set_relation(e, a, 'a')
        self.assertEqual(e.a_id, None)
| {
"content_hash": "b5724252d202c2d12dd7964a6d0a2aba",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 68,
"avg_line_length": 25.136752136752136,
"alnum_prop": 0.5759945596735804,
"repo_name": "Rhathe/fixtureupper",
"id": "cc4af316d1c6782e7d46088ab58ab2a4c39be521",
"size": "2941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/test_sqlalchemy_model_fixture_upper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49877"
}
],
"symlink_target": ""
} |
from .generated.FoxySheepLexer import FoxySheepLexer
from .generated.FoxySheepParser import FoxySheepParser
from .generated.FoxySheepListener import FoxySheepListener
from .generated.FoxySheepVisitor import FoxySheepVisitor
from .generated.FullFormLexer import FullFormLexer
from .generated.FullFormParser import FullFormParser
from .generated.FullFormListener import FullFormListener
from .generated.FullFormVisitor import FullFormVisitor
from .FullFormEmitter import FullFormEmitter
from .PostParser import PostParser | {
"content_hash": "ded1579659c27d902eec1197efa549b4",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 43.416666666666664,
"alnum_prop": 0.8867562380038387,
"repo_name": "rljacobson/FoxySheep",
"id": "0ddaa772c3827e0f4513935408c1c7d45fd564dc",
"size": "521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_target/FoxySheep/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "ANTLR",
"bytes": "24295"
},
{
"name": "Java",
"bytes": "72986"
},
{
"name": "Jupyter Notebook",
"bytes": "8769"
},
{
"name": "Makefile",
"bytes": "2878"
},
{
"name": "Mathematica",
"bytes": "710"
},
{
"name": "Matlab",
"bytes": "8"
},
{
"name": "Python",
"bytes": "173842"
}
],
"symlink_target": ""
} |
def calc_bin(value, bound_min, bound_max, bins):
    """Find bin in parameter range.
    Args:
        value (float): some value, the result of a simulation.
        bound_min (float): lower limit, defining the parameter-space.
        bound_max (float): upper limit, defining the parameter-space.
        bins (int): number of bins used to subdivide parameter-space.
    Returns:
        Bin(int) corresponding to the input-value.
    """
    width = (bound_max - bound_min) / bins
    # Floor-divide to locate the bin, then clamp into [0, bins - 1] so that
    # out-of-range values land in the first/last bin.
    idx = (value - bound_min) // width
    idx = max(min(idx, bins - 1), 0)
    return int(idx)
def calc_bins(box_r, num_bins, prop1range=(0.0, 1.0), prop2range=(0.0, 1.0)):
    """Map each (prop1, prop2) pair in ``box_r`` to a (bin1, bin2) tuple."""
    binned = []
    for pair in box_r:
        first_bin = calc_bin(pair[0], *prop1range, num_bins)
        second_bin = calc_bin(pair[1], *prop2range, num_bins)
        binned.append((first_bin, second_bin))
    return binned
| {
"content_hash": "9d3cf61aa407ad7b5b2a8fd1d26f9775",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 106,
"avg_line_length": 46.05555555555556,
"alnum_prop": 0.6441495778045838,
"repo_name": "WilmerLab/HTSOHM-dev",
"id": "3b3bf1a07c6da2973a827ac2ab4c0924daa9b464",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "htsohm/bins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44135"
},
{
"name": "Shell",
"bytes": "1492"
}
],
"symlink_target": ""
} |
# Pre-Django-2.0 way of pointing Django at this app's AppConfig class.
default_app_config = 'tethys_datasets.apps.TethysDatasetsConfig'
"content_hash": "4ab4b49963f323a17a8a9242bd27b838",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 64,
"avg_line_length": 64,
"alnum_prop": 0.84375,
"repo_name": "CI-WATER/django-tethys_datasets",
"id": "4ed6e21a5f4e6cdccb87101cede8f09a870f59a3",
"size": "93",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tethys_datasets/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "25645"
}
],
"symlink_target": ""
} |
from pathlib import Path
import pytest
from libqtile import config, confreader, utils
# Directory of test config files, located next to this test module.
configs_dir = Path(__file__).resolve().parent / "configs"
def load_config(name):
    """Load and return the qtile config file ``name`` from ``configs_dir``."""
    cfg = confreader.Config(configs_dir / name)
    cfg.load()
    return cfg
def test_validate():
    """validate() rejects unknown key names and unknown modifiers."""
    cfg = load_config("basic.py")
    cfg.keys[0].key = "nonexistent"  # bad key
    with pytest.raises(confreader.ConfigError):
        cfg.validate()

    cfg = load_config("basic.py")
    cfg.keys[0].modifiers = ["nonexistent"]  # bad modifier
    with pytest.raises(confreader.ConfigError):
        cfg.validate()
def test_basic():
    """basic.py loads and defines at least one key binding."""
    cfg = load_config("basic.py")
    assert cfg.keys
def test_syntaxerr():
    # A config file with broken syntax must surface the SyntaxError to the
    # caller rather than swallowing it.
    with pytest.raises(SyntaxError):
        load_config("syntaxerr.py")
def test_falls_back():
    """Options omitted from basic.py still get a default value."""
    cfg = load_config("basic.py")
    # Only assert the attribute exists; the default's actual value is free
    # to change upstream without breaking this test.
    assert hasattr(cfg, "follow_mouse_focus")
def cmd(x):
    """Do-nothing callback used by the EzKey/EzClick/EzDrag tests below."""
def test_ezkey():
    """EzKey parses modifier-prefixed specs and rejects malformed ones."""
    binding = config.EzKey("M-A-S-a", cmd, cmd)
    modkey, altkey = (config.EzConfig.modifier_keys[c] for c in "MA")
    assert binding.modifiers == [modkey, altkey, "shift"]
    assert binding.key == "a"
    assert binding.commands == (cmd, cmd)

    binding = config.EzKey("M-<Tab>", cmd)
    assert binding.modifiers == [modkey]
    assert binding.key == "Tab"
    assert binding.commands == (cmd,)

    # Malformed specs: dangling dash, unknown modifier, no dashes at all,
    # and a modifier appearing after the key.
    for bad_spec in ("M--", "Z-Z-z", "asdf", "M-a-A"):
        with pytest.raises(utils.QtileError):
            config.EzKey(bad_spec, cmd)
def test_ezclick_ezdrag():
    """EzClick and EzDrag parse '<modifiers>-<button number>' specs."""
    click = config.EzClick("M-1", cmd)
    assert click.button == "Button1"
    assert click.modifiers == [config.EzClick.modifier_keys["M"]]

    drag = config.EzDrag("A-2", cmd)
    assert drag.button == "Button2"
    assert drag.modifiers == [config.EzClick.modifier_keys["A"]]
| {
"content_hash": "0ba0dcbc4abd0c6e9ee0d6b5625a2427",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 73,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.6353510895883777,
"repo_name": "ramnes/qtile",
"id": "7eb1dec1514f06ffd643c04fcb0625aefcf004c1",
"size": "3226",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "535"
},
{
"name": "Python",
"bytes": "2135461"
},
{
"name": "Shell",
"bytes": "8090"
}
],
"symlink_target": ""
} |
import sys
from pyasn1.codec.der import decoder as der_decoder
from pyasn1.codec.der import encoder as der_encoder
from pyasn1_modules import pem
from pyasn1_modules import rfc2511
try:
    # Prefer unittest2, which backports newer unittest features to old Pythons.
    import unittest2 as unittest
except ImportError:
    import unittest
class CertificateReqTestCase(unittest.TestCase):
    """Round-trip DER codec test for an RFC 2511 CertReqMessages sample."""
    # Base64 (PEM body) of a DER-encoded CertReqMessages structure.
    pem_text = """\
MIIBozCCAZ8wggEFAgUAwTnj2jCByoABAqURMA8xDTALBgNVBAMTBHVzZXKmgZ8w
DQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAJ6ZQ2cYbn/lFsmBOlRltbRbFQUvvE0Q
nbopOu1kC7Bmaaz7QTx8nxeiHi4m7uxCbGGxHNoGCt7EmdG8eZUBNAcHyGlXrJdm
0z3/uNEGiBHq+xB8FnFJCA5EIJ3RWFnlbu9otSITLxWK7c5+/NHmWM+yaeHD/f/h
rp01c/8qXZfZAgMBAAGpEDAOBgNVHQ8BAf8EBAMCBeAwLzASBgkrBgEFBQcFAQEM
BTExMTExMBkGCSsGAQUFBwUBAgwMc2VydmVyX21hZ2ljoYGTMA0GCSqGSIb3DQEB
BQUAA4GBAEI3KNEvTq/n1kNVhNhPkovk1AZxyJrN1u1+7Gkc4PLjWwjLOjcEVWt4
AajUk/gkIJ6bbeO+fZlMjHfPSDKcD6AV2hN+n72QZwfzcw3icNvBG1el9EU4XfIm
xfu5YVWi81/fw8QQ6X6YGHFQkomLd7jxakVyjxSng9BhO6GpjJNF
"""
    def setUp(self):
        # ASN.1 schema object the decoder fills in.
        self.asn1Spec = rfc2511.CertReqMessages()
    def testDerCodec(self):
        substrate = pem.readBase64fromText(self.pem_text)
        asn1Object, rest = der_decoder.decode(substrate, asn1Spec=self.asn1Spec)
        # The decoder must consume the entire substrate...
        assert not rest
        assert asn1Object.prettyPrint()
        # ...and re-encoding must reproduce the original bytes exactly.
        assert der_encoder.encode(asn1Object) == substrate
# Collect every TestCase defined in this module into a single suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "10b95fc3cc11510fef450314afa256bc",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 80,
"avg_line_length": 31.130434782608695,
"alnum_prop": 0.8107541899441341,
"repo_name": "catapult-project/catapult",
"id": "cd5e266ce3c8ef5e733d39e245a8a92a59100fa7",
"size": "1591",
"binary": false,
"copies": "13",
"ref": "refs/heads/main",
"path": "third_party/gsutil/third_party/pyasn1-modules/tests/test_rfc2511.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
} |
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import xml.etree.ElementTree as ET
from ncclient import manager
from ncclient import xml_
import ncclient
import pynos.versions.ver_5.ver_5_0_1.bgp
import pynos.versions.ver_5.ver_5_0_1.snmp
import pynos.versions.ver_5.ver_5_0_1.interface
import pynos.versions.ver_5.ver_5_0_1.lldp
import pynos.versions.ver_5.ver_5_0_1.system
import pynos.versions.ver_6.ver_6_0_1.bgp
import pynos.versions.ver_6.ver_6_0_1.snmp
import pynos.versions.ver_6.ver_6_0_1.interface
import pynos.versions.ver_6.ver_6_0_1.lldp
import pynos.versions.ver_6.ver_6_0_1.system
# Map supported NOS firmware versions to the module implementing each
# feature area for that version; Device.__init__ looks up by the version
# string reported by the device.
VERSIONS = {
    '5.0.1': {
        'bgp': pynos.versions.ver_5.ver_5_0_1.bgp.BGP,
        'snmp': pynos.versions.ver_5.ver_5_0_1.snmp.SNMP,
        'interface': pynos.versions.ver_5.ver_5_0_1.interface.Interface,
        'lldp': pynos.versions.ver_5.ver_5_0_1.lldp.LLDP,
        'system': pynos.versions.ver_5.ver_5_0_1.system.System,
    },
    '6.0.1': {
        'bgp': pynos.versions.ver_6.ver_6_0_1.bgp.BGP,
        'snmp': pynos.versions.ver_6.ver_6_0_1.snmp.SNMP,
        'interface': pynos.versions.ver_6.ver_6_0_1.interface.Interface,
        'lldp': pynos.versions.ver_6.ver_6_0_1.lldp.LLDP,
        'system': pynos.versions.ver_6.ver_6_0_1.system.System,
    }
}
# Feature attributes attached to every Device instance (see Device.__init__).
NOS_ATTRS = ['bgp', 'snmp', 'interface', 'lldp', 'system']
class DeviceCommError(Exception):
    """
    Error with device communication.

    Raised by Device._callback when the underlying NETCONF transport
    fails (connection, session, or authentication errors).
    """
    pass
class Device(object):
    """
    Device object holds the state for a single NOS device.
    Attributes:
        bgp: BGP related actions and attributes.
        interface: Interface related actions and attributes.
        snmp: SNMP related actions and attributes.
        lldp: LLDP related actions and attributes.
        system: System level actions and attributes.
    """
    def __init__(self, **kwargs):
        """
        Args:
            conn (tuple): IP/Hostname and port of the VDX device you
                intend to connect to. Ex. ('10.0.0.1', '22')
            auth (tuple): Username and password of the VDX device you
                intend to connect to. Ex. ('admin', 'password')
            hostkey_verify (bool): True to verify hostkey, False to bypass
                verify.
            auth_method (string): ```key``` if using ssh-key auth.
                ```userpass``` if using username/password auth.
            auth_key (string): Location of ssh key to use for authentication.
        Returns:
            Instance of the device object.
        Examples:
            >>> from pprint import pprint
            >>> import pynos.device
            >>> conn = ('10.24.48.225', '22')
            >>> auth = ('admin', 'password')
            >>> dev = pynos.device.Device(conn=conn, auth=auth)
            >>> dev.connection
            True
            >>> del dev
            >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
            ...     pprint(dev.mac_table) # doctest: +ELLIPSIS
            [{'interface'...'mac_address'...'state'...'type'...'vlan'...}]
            >>> dev.connection
            False
        """
        self._conn = kwargs.pop('conn')
        self._auth = kwargs.pop('auth', (None, None))
        self._hostkey_verify = kwargs.pop('hostkey_verify', None)
        self._auth_method = kwargs.pop('auth_method', 'userpass')
        self._auth_key = kwargs.pop('auth_key', None)
        self._mgr = None
        self.reconnect()
        # Attach the version-appropriate feature objects (bgp, snmp, ...)
        # for the firmware the device reports. NOTE(review): an unsupported
        # firmware version raises KeyError here — verify whether a friendlier
        # error is wanted before changing it (callers may rely on KeyError).
        ver = self.firmware_version
        for nos_attr in NOS_ATTRS:
            setattr(self, nos_attr, VERSIONS[ver][nos_attr](self._callback))
    def __enter__(self):
        if not self.connection:
            self.reconnect()
        return self
    def __exit__(self, exctype, excinst, exctb):
        if self.connection:
            self.close()
    @property
    def mac_table(self):
        """list[dict]: the MAC table of the device.
        """
        table = []
        namespace = 'urn:brocade.com:mgmt:brocade-mac-address-table'
        request_mac_table = ET.Element('get-mac-address-table',
                                       xmlns=namespace)
        result = self._callback(request_mac_table, handler='get')
        for entry in result.findall('{%s}mac-address-table' % namespace):
            address = entry.find('{%s}mac-address' % namespace).text
            vlan = entry.find('{%s}vlanid' % namespace).text
            mac_type = entry.find('{%s}mac-type' % namespace).text
            state = entry.find('{%s}mac-state' % namespace).text
            interface = entry.find('{%s}forwarding-interface' % namespace)
            interface_type = interface.find('{%s}interface-type' %
                                            namespace).text
            interface_name = interface.find('{%s}interface-name' %
                                            namespace).text
            # e.g. 'tengigabitethernet' + '1/0/1'
            interface = '%s%s' % (interface_type, interface_name)
            table.append(dict(mac_address=address, interface=interface,
                              state=state, vlan=vlan,
                              type=mac_type))
        return table
    @property
    def firmware_version(self):
        """
        Returns firmware version.
        Args:
            None
        Returns:
            str: OS version string reported by the device (e.g. '6.0.1'),
            used as the key into VERSIONS.
        Raises:
            None
        """
        namespace = "urn:brocade.com:mgmt:brocade-firmware-ext"
        request_ver = ET.Element("show-firmware-version", xmlns=namespace)
        ver = self._callback(request_ver, handler='get')
        return ver.find('.//*{%s}os-version' % namespace).text
    def _callback(self, call, handler='edit_config', target='running',
                  source='startup'):
        """
        Callback for NETCONF calls.
        Args:
            call: An Element Tree element containing the XML of the NETCONF
                call you intend to make to the device.
            handler: Type of ncclient call to make.
                get: ncclient dispatch. For custom RPCs.
                edit_config: NETCONF standard edit.
                delete_config: NETCONF standard delete.
                copy_config: NETCONF standard copy.
            target: Target configuration location for action. Only used for
                edit_config, delete_config, and copy_config.
            source: Source of configuration information for copying
                configuration. Only used for copy_config.
        Returns:
            The parsed ElementTree response for ``get`` calls; None for the
            config handlers.
        Raises:
            DeviceCommError: If the NETCONF transport fails.
        """
        try:
            call = ET.tostring(call)
            if handler == 'get':
                call_element = xml_.to_ele(call)
                return ET.fromstring(str(self._mgr.dispatch(call_element)))
            if handler == 'edit_config':
                self._mgr.edit_config(target=target, config=call)
            if handler == 'delete_config':
                self._mgr.delete_config(target=target)
            if handler == 'copy_config':
                self._mgr.copy_config(target=target, source=source)
        except (ncclient.transport.TransportError,
                ncclient.transport.SessionCloseError,
                ncclient.transport.SSHError,
                ncclient.transport.AuthenticationError,
                ncclient.transport.SSHUnknownHostError) as error:
            # Surface every transport-level failure as a single exception
            # type so callers don't need to know about ncclient internals.
            logging.error(error)
            raise DeviceCommError
    @property
    def connection(self):
        """
        Poll if object is still connected to device in question.
        Args:
            None
        Returns:
            bool: True if connected, False if not.
        Raises:
            None
        """
        return self._mgr.connected
    def reconnect(self):
        """
        Reconnect session with device.
        Args:
            None
        Returns:
            bool: True if reconnect succeeds, False if not.
        Raises:
            ValueError: If auth_method is neither 'userpass' nor 'key'.
        """
        # BUG FIX: these comparisons used ``is``, which tests object
        # identity and only matched by accident of CPython string
        # interning; ``==`` is the correct string comparison.
        if self._auth_method == "userpass":
            self._mgr = manager.connect(host=self._conn[0],
                                        port=self._conn[1],
                                        username=self._auth[0],
                                        password=self._auth[1],
                                        hostkey_verify=self._hostkey_verify)
        elif self._auth_method == "key":
            self._mgr = manager.connect(host=self._conn[0],
                                        port=self._conn[1],
                                        username=self._auth[0],
                                        key_filename=self._auth_key,
                                        hostkey_verify=self._hostkey_verify)
        else:
            raise ValueError("auth_method incorrect value.")
        self._mgr.timeout = 600
        return True
    def find_interface_by_mac(self, **kwargs):
        """Find the interface through which a MAC can be reached.
        Args:
            mac_address (str): A MAC address in 'xx:xx:xx:xx:xx:xx' format.
        Returns:
            list[dict]: a list of mac table data.
        Raises:
            KeyError: if `mac_address` is not specified.
        Examples:
            >>> from pprint import pprint
            >>> import pynos.device
            >>> conn = ('10.24.48.225', '22')
            >>> auth = ('admin', 'password')
            >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
            ...     x = dev.find_interface_by_mac(
            ...     mac_address='01:23:45:67:89:ab')
            ...     pprint(x) # doctest: +ELLIPSIS
            [{'interface'...'mac_address'...'state'...'type'...'vlan'...}]
        """
        mac = kwargs.pop('mac_address')
        results = [x for x in self.mac_table if x['mac_address'] == mac]
        return results
    def close(self):
        """Close NETCONF session.
        Args:
            None
        Returns:
            None
        Raises:
            None
        Examples:
            >>> import pynos.device
            >>> conn = ('10.24.48.225', '22')
            >>> auth = ('admin', 'password')
            >>> dev = pynos.device.Device(conn=conn, auth=auth)
            >>> dev.connection
            True
            >>> dev.close() # doctest: +ELLIPSIS
            <?xml...<rpc-reply...<ok/>...
            >>> dev.connection
            False
        """
        return self._mgr.close_session()
| {
"content_hash": "8fe4af7732afd34f239d009b76650e52",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 77,
"avg_line_length": 34.60634920634921,
"alnum_prop": 0.5461884230804513,
"repo_name": "SivagnanamCiena/pynos",
"id": "1e19767c0f8f70f0f898f4b672f3ae812bf26d4a",
"size": "10923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynos/device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20665905"
}
],
"symlink_target": ""
} |
"""Compute the weighted sum of qubit states."""
from typing import List, Optional
import numpy as np
from qiskit.circuit import QuantumRegister, AncillaRegister, QuantumCircuit
from ..blueprintcircuit import BlueprintCircuit
class WeightedAdder(BlueprintCircuit):
    r"""A circuit to compute the weighted sum of qubit registers.
    Given :math:`n` qubit basis states :math:`q_0, \ldots, q_{n-1} \in \{0, 1\}` and non-negative
    integer weights :math:`\lambda_0, \ldots, \lambda_{n-1}`, this circuit performs the operation
    .. math::
        |q_0 \ldots q_{n-1}\rangle |0\rangle_s
        \mapsto |q_0 \ldots q_{n-1}\rangle |\sum_{j=0}^{n-1} \lambda_j q_j\rangle_s
    where :math:`s` is the number of sum qubits required.
    This can be computed as
    .. math::
        s = 1 + \left\lfloor \log_2\left( \sum_{j=0}^{n-1} \lambda_j \right) \right\rfloor
    or :math:`s = 1` if the sum of the weights is 0 (then the expression in the logarithm is
    invalid).
    For qubits in a circuit diagram, the first weight applies to the upper-most qubit.
    For an example where the state of 4 qubits is added into a sum register, the circuit can
    be schematically drawn as
    .. parsed-literal::
                   ┌────────┐
          state_0: ┤0       ├ | state_0 * weights[0]
                   │        │ |
          state_1: ┤1       ├ | + state_1 * weights[1]
                   │        │ |
          state_2: ┤2       ├ | + state_2 * weights[2]
                   │        │ |
          state_3: ┤3       ├ | + state_3 * weights[3]
                   │        │
            sum_0: ┤4       ├ |
                   │  Adder │ |
            sum_1: ┤5       ├ | = sum_0 * 2^0 + sum_1 * 2^1 + sum_2 * 2^2
                   │        │ |
            sum_2: ┤6       ├ |
                   │        │
          carry_0: ┤7       ├
                   │        │
          carry_1: ┤8       ├
                   │        │
        control_0: ┤9       ├
                   └────────┘
    """
    def __init__(
        self,
        num_state_qubits: Optional[int] = None,
        weights: Optional[List[int]] = None,
        name: str = "adder",
    ) -> None:
        """Computes the weighted sum controlled by state qubits.
        Args:
            num_state_qubits: The number of state qubits.
            weights: List of weights, one for each state qubit. If none are provided they
                default to 1 for every qubit.
            name: The name of the circuit.
        """
        super().__init__(name=name)
        self._weights = None
        self._num_state_qubits = None
        self.weights = weights
        self.num_state_qubits = num_state_qubits
    @property
    def num_sum_qubits(self) -> int:
        """The number of sum qubits in the circuit.
        Returns:
            The number of qubits needed to represent the weighted sum of the qubits.
        """
        if sum(self.weights) > 0:
            return int(np.floor(np.log2(sum(self.weights))) + 1)
        return 1
    @property
    def weights(self) -> List[int]:
        """The weights for the qubit states.
        Returns:
            The weight for the qubit states.
        """
        if self._weights:
            return self._weights
        if self.num_state_qubits:
            # Default: every state qubit contributes with weight 1.
            return [1] * self.num_state_qubits
        return None
    @weights.setter
    def weights(self, weights: List[int]) -> None:
        """Set the weights for summing the qubit states.
        Args:
            weights: The new weights.
        Raises:
            ValueError: If not all weights are close to an integer.
        """
        if weights:
            # BUG FIX: previously the caller's list was mutated in place
            # (``weights[i] = np.round(weight)``), a surprising side effect
            # that also stored numpy floats. Build a fresh list of ints.
            rounded = []
            for weight in weights:
                if not np.isclose(weight, np.round(weight)):
                    raise ValueError("Non-integer weights are not supported!")
                rounded.append(int(np.round(weight)))
            weights = rounded
        self._invalidate()
        self._weights = weights
        self._reset_registers()
    @property
    def num_state_qubits(self) -> int:
        """The number of qubits to be summed.
        Returns:
            The number of state qubits.
        """
        return self._num_state_qubits
    @num_state_qubits.setter
    def num_state_qubits(self, num_state_qubits: int) -> None:
        """Set the number of state qubits.
        Args:
            num_state_qubits: The new number of state qubits.
        """
        if self._num_state_qubits is None or num_state_qubits != self._num_state_qubits:
            self._invalidate()
            self._num_state_qubits = num_state_qubits
            self._reset_registers()
    def _reset_registers(self):
        """Reset the registers."""
        self.qregs = []
        if self.num_state_qubits:
            qr_state = QuantumRegister(self.num_state_qubits, name="state")
            qr_sum = QuantumRegister(self.num_sum_qubits, name="sum")
            self.qregs = [qr_state, qr_sum]
            if self.num_carry_qubits > 0:
                qr_carry = AncillaRegister(self.num_carry_qubits, name="carry")
                self.add_register(qr_carry)
            if self.num_control_qubits > 0:
                qr_control = AncillaRegister(self.num_control_qubits, name="control")
                self.add_register(qr_control)
    @property
    def num_carry_qubits(self) -> int:
        """The number of carry qubits required to compute the sum.
        Note that this is not necessarily equal to the number of ancilla qubits, these can
        be queried using ``num_ancilla_qubits``.
        Returns:
            The number of carry qubits required to compute the sum.
        """
        return self.num_sum_qubits - 1
    @property
    def num_control_qubits(self) -> int:
        """The number of additional control qubits required.
        Note that the total number of ancilla qubits can be obtained by calling the
        method ``num_ancilla_qubits``.
        Returns:
            The number of additional control qubits required (0 or 1).
        """
        return int(self.num_sum_qubits > 2)
    def _check_configuration(self, raise_on_failure=True):
        """Check if the current configuration is valid."""
        valid = True
        if self._num_state_qubits is None:
            valid = False
            if raise_on_failure:
                raise AttributeError("The number of state qubits has not been set.")
        # BUG FIX: guard against ``weights`` being None — previously this
        # raised TypeError (len(None)) when nothing was configured and
        # raise_on_failure was False, instead of reporting invalid.
        if self.weights is not None and self._num_state_qubits != len(self.weights):
            valid = False
            if raise_on_failure:
                raise ValueError("Mismatching number of state qubits and weights.")
        return valid
    def _build(self):
        """If not already built, build the circuit."""
        if self._is_built:
            return
        super()._build()
        num_result_qubits = self.num_state_qubits + self.num_sum_qubits
        circuit = QuantumCircuit(*self.qregs)
        qr_state = circuit.qubits[: self.num_state_qubits]
        qr_sum = circuit.qubits[self.num_state_qubits : num_result_qubits]
        qr_carry = circuit.qubits[num_result_qubits : num_result_qubits + self.num_carry_qubits]
        qr_control = circuit.qubits[num_result_qubits + self.num_carry_qubits :]
        # loop over state qubits and corresponding weights
        for i, weight in enumerate(self.weights):
            # only act if non-trivial weight
            if np.isclose(weight, 0):
                continue
            # get state control qubit
            q_state = qr_state[i]
            # get bit representation of current weight, LSB first
            weight_binary = f"{int(weight):b}".rjust(self.num_sum_qubits, "0")[::-1]
            # loop over bits of current weight and add them to sum and carry registers
            for j, bit in enumerate(weight_binary):
                if bit == "1":
                    if self.num_sum_qubits == 1:
                        circuit.cx(q_state, qr_sum[j])
                    elif j == 0:
                        # compute (q_sum[0] + 1) into (q_sum[0], q_carry[0])
                        # - controlled by q_state[i]
                        circuit.ccx(q_state, qr_sum[j], qr_carry[j])
                        circuit.cx(q_state, qr_sum[j])
                    elif j == self.num_sum_qubits - 1:
                        # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j])
                        # - controlled by q_state[i] / last qubit,
                        # no carry needed by construction
                        circuit.cx(q_state, qr_sum[j])
                        circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                    else:
                        # compute (q_sum[j] + q_carry[j-1] + 1) into (q_sum[j], q_carry[j])
                        # - controlled by q_state[i]
                        circuit.x(qr_sum[j])
                        circuit.x(qr_carry[j - 1])
                        # CONSISTENCY FIX: use mcx like every other call
                        # site here; mct is the deprecated alias.
                        circuit.mcx(
                            [q_state, qr_sum[j], qr_carry[j - 1]],
                            qr_carry[j],
                            qr_control,
                            mode="v-chain",
                        )
                        circuit.cx(q_state, qr_carry[j])
                        circuit.x(qr_sum[j])
                        circuit.x(qr_carry[j - 1])
                        circuit.cx(q_state, qr_sum[j])
                        circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                else:
                    if self.num_sum_qubits == 1:
                        pass  # nothing to do, since nothing to add
                    elif j == 0:
                        pass  # nothing to do, since nothing to add
                    elif j == self.num_sum_qubits - 1:
                        # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j])
                        # - controlled by q_state[i] / last qubit,
                        # no carry needed by construction
                        circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
                    else:
                        # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j], q_carry[j])
                        # - controlled by q_state[i]
                        circuit.mcx(
                            [q_state, qr_sum[j], qr_carry[j - 1]],
                            qr_carry[j],
                            qr_control,
                            mode="v-chain",
                        )
                        circuit.ccx(q_state, qr_carry[j - 1], qr_sum[j])
            # uncompute carry qubits
            for j in reversed(range(len(weight_binary))):
                bit = weight_binary[j]
                if bit == "1":
                    if self.num_sum_qubits == 1:
                        pass
                    elif j == 0:
                        circuit.x(qr_sum[j])
                        circuit.ccx(q_state, qr_sum[j], qr_carry[j])
                        circuit.x(qr_sum[j])
                    elif j == self.num_sum_qubits - 1:
                        pass
                    else:
                        circuit.x(qr_carry[j - 1])
                        circuit.mcx(
                            [q_state, qr_sum[j], qr_carry[j - 1]],
                            qr_carry[j],
                            qr_control,
                            mode="v-chain",
                        )
                        circuit.cx(q_state, qr_carry[j])
                        circuit.x(qr_carry[j - 1])
                else:
                    if self.num_sum_qubits == 1:
                        pass
                    elif j == 0:
                        pass
                    elif j == self.num_sum_qubits - 1:
                        pass
                    else:
                        # compute (q_sum[j] + q_carry[j-1]) into (q_sum[j], q_carry[j])
                        # - controlled by q_state[i]
                        circuit.x(qr_sum[j])
                        circuit.mcx(
                            [q_state, qr_sum[j], qr_carry[j - 1]],
                            qr_carry[j],
                            qr_control,
                            mode="v-chain",
                        )
                        circuit.x(qr_sum[j])
        self.append(circuit.to_gate(), self.qubits)
| {
"content_hash": "d4d588ffdf8acb61305092b4c2a2c22d",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 97,
"avg_line_length": 37.62461538461538,
"alnum_prop": 0.47325809617271836,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "f7d1dd6ef33947e5f32c1cf5f381dfc76793af0e",
"size": "12828",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/circuit/library/arithmetic/weighted_adder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
from bottle import hook, install, response, route, run
from bottle.ext.mongo import MongoPlugin
import server
# Borrowed from https://gist.github.com/richard-flosi/3789163
@hook('after_request')
def enable_cors():
    """
    You need to add some headers to each request.
    Don't use the wildcard '*' for Access-Control-Allow-Origin in production.
    """
    cors_headers = {
        'Access-Control-Allow-Origin': '*',
        'Access-Control-Allow-Methods': 'PUT, GET, POST, DELETE, OPTIONS',
        'Access-Control-Allow-Headers': 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token',
    }
    for header_name, header_value in cors_headers.items():
        response.headers[header_name] = header_value
@route("<path:path>", method=["OPTIONS"])
def options(path):
    """Answer CORS preflight (OPTIONS) requests for any path."""
    reply = "Yeah it's fine mate."
    return reply
# Plugin injects a MongoDB handle into route handlers (json_mongo=True
# makes results JSON-serializable).
install(MongoPlugin(uri="mongodb://db", db="quotes", json_mongo=True))
# Development server settings; reloader/debug are not for production.
run(host="0.0.0.0", port=8000, reloader=True, debug=True)
| {
"content_hash": "a4bfc564a35da91d6ad56aeb0da034c6",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 117,
"avg_line_length": 38.68181818181818,
"alnum_prop": 0.7074030552291422,
"repo_name": "stilvoid/microservices-workshop",
"id": "ee7a0ec19b2f6fed77a3d0e6fec124ac369d5cb4",
"size": "851",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stub/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13890"
},
{
"name": "HTML",
"bytes": "14208"
},
{
"name": "JavaScript",
"bytes": "17599"
},
{
"name": "Python",
"bytes": "8295"
},
{
"name": "Shell",
"bytes": "302"
},
{
"name": "Smarty",
"bytes": "7741"
}
],
"symlink_target": ""
} |
import tabulate
from tabulate import (
DataRow,
Line as TabulateLine,
TableFormat,
_binary_type,
_strip_invisible,
_text_type,
)
# ASCII grid format mimicking CrateDB shell output: "+---+" border lines
# above, below, and under the header; "|" column separators; no separator
# between data rows.
crate_fmt = TableFormat(lineabove=TabulateLine("+", "-", "+", "+"),
                        linebelowheader=TabulateLine("+", "-", "+", "+"),
                        linebetweenrows=None,
                        linebelow=TabulateLine("+", "-", "+", "+"),
                        headerrow=DataRow("|", "|", "|"),
                        datarow=DataRow("|", "|", "|"),
                        padding=1,
                        with_header_hide=None)
def _format(val, valtype, floatfmt, missingval="", has_invisible=True):
    """Format a value according to its type.
    Unicode is supported:
    >>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
        tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
        good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430      \\u0446\\u0438\\u0444\\u0440\\u0430\\n-------  -------\\n\\u0430\\u0437             2\\n\\u0431\\u0443\\u043a\\u0438           4' ; \
        tabulate(tbl, headers=hrow) == good_result
    True
    """  # noqa
    # This mirrors tabulate's upstream _format; keep the structure in sync
    # with upstream so the PATCH below stays an obvious, minimal diff.
    if val is None:
        return missingval
    if valtype in [int, _text_type]:
        return "{0}".format(val)
    elif valtype is _binary_type:
        try:
            return _text_type(val, "ascii")
        except TypeError:
            return _text_type(val)
    elif valtype is float:
        # A string that carries ANSI escape codes around a number: format
        # only the visible digits and splice the result back in.
        is_a_colored_number = has_invisible and isinstance(
            val, (_text_type, _binary_type)
        )
        if is_a_colored_number:
            raw_val = _strip_invisible(val)
            formatted_val = format(float(raw_val), floatfmt)
            return val.replace(raw_val, formatted_val)
        # PATCH: Preserve string formatting even for numeric looking values.
        # https://github.com/crate/crash/commit/1052e0d79
        elif not floatfmt:
            return str(val)
        else:
            return format(float(val), floatfmt)
    else:
        return "{0}".format(val)
def monkeypatch():
    """Apply crash-specific patches to the ``tabulate`` module in place."""
    # Register custom table format.
    tabulate._table_formats["cratedb"] = crate_fmt
    tabulate.multiline_formats["cratedb"] = "cratedb"
    # Module-level patch for more compact output.
    # https://github.com/astanin/python-tabulate/issues/116
    tabulate.MIN_PADDING = 0
    # Override original `_format` helper function to make output format
    # of float values consistent. See `PATCH` marker.
    # Reference: https://github.com/crate/crash/commit/1052e0d79.
    tabulate._format = _format
| {
"content_hash": "e707b423d0876df74b80240ca7b4430a",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 197,
"avg_line_length": 35.310810810810814,
"alnum_prop": 0.5621890547263682,
"repo_name": "crate/crash",
"id": "7ee76fbd1a794bacf32577739d07a988fc839287",
"size": "2613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crate/crash/tabulate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "147836"
},
{
"name": "Shell",
"bytes": "2244"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import importlib, inspect, os, sys
import numpy as np
from sklearn.datasets import make_regression
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import h2o
from h2o.sklearn import H2OGradientBoostingEstimator, H2OGradientBoostingRegressor, H2OScaler, H2OPCA
from h2o.sklearn.wrapper import H2OConnectionMonitorMixin
sys.path.insert(1, os.path.join("..",".."))
from tests import pyunit_utils, Namespace as ns
"""
This test suite creates sklearn pipelines using either a mix of sklearn+H2O components,
or only H2O components.
Then, it feeds them with H2O frames (more efficient, and ensures compatibility with the old API)
or with numpy arrays to provide the simplest approach for users wanting to use H2O like any sklearn estimator.
"""
seed = 2019
init_connection_args = dict(strict_version_check=False, show_progress=True)
scores = {}
def _get_data(format='numpy'):
    """Build a reproducible regression dataset split into train/test parts.

    :param format: 'numpy' returns numpy arrays; 'h2o' wraps every split
        into an ``h2o.H2OFrame``.
    :return: a Namespace with X_train, X_test, y_train and y_test.
    """
    features, target = make_regression(n_samples=1000, n_features=10,
                                       n_informative=5, random_state=seed)
    splits = train_test_split(features, target, random_state=seed)
    data = ns(X_train=splits[0], X_test=splits[1],
              y_train=splits[2], y_test=splits[3])
    if format == 'h2o':
        # convert each split in place into an H2O frame
        for name, value in data.__dict__.items():
            setattr(data, name, h2o.H2OFrame(value))
    return data
def test_h2o_only_pipeline_with_h2o_frames():
    """Fit/predict/score a pure-H2O pipeline fed with H2O frames."""
    pipe = Pipeline([
        ('standardize', H2OScaler()),
        ('pca', H2OPCA(k=2, seed=seed)),
        ('estimator', H2OGradientBoostingRegressor(seed=seed))
    ])
    data = _get_data(format='h2o')
    assert isinstance(data.X_train, h2o.H2OFrame)
    pipe.fit(data.X_train, data.y_train)
    predictions = pipe.predict(data.X_test)
    assert isinstance(predictions, h2o.H2OFrame)
    assert predictions.dim == [len(data.X_test), 1]
    # scoring requires a fresh H2OFrame, so regenerate the dataset
    data = _get_data(format='h2o')
    score = pipe.score(data.X_test, data.y_test)
    assert isinstance(score, float)
    skl_score = r2_score(data.y_test.as_data_frame().values,
                         predictions.as_data_frame().values)
    assert abs(score - skl_score) < 1e-6, "score={}, skl_score={}".format(score, skl_score)
    scores['h2o_only_pipeline_with_h2o_frame'] = score
def test_h2o_only_pipeline_with_numpy_arrays():
    """Fit/predict/score a pure-H2O pipeline fed with numpy arrays."""
    # In normal situations (release build) init_connection_args can be
    # omitted; otherwise it must be given to the first H2O element of the
    # pipeline. When feeding numpy inputs to a fully-H2O pipeline, the
    # final estimator also needs `data_conversion=True` so that its
    # predictions come back as numpy arrays.
    pipe = Pipeline([
        ('standardize', H2OScaler(init_connection_args=init_connection_args)),
        ('pca', H2OPCA(k=2, seed=seed)),
        ('estimator', H2OGradientBoostingRegressor(seed=seed, data_conversion=True))
    ])
    data = _get_data(format='numpy')
    assert isinstance(data.X_train, np.ndarray)
    pipe.fit(data.X_train, data.y_train)
    predictions = pipe.predict(data.X_test)
    assert isinstance(predictions, np.ndarray)
    assert predictions.shape == (len(data.X_test),)
    score = pipe.score(data.X_test, data.y_test)
    assert isinstance(score, float)
    assert abs(score - r2_score(data.y_test, predictions)) < 1e-6
    scores['h2o_only_pipeline_with_numpy_arrays'] = score
def test_mixed_pipeline_with_numpy_arrays():
    """Fit/predict/score an sklearn+H2O pipeline fed with numpy arrays."""
    # init_connection_args can normally be omitted (release build);
    # otherwise it must go to the first H2O element in the pipeline.
    pipe = Pipeline([
        ('standardize', StandardScaler()),
        ('pca', PCA(n_components=2, random_state=seed)),
        ('estimator', H2OGradientBoostingRegressor(seed=seed, init_connection_args=init_connection_args))
    ])
    data = _get_data(format='numpy')
    assert isinstance(data.X_train, np.ndarray)
    pipe.fit(data.X_train, data.y_train)
    predictions = pipe.predict(data.X_test)
    assert isinstance(predictions, np.ndarray)
    assert predictions.shape == (len(data.X_test),)
    score = pipe.score(data.X_test, data.y_test)
    assert isinstance(score, float)
    assert abs(score - r2_score(data.y_test, predictions)) < 1e-6
    scores['mixed_pipeline_with_numpy_arrays'] = score
def test_generic_estimator_with_distribution_param():
    """The generic estimator with distribution='gaussian' behaves like the regressor."""
    # init_connection_args can normally be omitted (release build);
    # otherwise it must go to the first H2O element in the pipeline.
    pipe = Pipeline([
        ('standardize', StandardScaler()),
        ('pca', PCA(n_components=2, random_state=seed)),
        ('estimator', H2OGradientBoostingEstimator(distribution='gaussian', seed=seed, init_connection_args=init_connection_args))
    ])
    data = _get_data(format='numpy')
    assert isinstance(data.X_train, np.ndarray)
    pipe.fit(data.X_train, data.y_train)
    predictions = pipe.predict(data.X_test)
    assert isinstance(predictions, np.ndarray)
    assert predictions.shape == (len(data.X_test),)
    score = pipe.score(data.X_test, data.y_test)
    assert isinstance(score, float)
    assert abs(score - r2_score(data.y_test, predictions)) < 1e-6
    scores['generic_estimator_with_distribution_param'] = score
def _assert_test_scores_equivalent(lk, rk):
    """Assert that two recorded test scores are (almost) equal.

    :param lk: key of the first score in the module-level ``scores`` dict.
    :param rk: key of the second score.
    If either key is missing (its test did not run), report it instead of
    failing.
    """
    if lk in scores and rk in scores:
        # Fix: compare the absolute difference of the two scores. The
        # original wrapped only the right-hand score in abs() --
        # `abs(scores[lk] - abs(scores[rk]))` -- which gives a wrong
        # result whenever a score is negative (r2 can be negative).
        assert abs(scores[lk] - scores[rk]) < 1e-6, \
            "expected equivalent scores but got {lk}={lscore} and {rk}={rscore}" \
            .format(lk=lk, rk=rk, lscore=scores[lk], rscore=scores[rk])
    elif lk not in scores:
        print("no scores for {}".format(lk))
    else:
        print("no scores for {}".format(rk))
def test_scores_are_equivalent():
    """Cross-check that equivalent pipelines produced equivalent scores."""
    pairs = [
        ('h2o_only_pipeline_with_h2o_frame', 'h2o_only_pipeline_with_numpy_arrays'),
        ('mixed_pipeline_with_numpy_arrays', 'generic_estimator_with_distribution_param'),
    ]
    for left_key, right_key in pairs:
        _assert_test_scores_equivalent(left_key, right_key)
pyunit_utils.run_tests([
test_h2o_only_pipeline_with_h2o_frames,
test_h2o_only_pipeline_with_numpy_arrays,
test_mixed_pipeline_with_numpy_arrays,
test_generic_estimator_with_distribution_param,
test_scores_are_equivalent,
])
| {
"content_hash": "4e51f1afe8de1d65ce20ae0f04a0bf0b",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 130,
"avg_line_length": 41.18064516129032,
"alnum_prop": 0.6968510104966317,
"repo_name": "michalkurka/h2o-3",
"id": "8274ad6e55bea6b5c7f0db6de99bc1920d164ad2",
"size": "6383",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_sklearn/pyunit_sklearn_regression_pipeline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "231770"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "Dockerfile",
"bytes": "10302"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "166480"
},
{
"name": "HCL",
"bytes": "15007"
},
{
"name": "HTML",
"bytes": "251906"
},
{
"name": "HiveQL",
"bytes": "3965"
},
{
"name": "Java",
"bytes": "11932863"
},
{
"name": "JavaScript",
"bytes": "89484"
},
{
"name": "Jupyter Notebook",
"bytes": "13867219"
},
{
"name": "Makefile",
"bytes": "50635"
},
{
"name": "Python",
"bytes": "6801044"
},
{
"name": "R",
"bytes": "3223113"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "33647"
},
{
"name": "Shell",
"bytes": "186559"
},
{
"name": "TeX",
"bytes": "634412"
}
],
"symlink_target": ""
} |
import os
import sys
def _gw_logging_cfg(log_file):
    """Build a dictConfig-style logging configuration for the tests.

    Console (stdout) and rotating-file logging are both configured at
    level INFO; the 'groundwork' logger uses both handlers, the root
    logger only the console handler.

    :param log_file: path used by the rotating file handler.
    :return: a logging configuration dictionary.
    """
    formatters = {
        'standard': {
            'format': "%(asctime)s - %(levelname)-5s - %(message)s"
        },
        'debug': {
            'format': "%(asctime)s - %(levelname)-5s - %(name)-40s - %(message)-80s - %(module)s:%("
                      "funcName)s(%(lineno)s)"
        },
    }
    handlers = {
        'console_stdout': {
            'formatter': 'standard',
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
            'level': 'INFO'
        },
        'file': {
            "class": "logging.handlers.RotatingFileHandler",
            "formatter": "debug",
            "filename": log_file,
            "maxBytes": 1024000,
            "backupCount": 3,
            'level': 'INFO'
        },
    }
    loggers = {
        '': {
            'handlers': ['console_stdout'],
            'level': 'INFO',
            'propagate': False
        },
        'groundwork': {
            'handlers': ['console_stdout', 'file'],
            'level': 'INFO',
            'propagate': False
        },
    }
    return {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': formatters,
        'handlers': handlers,
        'loggers': loggers,
    }
def test_logging_file(basicApp, tmpdir):
    """Check that file logging honours the configured INFO level.

    Configures the Groundwork application to log to a file at level INFO,
    emits one DEBUG and one INFO message, then verifies that only the
    INFO message reached the log file.
    """
    logfile_path = os.path.join(str(tmpdir), "test.log")
    application = basicApp
    # install the logging configuration (log level INFO)
    application.config.set('GROUNDWORK_LOGGING', _gw_logging_cfg(logfile_path))
    application._configure_logging(application.config.get("GROUNDWORK_LOGGING"))
    debug_message = "This is a test debug message."
    info_message = "This is a test info message."
    application.log.debug(debug_message)
    application.log.info(info_message)
    # read back the log file and inspect its contents
    with open(logfile_path) as handle:
        content = handle.read()
    # the DEBUG message must have been filtered out ...
    assert content.find(debug_message) == -1
    # ... while the INFO message must be present
    assert content.find(info_message) > 0
def test_logging_console(basicApp, tmpdir, capsys):
    """Check that stdout logging honours the configured INFO level.

    Configures the Groundwork application to log to stdout at level INFO,
    emits one DEBUG and one INFO message, then verifies that only the
    INFO message appears in the captured stdout.
    """
    logfile_path = os.path.join(str(tmpdir), "test.log")
    application = basicApp
    # install the logging configuration (log level INFO)
    application.config.set('GROUNDWORK_LOGGING', _gw_logging_cfg(logfile_path))
    application._configure_logging(application.config.get("GROUNDWORK_LOGGING"))
    debug_message = "This is a test debug message."
    info_message = "This is a test info message."
    application.log.debug(debug_message)
    application.log.info(info_message)
    captured_out, _err = capsys.readouterr()
    # the DEBUG message must have been filtered out ...
    assert captured_out.find(debug_message) == -1
    # ... while the INFO message must be present
    assert captured_out.find(info_message) > 0
| {
"content_hash": "40f81d783633ebe4d3109c6bf1c8e2a0",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 104,
"avg_line_length": 34.2,
"alnum_prop": 0.5628654970760234,
"repo_name": "useblocks/groundwork",
"id": "e7b165071bff7fd474ec3db65e1ccb7a2451fa2e",
"size": "3420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "8590"
},
{
"name": "CSS",
"bytes": "5479"
},
{
"name": "Makefile",
"bytes": "8232"
},
{
"name": "Python",
"bytes": "215683"
}
],
"symlink_target": ""
} |
import pytest
from jinja2 import nodes
from jinja2schema import parse, UnexpectedExpression, InvalidExpression
from jinja2schema.visitors.expr import visit_filter, Context
from jinja2schema.model import Dictionary, Scalar, List, Unknown, String, Number
def get_scalar_context(ast):
    """Return a Context that predicts a Scalar structure for *ast*."""
    predicted = Scalar.from_ast(ast)
    return Context(return_struct_cls=Scalar, predicted_struct=predicted)
def test_string_filters():
    """String-returning filters predict a String variable and rtype."""
    # `filter_name` rather than `filter`: the original shadowed the builtin.
    for filter_name in ('striptags', 'capitalize', 'title', 'upper', 'urlize'):
        template = '{{ x|' + filter_name + ' }}'
        ast = parse(template).find(nodes.Filter)
        ctx = Context(return_struct_cls=Scalar, predicted_struct=Scalar.from_ast(ast))
        rtype, struct = visit_filter(ast, ctx)
        expected_rtype = String(label='x', linenos=[1])
        expected_struct = Dictionary({
            'x': String(label='x', linenos=[1]),
        })
        assert rtype == expected_rtype
        assert struct == expected_struct
def test_batch_and_slice_filters():
    """`batch` and `slice` turn a list into a list of lists; a scalar
    prediction for the same expression must be rejected."""
    # `filter_name` rather than `filter`: the original shadowed the builtin.
    for filter_name in ('batch', 'slice'):
        template = '{{ items|' + filter_name + '(3, " ") }}'
        ast = parse(template).find(nodes.Filter)
        unknown_ctx = Context(predicted_struct=Unknown.from_ast(ast))
        rtype, struct = visit_filter(ast, unknown_ctx)
        expected_rtype = List(List(Unknown(), linenos=[1]), linenos=[1])
        assert rtype == expected_rtype
        expected_struct = Dictionary({
            'items': List(Unknown(), label='items', linenos=[1]),
        })
        assert struct == expected_struct
        # predicting a scalar structure must raise
        scalar_ctx = Context(predicted_struct=Scalar.from_ast(ast))
        with pytest.raises(UnexpectedExpression) as e:
            visit_filter(ast, scalar_ctx)
        assert str(e.value) == ('conflict on the line 1\n'
                                'got: AST node jinja2.nodes.Filter of structure [[<unknown>]]\n'
                                'expected structure: <scalar>')
def test_default_filter():
    """`default` records the default value on the variable."""
    filter_ast = parse('''{{ x|default('g') }}''').find(nodes.Filter)
    rtype, struct = visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert struct == Dictionary({
        'x': String(label='x', linenos=[1], used_with_default=True, value='g'),
    })
def test_filter_chaining():
    """Chained filters propagate structure through every step."""
    # chained list filters on a nested attribute
    filter_ast = parse('''{{ (xs|first|last).gsom|sort|length }}''').find(nodes.Filter)
    rtype, struct = visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert struct == Dictionary({
        'xs': List(List(Dictionary({
            'gsom': List(Unknown(), label='gsom', linenos=[1]),
        }, linenos=[1]), linenos=[1]), label='xs', linenos=[1]),
    })
    # list|sort|first collapses back to a scalar variable
    filter_ast = parse('''{{ x|list|sort|first }}''').find(nodes.Filter)
    rtype, struct = visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert struct == Dictionary({
        'x': Scalar(label='x', linenos=[1]),
    })
    # first|list is an invalid chain for a scalar context
    filter_ast = parse('''{{ x|first|list }}''').find(nodes.Filter)
    with pytest.raises(UnexpectedExpression):
        visit_filter(filter_ast, get_scalar_context(filter_ast))
def test_raise_on_unknown_filter():
    """Unknown or unsupported filters raise InvalidExpression."""
    filter_ast = parse('''{{ x|unknownfilter }}''').find(nodes.Filter)
    with pytest.raises(InvalidExpression) as exc_info:
        visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert 'unknown filter' in str(exc_info.value)
    # `attr` is a known filter, but deliberately unsupported
    filter_ast = parse('''{{ x|attr('attr') }}''').find(nodes.Filter)
    with pytest.raises(InvalidExpression) as exc_info:
        visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert 'filter is not supported' in str(exc_info.value)
def test_abs_filter():
    """`abs` expects and returns a Number."""
    filter_ast = parse('{{ x|abs }}').find(nodes.Filter)
    rtype, struct = visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert rtype == Number(label='x', linenos=[1])
    assert struct == Dictionary({
        'x': Number(label='x', linenos=[1])
    })
def test_int_filter():
    """`int` yields a Number but only requires a Scalar input."""
    filter_ast = parse('{{ x|int }}').find(nodes.Filter)
    rtype, struct = visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert rtype == Number(label='x', linenos=[1])
    assert struct == Dictionary({
        'x': Scalar(label='x', linenos=[1]),
    })
def test_wordcount_filter():
    """`wordcount` maps a String input to a Number result."""
    filter_ast = parse('{{ x|wordcount }}').find(nodes.Filter)
    rtype, struct = visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert rtype == Number(label='x', linenos=[1])
    assert struct == Dictionary({
        'x': String(label='x', linenos=[1])
    })
def test_join_filter():
    """`join` takes a list of strings plus a separator and yields a String."""
    filter_ast = parse('{{ xs|join(separator|default("|")) }}').find(nodes.Filter)
    rtype, struct = visit_filter(filter_ast, get_scalar_context(filter_ast))
    assert rtype == String(label='xs', linenos=[1])
    assert struct == Dictionary({
        'xs': List(String(), label='xs', linenos=[1]),
        'separator': String(label='separator', linenos=[1], used_with_default=True, value='|'),
    })
def test_length_filter():
    """`length` expects a list and returns a Number."""
    ast = parse('{{ xs|length }}').find(nodes.Filter)
    rtype, struct = visit_filter(ast, get_scalar_context(ast))
    assert rtype == Number(label='xs', linenos=[1])
    assert struct == Dictionary({
        'xs': List(Unknown(), label='xs', linenos=[1]),
}) | {
"content_hash": "0bcbac1074dcecd121dd451336b22795",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 96,
"avg_line_length": 35.11486486486486,
"alnum_prop": 0.6140080815855301,
"repo_name": "aromanovich/jinja2schema",
"id": "1758ef4d39c4a2a183362723bb3e422f3974838f",
"size": "5197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests/test_filter_visitor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1931"
},
{
"name": "Python",
"bytes": "124042"
},
{
"name": "Shell",
"bytes": "165"
}
],
"symlink_target": ""
} |
"""Implements the OSEO GetOptions operation"""
import logging
import pyxb
import pyxb.bundles.opengis.oseo_1_0 as oseo
import pyxb.bundles.opengis.swe_2_0 as swe
from ..models import Order
from .. import settings
from .. import utilities
logger = logging.getLogger(__name__)
def create_oseo_order_options(collection, order_type):
    """Build the OSEO ``CommonOrderOptionsType`` for a collection and order type.

    Parameters
    ----------
    collection: str
        Identifier of the collection whose options are requested.
    order_type: str
        One of the order type constants (e.g. ``Order.PRODUCT_ORDER``).

    Returns
    -------
    oseo.CommonOrderOptionsType
        The populated options element: one SWE data record per configured
        processing option, plus the globally configured delivery options.
    """
    collection_config = utilities.get_collection_settings(
        collection_id=collection)
    order_options = oseo.CommonOrderOptionsType(
        productOrderOptionsId=".".join((order_type,
                                        collection_config["name"])),
        description="Options for submitting orders of type {!r} for "
                    "collection {!r} ".format(order_type, collection),
        orderType=order_type
    )
    # one SWE data record per configured processing option for this
    # order type (config keys are the lower-cased order type name)
    for option in collection_config[order_type.lower()]["options"]:
        data_record = create_swe_data_record(option)
        order_options.option.append(
            pyxb.BIND(AbstractDataComponent=data_record))
    delivery_options = order_options.productDeliveryOptions
    # online data access protocols, if any are configured
    data_access_options = settings.get_online_data_access_options()
    if any(data_access_options):
        delivery_options.append(pyxb.BIND(onlineDataAccess=pyxb.BIND()))
        for data_access_option in data_access_options:
            delivery_options[-1].onlineDataAccess.append(
                oseo.ProtocolType(data_access_option["protocol"]))
    # online data delivery protocols, if any are configured
    data_delivery_options = settings.get_online_data_delivery_options()
    if any(data_delivery_options):
        delivery_options.append(pyxb.BIND(onlineDataDelivery=pyxb.BIND()))
        for data_delivery_option in data_delivery_options:
            delivery_options[-1].onlineDataDelivery.append(
                oseo.ProtocolType(data_delivery_option["protocol"]))
    # physical media delivery, if any media are configured
    media_delivery_options = settings.get_media_delivery_options()
    if any(media_delivery_options["media"]):
        delivery_options.append(pyxb.BIND(mediaDelivery=pyxb.BIND()))
        for medium_options in media_delivery_options["media"]:
            delivery_options[-1].mediaDelivery.append(
                oseo.PackageMedium(medium_options["type"].value))
    return order_options
def create_swe_data_record(option_name):
    """Build a SWE DataRecord describing a single processing option.

    The record holds one field named after the option. When the option's
    settings define "choices", those are added as an AllowedTokens
    constraint on the field's Category element.
    """
    option_config = utilities.get_processing_option_settings(option_name)
    data_record = swe.DataRecord()
    data_record.field.append(pyxb.BIND())
    data_record.field[0].name = option_name
    category = swe.Category(updatable=False)
    category.optional = True
    # Disabled legacy assignments kept for reference:
    #cat.definition = 'http://geoland2.meteo.pt/ordering/def/%s' % \
    #    option.name
    #cat.identifier = option.name
    #cat.description = _n(option.description)
    choices = option_config.get("choices", [])
    if any(choices):
        # restrict the allowed values to the configured choices
        category.constraint = pyxb.BIND()
        allowed_tokens = swe.AllowedTokens()
        for choice in choices:
            allowed_tokens.value_.append(choice)
        category.constraint.append(allowed_tokens)
    data_record.field[0].append(category)
    return data_record
# TODO - Implement retrieval of options for subscription orders
def get_options(request, user):
    """Implements the OSEO GetOptions operation.

    Parameters
    ----------
    request: pyxb.bundles.opengis.raw.oseo.OrderOptionsRequestType
        The instance with the request parameters
    user: django.contrib.auth.models.User
        User making the request

    Returns
    -------
    oseo.GetOptionsResponse:
        The response object

    Raises
    ------
    NotImplementedError
        For identifier-based and tasking-request-based lookups, which are
        not supported yet.
    """
    response = oseo.GetOptionsResponse(status="success")
    if request.collectionId is not None:
        # collection-based request: return both product-order and
        # subscription-order options for the collection
        product_order_options = create_oseo_order_options(
            collection=request.collectionId,
            order_type=Order.PRODUCT_ORDER
        )
        subscription_order_options = create_oseo_order_options(
            collection=request.collectionId,
            order_type=Order.SUBSCRIPTION_ORDER
        )
        for order_option in (product_order_options,
                             subscription_order_options):
            if order_option is not None:
                response.orderOptions.append(order_option)
    elif any(request.identifier):
        # retrieve the products from the catalogue using the identifier
        # assess their collection and return the collection options
        raise NotImplementedError
    else:  # tasking request id
        raise NotImplementedError
    return response
| {
"content_hash": "ac66c27ce8c698b1be8db795e5670ecf",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 74,
"avg_line_length": 38.51754385964912,
"alnum_prop": 0.6720564791619221,
"repo_name": "pyoseo/django-oseoserver",
"id": "6bcb73758ea1427ebce39643608c713e29d7ec3a",
"size": "4992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oseoserver/operations/getoptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5653"
},
{
"name": "Python",
"bytes": "282717"
},
{
"name": "Shell",
"bytes": "17370"
}
],
"symlink_target": ""
} |
""" For docs, see https://github.com/pkimber/cloud_docs """
import os
from pkg_resources import safe_name
from fabric.api import (
abort,
cd,
env,
run,
)
from fabric.colors import (
green,
yellow,
)
from fabric.contrib.files import exists
from fabric.contrib.project import rsync_project
from lib.browser import BrowserDriver
from lib.command import DjangoCommand
env.use_ssh_config = True
SECRET_KEY = 'SECRET_KEY'
def download_package(site_name, prefix, version, temp_folder, venv_folder):
    """Download (without installing) the pinned project package into *temp_folder*."""
    requirement = '{}-{}=={}'.format(prefix, safe_name(site_name), version)
    print(green("download package: {}".format(requirement)))
    pip_path = os.path.join(venv_folder, 'bin', 'pip')
    # --no-deps: only the project sdist itself is wanted here
    run('{} install --download={} --no-deps {}'.format(
        pip_path, temp_folder, requirement
    ))
def extract_project_package(install_folder, temp_folder, site_name, prefix, version):
    """Unpack the downloaded sdist archive into *install_folder*."""
    archive_name = '{}-{}-{}.tar.gz'.format(prefix, safe_name(site_name), version)
    print(green("extract project package: {}".format(archive_name)))
    archive_path = os.path.join(temp_folder, archive_name)
    # strip the top-level folder contained in the tarball
    run('tar --strip-components=1 --directory={} -xzf {}'.format(
        install_folder, archive_path
    ))
def install_requirements(prefix, install_folder, venv_folder):
    """Install the production requirements into the release virtualenv.

    NOTE(review): the *prefix* parameter is currently unused; kept only
    for signature compatibility with callers.
    """
    requirements_file = os.path.join(install_folder, 'requirements/production.txt')
    print(green("requirements: {}".format(requirements_file)))
    pip_path = os.path.join(venv_folder, 'bin', 'pip')
    run("{} install --upgrade pip".format(pip_path))
    run("{} install -r {}".format(pip_path, requirements_file))
def mkvirtualenv(venv_folder):
    """Create a python3 virtualenv at *venv_folder*."""
    print(green("mkvirtualenv: {}".format(venv_folder)))
    run('/usr/bin/virtualenv {} {}'.format('--python=/usr/bin/python3', venv_folder))
def link_install_to_live_folder(install_folder, live_folder):
    """Point the live-folder symlink at the freshly installed release."""
    print(green("link '{0}' folder to '{1}'".format(live_folder, install_folder)))
    # remove any existing link before re-creating it
    if exists(live_folder):
        run('rm {0}'.format(live_folder))
    run('ln -s {0} {1}'.format(install_folder, live_folder))
def touch_vassal_ini(vassal_ini_file_names):
    """Touch each uwsgi vassal ini file so uwsgi reloads the apps.

    Aborts the deploy as soon as one of the files is missing.
    """
    print(green("touch"))
    for ini_path in vassal_ini_file_names:
        if not exists(ini_path):
            abort('uwsgi ini file does not exist: {0}'.format(ini_path))
        run('touch {0}'.format(ini_path))
def run_post_deploy_test(site_name):
    """Smoke-test the deployed site with the browser driver."""
    driver = BrowserDriver(site_name)
    driver.test()
    driver.test_sitemap()
    driver.close()
def deploy_django(folder_info, site_info, version):
    """Install a Django site release into a fresh folder on the server.

    Creates a virtualenv, downloads and unpacks the pinned project
    package, installs the production requirements and then runs the
    standard Django deploy commands (collectstatic, compress and -- when
    the site has a database -- migrations plus project init).

    folder_info: provides install/venv/temp folder paths.
    site_info: describes the site package and its settings.
    version: exact package version to deploy.
    """
    # virtualenv
    mkvirtualenv(folder_info.install_venv())
    # download and extract main package
    download_package(
        site_info.package,
        site_info.prefix(),
        version,
        folder_info.install_temp(),
        folder_info.install_venv()
    )
    extract_project_package(
        folder_info.install(),
        folder_info.install_temp(),
        site_info.package,
        site_info.prefix(),
        version
    )
    # debug
    run('ls -l {0}'.format(folder_info.install()))
    # requirements
    install_requirements(
        site_info.prefix(),
        folder_info.install(),
        folder_info.install_venv()
    )
    command = DjangoCommand(
        folder_info.install(), folder_info.install_venv(), site_info
    )
    command.collect_static()
    command.compress()
    # migrate database and init project
    if site_info.has_database:
        command.migrate_database()
        command.init_project()
def django_post_deploy(folder_info):
    """Restart the uwsgi apps by touching their vassal ini files."""
    touch_vassal_ini(folder_info.vassals())
def deploy_php(folder_info, site_info):
    """Deploy a PHP site by rsync-ing archives and unpacking them.

    Uploads the local ``../deploy/upload/`` folder to the server, then
    extracts each archive listed in the site's package configuration into
    the install folder (or a sub-folder when the package specifies one).
    """
    rsync_project(
        local_dir='../deploy/upload/',
        remote_dir=folder_info.upload(),
    )
    packages = site_info.packages()
    for package in packages:
        name = package['name']
        archive = package['archive']
        # optional per-package sub-folder and extra tar options
        folder = package.get('folder', None)
        tar_opt = package.get('tar', '')
        print(yellow(name))
        if folder:
            install = os.path.join(folder_info.install(), folder)
            if not exists(install):
                print(yellow(' {}'.format(install)))
                run('mkdir -p {}'.format(install))
        else:
            install = folder_info.install()
        with cd(install):
            print(yellow(' {}'.format(archive)))
            print(yellow(' {}'.format(tar_opt)))
            run('tar {} -xzf {}'.format(
                tar_opt,
                os.path.join(folder_info.upload(), archive),
            ))
| {
"content_hash": "831e12fd0380442e4c36e71b05e37b57",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 85,
"avg_line_length": 30.18300653594771,
"alnum_prop": 0.6167171935902989,
"repo_name": "pkimber/fabric",
"id": "03eb08460f92f36d83dbd99e7f54966222f7a61a",
"size": "4618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/deploy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "98528"
},
{
"name": "SaltStack",
"bytes": "8832"
},
{
"name": "Scheme",
"bytes": "282"
}
],
"symlink_target": ""
} |
from runner.koan import *
import jims
import joes
counter = 0 # Global
class AboutScope(Koan):
    """Koan lessons about Python name scoping (module, class and global)."""
    #
    # NOTE:
    # Look in jims.py and joes.py to see definitions of Dog used
    # for this set of tests
    #
    def test_dog_is_not_available_in_the_current_scope(self):
        # `Dog` is defined in the jims/joes modules, not here, so the bare
        # name raises a NameError (caught below as a generic Exception).
        try:
            fido = Dog()
        except Exception as ex:
            self.assertMatch("name 'Dog' is not defined", ex[0])
    def test_you_can_reference_nested_classes_using_the_scope_operator(self):
        # The same class name in two different modules gives two distinct
        # classes, addressed via their module names.
        fido = jims.Dog()
        # name 'jims' module name is taken from jim.py filename
        rover = joes.Dog()
        self.assertEqual("jims dog", fido.identify())
        self.assertEqual("joes dog", rover.identify())
        self.assertEqual(False, type(fido) == type(rover))
        self.assertEqual(False, jims.Dog == joes.Dog)
    # ------------------------------------------------------------------
    # A class attribute deliberately named like the builtin `str`, to
    # demonstrate class-scope vs module-scope lookup in the tests below.
    class str(object):
        pass
    def test_bare_bones_class_names_do_not_assume_the_current_scope(self):
        # inside a method body, a bare `str` is the builtin, not the
        # nested class attribute
        self.assertEqual(False, AboutScope.str == str)
    def test_nested_string_is_not_the_same_as_the_system_string(self):
        self.assertEqual(False, self.str == type("HI"))
    def test_str_without_self_prefix_stays_in_the_global_scope(self):
        self.assertEqual(True, str == type("HI"))
    # ------------------------------------------------------------------
    PI = 3.1416
    def test_constants_are_defined_with_an_initial_uppercase_letter(self):
        self.assertAlmostEqual(3.1416, self.PI)
        # Note, floating point numbers in python are not precise.
        # assertAlmostEqual will check that it is 'close enough'
    def test_constants_are_assumed_by_convention_only(self):
        self.PI = "rhubarb"
        self.assertEqual("rhubarb", self.PI)
        # There aren't any real constants in python. Its up to the developer
        # to keep to the convention and not modify them.
    # ------------------------------------------------------------------
    def increment_using_local_counter(self, counter):
        # rebinding the parameter has no effect on the module-level counter
        counter = counter + 1
    def increment_using_global_counter(self):
        # `global` makes the assignment target the module-level counter
        global counter
        counter = counter + 1
    def test_incrementing_with_local_counter(self):
        global counter
        start = counter
        self.increment_using_local_counter(start)
        self.assertEqual(False, counter == start + 1)
    def test_incrementing_with_global_counter(self):
        global counter
        start = counter
        self.increment_using_global_counter()
        self.assertEqual(True, counter == start + 1)
    # ------------------------------------------------------------------
    # a `global` statement works even inside a class body
    global deadly_bingo
    deadly_bingo = [4, 8, 15, 16, 23, 42]
    def test_global_attributes_can_be_created_in_the_middle_of_a_class(self):
        self.assertEqual(42, deadly_bingo[5])
| {
"content_hash": "771c082036aebcafe8fcb85de4bb7677",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 77,
"avg_line_length": 33,
"alnum_prop": 0.5641811372148451,
"repo_name": "aomelchenko/python_koans",
"id": "0aa5cf7e842175bcb09085283dbfea085dc3312a",
"size": "2984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_scope.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "1082"
},
{
"name": "Python",
"bytes": "331860"
},
{
"name": "Shell",
"bytes": "1525"
}
],
"symlink_target": ""
} |
import unittest
import pep8
import os.path
tests_dir = os.path.dirname(__file__)
modules_dir = os.path.abspath(os.path.join(tests_dir, "..", "coal"))
class TestCodeStyle(unittest.TestCase):
    """Enforce PEP 8 conformance over the tests and package modules."""

    def test_pep8_conformance(self):
        """The pep8 checker must report zero issues."""
        style_guide = pep8.StyleGuide()
        report = style_guide.check_files([tests_dir, modules_dir])
        self.assertEqual(
            report.total_errors,
            0,
            "Found pep8 conformance issues",
        )
| {
"content_hash": "a36f2fa885df9e92db0384d231dc0e06",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 24.42105263157895,
"alnum_prop": 0.6271551724137931,
"repo_name": "saymedia/python-coal",
"id": "068bbc74436fd0e267247968d9cddb5a4c84cc8f",
"size": "464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_code_style.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33692"
}
],
"symlink_target": ""
} |
from flask_script import Manager
from sched.app import app, db
# By default, Flask-Script adds the 'runserver' and 'shell' commands to
# interact with the Flask application. Add additional commands using the
# `@manager.command` decorator, where Flask-Script will create help
# documentation using the function's docstring. Try it, and call `python
# manage.py -h` to see the outcome.
manager = Manager(app)
# Exposed as `python manage.py create_tables`. NOTE: the one-line
# docstring below is the command's CLI help text at runtime, so it is
# deliberately left unchanged.
@manager.command
def create_tables():
    "Create relational database tables."
    db.create_all()
# Exposed as `python manage.py drop_tables`. NOTE: the one-line docstring
# below is the command's CLI help text at runtime, so it is deliberately
# left unchanged.
@manager.command
def drop_tables():
    "Drop all project relational database tables. THIS DELETES DATA."
    db.drop_all()
if __name__ == '__main__':
manager.run()
| {
"content_hash": "5e82cc50a8c3c8edb3a3c325022ed8b5",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 26.307692307692307,
"alnum_prop": 0.7236842105263158,
"repo_name": "abacuspix/NFV_project",
"id": "90a7da9e403f61d6afb09ddbd471ea5b35dd5dea",
"size": "684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Instant_Flask_Web_Development/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6037"
},
{
"name": "Gherkin",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "342352"
},
{
"name": "JavaScript",
"bytes": "8828"
},
{
"name": "Mako",
"bytes": "2224"
},
{
"name": "Nginx",
"bytes": "231"
},
{
"name": "Python",
"bytes": "706126"
}
],
"symlink_target": ""
} |
def largestValuesInTreeRows(t):
    """Return the largest node value of each row (level) of binary tree *t*.

    Performs a breadth-first traversal; for every level containing at
    least one node whose value is not None, the maximum of those values
    is appended to the result.

    t: root node with `value`, `left` and `right` attributes, or None.
    Returns a list of per-level maxima (empty for an empty tree).
    """
    current_row = [t]
    maxima = []
    while current_row:
        next_row = []
        row_values = []
        for node in current_row:
            if node is None:
                continue
            # Test for None explicitly: the original truthiness check
            # (`if node.value:`) silently dropped legitimate 0 values,
            # so rows containing only zeros were omitted.
            if node.value is not None:
                row_values.append(node.value)
            if node.left is not None:
                next_row.append(node.left)
            if node.right is not None:
                next_row.append(node.right)
        if row_values:
            maxima.append(max(row_values))
        current_row = next_row
    return maxima
| {
"content_hash": "4eb47c6eef563186edb2c294460638c4",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 51,
"avg_line_length": 31,
"alnum_prop": 0.4726507713884993,
"repo_name": "emirot/codefights",
"id": "79bcaf9a223ccae4b4cf16ccae0229681b5dbab5",
"size": "869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interviewPractice/largestValuesInTreeRows.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "104702"
}
],
"symlink_target": ""
} |
"""
A script to load comments into the database
Author: Ming
Usage:
usage: load_comments.py [-h] [--source SOURCE] [--start START] [--end END]
[--user USER] [--passwd PASSWD] [--db DBNAME]
optional arguments:
-h, --help show this help message and exit
--source SOURCE The source info, which would be used when openning
cooresponding json file
--start START The start date, in form of YYYY-MM-DD
--end END The end date, in form of YYYY-MM-DD, The range is inclusive
--user USER The user name of database
--passwd PASSWD The password of database
--db DBNAME The name of database
Example:
python load_entities.py --source=imdb --start=2015-01-01 --end=2015-01-01 --user=ming --passwd=fang --db=test')
"""
import MySQLdb as mdb
from datetime import date
from datetime import datetime
from dateutil.rrule import rrule, DAILY
import sys
import json
import argparse
import os.path
from sets import Set
datadir = '/home/mingf/comment_full/'
homedir = '/home/mingf/Weiss/'
module = 'mysql/'
dbsetting = '/home/mingf/Weiss/scrapers/imdb/dbsetting.json'
release_date = ''
cfile = ''
efile = ''
dbh = None
dbc = None
def _dict2tuple(entry):
return (entry['eid'],
entry['body'],
entry['rating'] or '-1',
entry['author'] or u'',
entry['title'] or u'',
entry['time'],
entry['sentiment'],
entry['csid']
)
def getHistory(source):
    """Return the Set of comment ids already stored for *source*.

    Opens a fresh connection from the ``dbsetting`` JSON file and closes
    it before returning, so the once-per-day calls from __main__ do not
    leak connections (the original left every connection open).
    """
    with open(dbsetting, 'r') as f:
        setting = json.load(f)
    # NOTE(review): the settings key is 'passed' here and in __main__ --
    # looks like a typo for 'passwd', but dbsetting.json is not visible;
    # confirm the schema before renaming.
    conn = mdb.connect(host=setting['host'], user=setting['user'],
                       passwd=setting['passed'], db=setting['db'])
    try:
        cur = conn.cursor()
        # Parameterized query: `source` comes from the --source CLI flag,
        # so never interpolate it into the SQL string.
        cur.execute(
            "select comment.id from comment, entity"
            " where comment.eid=entity.eid and entity.source=%s",
            (source,))
        rows = cur.fetchall()
    finally:
        conn.close()
    return Set([row[0] for row in rows])
def run(IDs):
    # Load one day's JSON dump (path in module-global `cfile`) and bulk-insert
    # every comment whose id is not already in `IDs` (ids already stored).
    # Relies on module globals set by __main__: cfile, thisdate, dbc, dbh.
    if (not os.path.exists(cfile)):
        print "No such file",cfile
        return
    with open(cfile, 'r') as f:
        data = json.load(f)
    # The dump is a list of comment lists; flatten it and convert each dict
    # to the column tuple expected by the INSERT below.
    data = [_dict2tuple(comment) for comments in data for comment in comments]
    print "About to load", thisdate, "with", len(data), "comments"
    if (len(data) == 0):
        return
    # tuple index 7 is the comment's csid, which maps to the `id` column.
    data = filter(lambda comment: comment[7] not in IDs, data)
    print "After filtering," , len(data), "comments left"
    dbc.executemany(
        """INSERT INTO comment (eid, body, rating, author, title, time, sentiment, id)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s)""", data)
    dbh.commit()
def _arg_parser():
parser = argparse.ArgumentParser(description='A script to load comments info into database.\nEx: python load_comments.py --source=imdb --start=2015-01-01 --end=2015-01-01 --user=ming --passwd=fang --db=test')
parser.add_argument('--source', dest='source', action='store', help='The source info, which would be used when openning cooresponding json file', required=True)
parser.add_argument('--start', dest='start', action='store', help='The start date, in form of YYYY-MM-DD', required=True)
parser.add_argument('--end', dest='end', action='store', help='The end date, in form of YYYY-MM-DD, The range is inclusive', required=True)
parser.add_argument('--user', dest='user', action='store', help='The user name of database', required=True)
parser.add_argument('--passwd', dest='passwd', action='store', help='The password of database', required=True)
parser.add_argument('--db', dest='dbname', action='store', help='The name of database', required=True)
results = parser.parse_args()
user = results.user
passwd = results.passwd
source = results.source
dbname = results.dbname
start = datetime.strptime(results.start, '%Y-%m-%d').date()
end = datetime.strptime(results.end, '%Y-%m-%d').date()
return (user, passwd, start, end, dbname, source)
if __name__ == '__main__':
    # NOTE(review): the parsed user/passwd/dbname are never used below --
    # the connection credentials come from dbsetting.json instead; verify
    # which source of truth is intended.
    user, passwd, start, end, dbname, source = _arg_parser()
    with open(dbsetting, 'r') as f:
        setting = json.load(f)
    # NOTE(review): 'passed' looks like a typo for 'passwd'; confirm against
    # the dbsetting.json schema before changing.
    dbh = mdb.connect(host=setting['host'], user=setting['user'], passwd=setting['passed'], db=setting['db'])
    dbc = dbh.cursor()
    # Force UTF-8 on the connection so comment bodies round-trip correctly.
    dbh.set_character_set('utf8')
    dbc.execute('SET NAMES utf8;')
    dbc.execute('SET CHARACTER SET utf8;')
    dbc.execute('SET character_set_connection=utf8;')
    # Load one JSON dump per day over the inclusive [start, end] range;
    # run() reads the module globals cfile/thisdate set here.
    for dt in rrule(DAILY, dtstart = start, until = end):
        IDs = getHistory(source)
        thisdate = dt.strftime('%Y-%m-%d')
        cfile = '%s%s_comments_%s.json' % (datadir, source, thisdate)
        run(IDs)
    dbh.close()
| {
"content_hash": "de099c7904e86bc03affe0361c710f47",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 212,
"avg_line_length": 35.8125,
"alnum_prop": 0.6348167539267016,
"repo_name": "WangWenjun559/Weiss",
"id": "fd2e05ab6cbe1c7b4257c05ae7b5349833913a15",
"size": "4584",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysql/load_comments.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "HTML",
"bytes": "1778"
},
{
"name": "Java",
"bytes": "222411"
},
{
"name": "JavaScript",
"bytes": "1293"
},
{
"name": "Python",
"bytes": "5454801"
},
{
"name": "Shell",
"bytes": "5102"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import os
import sys
import re
from distutils.dep_util import newer_group, newer
from glob import glob
from os.path import join
def configuration(parent_package='',top_path=None):
    """numpy.distutils configuration for the ``isolve`` subpackage.

    Builds the ``_iterative`` Fortran extension from the templated
    iterative-solver sources plus the g77 ABI wrappers, and registers
    the test directory.  Raises NotFoundError when no LAPACK/BLAS is
    available on the system.
    """
    from numpy.distutils.system_info import get_info, NotFoundError
    from numpy.distutils.misc_util import Configuration
    from scipy._build_utils import get_g77_abi_wrappers

    config = Configuration('isolve', parent_package, top_path)

    lapack_opt = get_info('lapack_opt')
    if not lapack_opt:
        raise NotFoundError('no lapack/blas resources found')

    # Templated Fortran sources for the iterative methods; the commented
    # entries (Chebyshev, Jacobi, SOR) are deliberately not built.
    solver_sources = [
        'BiCGREVCOM.f.src',
        'BiCGSTABREVCOM.f.src',
        'CGREVCOM.f.src',
        'CGSREVCOM.f.src',
        # 'ChebyREVCOM.f.src',
        'GMRESREVCOM.f.src',
        # 'JacobiREVCOM.f.src',
        'QMRREVCOM.f.src',
        # 'SORREVCOM.f.src'
    ]
    util_sources = ['STOPTEST2.f.src', 'getbreak.f.src']
    ext_sources = [join('iterative', name)
                   for name in util_sources + solver_sources + ['_iterative.pyf.src']]
    ext_sources += get_g77_abi_wrappers(lapack_opt)

    config.add_extension('_iterative', sources=ext_sources, extra_info=lapack_opt)
    config.add_data_dir('tests')
    return config
if __name__ == '__main__':
    # Allow building this subpackage standalone.
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| {
"content_hash": "588225aaff90bd71c08fd64aa5c1bdcf",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 67,
"avg_line_length": 29.176470588235293,
"alnum_prop": 0.6122311827956989,
"repo_name": "juliantaylor/scipy",
"id": "ce1eaeedaf75e7eb0ca6b9e1b8a5d4be9b00a875",
"size": "1510",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scipy/sparse/linalg/isolve/setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4674358"
},
{
"name": "C++",
"bytes": "9673833"
},
{
"name": "CSS",
"bytes": "2624"
},
{
"name": "FORTRAN",
"bytes": "6351600"
},
{
"name": "M",
"bytes": "66"
},
{
"name": "Matlab",
"bytes": "4280"
},
{
"name": "Objective-C",
"bytes": "4666"
},
{
"name": "Python",
"bytes": "7389933"
},
{
"name": "Shell",
"bytes": "1612"
},
{
"name": "TeX",
"bytes": "37261"
},
{
"name": "nesC",
"bytes": "1736"
}
],
"symlink_target": ""
} |
import sys, re, time, string
import numpy;
import scipy;
import scipy.special;
import nltk;
def compute_dirichlet_expectation(dirichlet_parameter):
    """E[log theta] for theta ~ Dirichlet(dirichlet_parameter).

    For a 1-D parameter vector returns psi(alpha_k) - psi(sum_k alpha_k);
    for a 2-D parameter the same quantity is computed row-wise (each row
    is treated as an independent Dirichlet).
    """
    digamma = scipy.special.psi
    if dirichlet_parameter.ndim == 1:
        return digamma(dirichlet_parameter) - digamma(numpy.sum(dirichlet_parameter))
    row_totals = numpy.sum(dirichlet_parameter, 1)
    return digamma(dirichlet_parameter) - digamma(row_totals)[:, numpy.newaxis]
class Inferencer:
    """Base class for online (stochastic) variational inference of an
    LDA-style topic model.  Subclasses implement parse_doc_list(),
    e_step() and learning(); this base class owns the topic-word
    variational parameter ``_beta`` (lambda) and its stochastic m-step
    update.  NOTE: Python 2 code (``xrange`` in export_beta)."""
    def __init__(self,
                 hash_oov_words=False,
                 compute_elbo=True
                 ):
        # hash_oov_words: subclass hook for hashing out-of-vocabulary words.
        # compute_elbo: when False, m_step skips the ELBO terms and returns 0.
        self._hash_oov_words = hash_oov_words;
        self._compute_elbo = compute_elbo;
    def _initialize(self,
                    vocab,
                    number_of_documents,
                    number_of_topics,
                    alpha_theta,
                    alpha_eta,
                    tau0,
                    kappa
                    ):
        """Set hyper-parameters and randomly initialize q(beta | lambda)."""
        # Normalize vocabulary entries (lowercase, keep only a-z) and assign
        # each a dense integer id.  NOTE(review): distinct words that
        # normalize to the same string silently share one id.
        self._vocab = dict()
        for word in vocab:
            word = word.lower()
            word = re.sub(r'[^a-z]', '', word)
            self._vocab[word] = len(self._vocab)
        self._number_of_topics = number_of_topics
        self._vocab_size = len(self._vocab)
        self._number_of_documents = number_of_documents
        # Dirichlet priors: alpha_theta (doc-topic), alpha_eta (topic-word).
        self._alpha_theta = alpha_theta
        self._alpha_eta = alpha_eta
        # Step-size schedule: epsilon_t = (tau + t)^(-kappa).
        self._tau = tau0 + 1
        self._kappa = kappa
        self._counter = 0
        self._epsilon = pow(self._tau + self._counter, -self._kappa)
        # Initialize the variational distribution q(beta|lambda)
        self._beta = 1*numpy.random.gamma(100., 1./100., (self._number_of_topics, self._vocab_size))
        self._exp_E_log_beta = numpy.exp(compute_dirichlet_expectation(self._beta));
    def parse_doc_list(self, docs):
        """Tokenize raw documents into word ids (subclass hook)."""
        raise NotImplementedError;
    def e_step(self, wordids):
        """Variational e-step over a mini-batch (subclass hook)."""
        raise NotImplementedError;
    def m_step(self, batch_size, sstats):
        """Blend one mini-batch's sufficient statistics into ``_beta`` and
        return the corpus-level ELBO contribution (0 when _compute_elbo
        is False)."""
        # rhot will be between 0 and 1, and says how much to weight the information we got from this mini-batch.
        self._epsilon = pow(self._tau + self._counter, -self._kappa)
        # update lambda based on documents.
        self._beta = self._beta * (1-self._epsilon) + self._epsilon * (self._alpha_eta + self._number_of_documents * sstats / batch_size);
        expect_log_beta = compute_dirichlet_expectation(self._beta);
        self._exp_E_log_beta = numpy.exp(expect_log_beta);
        corpus_level_elbo = 0;
        if self._compute_elbo:
            # Dirichlet terms of the ELBO for the topic-word distributions.
            corpus_level_elbo += numpy.sum((self._alpha_eta - self._beta) * expect_log_beta);
            corpus_level_elbo += numpy.sum(scipy.special.gammaln(self._beta) - scipy.special.gammaln(self._alpha_eta));
            corpus_level_elbo += numpy.sum(scipy.special.gammaln(self._alpha_eta * self._vocab_size) - scipy.special.gammaln(numpy.sum(self._beta, 1)))
        return corpus_level_elbo;
    def learning(self, docs):
        """Run inference over ``docs`` (subclass hook)."""
        raise NotImplementedError;
    """
    """
    def export_beta(self, exp_beta_path, top_display=-1):
        """Write each topic's word weights to ``exp_beta_path``, most
        probable words first; ``top_display`` > 0 truncates each topic.
        NOTE(review): the output file handle is never explicitly closed."""
        self._exp_E_log_beta = numpy.exp(compute_dirichlet_expectation(self._beta));
        output = open(exp_beta_path, 'w');
        for k in xrange(self._number_of_topics):
            output.write("==========\t%d\t==========\n" %(k));
            freqdist = nltk.probability.FreqDist();
            freqdist.clear();
            for word in self._vocab.keys():
                #freqdist.inc(word, self._exp_E_log_beta[k, self._vocab[word]]);
                freqdist[word]+=self._exp_E_log_beta[k, self._vocab[word]];
            i=0;
            for key in freqdist.keys():
                i += 1;
                output.write(key + "\t" + str(freqdist[key]) + "\n");
                if top_display>0 and i>=top_display:
                    break;
| {
"content_hash": "6d2d2376bbad1fa4fc3fb07b20da6d0c",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 151,
"avg_line_length": 39.72164948453608,
"alnum_prop": 0.559563976122502,
"repo_name": "kzhai/InfVocLDA",
"id": "d32f1d5a74b76f9fdbe969a00fd6bc24eadb7996",
"size": "3853",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fixvoc/inferencer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "92271"
}
],
"symlink_target": ""
} |
from functools import partial, wraps
from slm_lab.lib import logger, optimizer, util
import os
import pydash as ps
import torch
import torch.nn as nn
logger = logger.get_logger(__name__)  # module logger (intentionally rebinds the imported name)
# register custom torch.optim
# Expose the project's custom optimizers under torch.optim so specs can
# resolve them by name via getattr(torch.optim, name) in get_optim().
setattr(torch.optim, 'GlobalAdam', optimizer.GlobalAdam)
setattr(torch.optim, 'GlobalRMSprop', optimizer.GlobalRMSprop)
setattr(torch.optim, 'Lookahead', optimizer.Lookahead)
setattr(torch.optim, 'RAdam', optimizer.RAdam)
class NoOpLRScheduler:
    '''Symbolic LRScheduler class for API consistency'''

    def __init__(self, optim):
        self.optim = optim

    def step(self, epoch=None):
        '''Deliberate no-op; a real scheduler would adjust the lr here.'''
        pass

    def get_lr(self):
        '''Return the optimizer's configured learning rate.'''
        if not hasattr(self.optim, 'defaults'):
            # TODO retrieve lr more generally
            return self.optim.param_groups[0]['lr']
        return self.optim.defaults['lr']
def build_fc_model(dims, activation=None):
    '''Build a fully-connected model by interleaving nn.Linear and activation layers.

    dims is [in, h1, ..., out]; one Linear is created per consecutive pair,
    each followed by the named activation layer when one is given.
    '''
    assert len(dims) >= 2, 'dims need to at least contain input, output'
    layers = []
    for idx in range(len(dims) - 1):
        layers.append(nn.Linear(dims[idx], dims[idx + 1]))
        if activation is not None:
            layers.append(get_activation_fn(activation))
    return nn.Sequential(*layers)
def get_nn_name(uncased_name):
    '''Helper to get the proper name in PyTorch nn given a case-insensitive name'''
    target = uncased_name.lower()
    for candidate in nn.__dict__:
        if candidate.lower() == target:
            return candidate
    raise ValueError(f'Name {uncased_name} not found in {nn.__dict__}')
def get_activation_fn(activation):
    '''Helper to generate activation function layers for net'''
    # resolve the case-insensitive name, then instantiate the layer
    return getattr(nn, get_nn_name(activation))()
def get_loss_fn(cls, loss_spec):
    '''Helper to parse loss param and construct loss_fn for net'''
    LossClass = getattr(nn, get_nn_name(loss_spec['name']))
    # everything except 'name' is passed through as loss kwargs
    kwargs = {k: v for k, v in loss_spec.items() if k != 'name'}
    return LossClass(**kwargs)
def get_lr_scheduler(optim, lr_scheduler_spec):
    '''Helper to parse lr_scheduler param and construct Pytorch optim.lr_scheduler'''
    if ps.is_empty(lr_scheduler_spec):
        return NoOpLRScheduler(optim)
    scheduler_name = lr_scheduler_spec['name']
    if scheduler_name == 'LinearToZero':
        # linear decay to zero over `frame` steps, implemented via LambdaLR
        total_frames = float(lr_scheduler_spec['frame'])
        LRSchedulerClass = getattr(torch.optim.lr_scheduler, 'LambdaLR')
        return LRSchedulerClass(optim, lr_lambda=lambda x: 1 - x / total_frames)
    LRSchedulerClass = getattr(torch.optim.lr_scheduler, scheduler_name)
    kwargs = ps.omit(lr_scheduler_spec, 'name')
    return LRSchedulerClass(optim, **kwargs)
def get_optim(net, optim_spec):
    '''Helper to parse optim param and construct optim for net'''
    OptimClass = getattr(torch.optim, optim_spec['name'])
    # everything except 'name' is passed through as optimizer kwargs
    kwargs = {k: v for k, v in optim_spec.items() if k != 'name'}
    if torch.is_tensor(net):  # for non-net tensor variable
        return OptimClass([net], **kwargs)
    return OptimClass(net.parameters(), **kwargs)
def get_policy_out_dim(body):
    '''Helper method to construct the policy network out_dim for a body according to is_discrete, action_type'''
    action_dim = body.action_dim
    if body.is_discrete:
        if body.action_type == 'multi_discrete':
            # a list of head sizes, one per discrete action dimension
            assert ps.is_list(action_dim), action_dim
        else:
            assert ps.is_integer(action_dim), action_dim
        return action_dim
    # continuous actions
    assert ps.is_integer(action_dim), action_dim
    if action_dim == 1:  # single action, use [loc, scale]
        return 2
    return [action_dim, action_dim]  # multi-action, use [locs], [scales]
def get_out_dim(body, add_critic=False):
    '''Construct the NetClass out_dim for a body according to is_discrete, action_type, and whether to add a critic unit'''
    policy_out_dim = get_policy_out_dim(body)
    if not add_critic:
        return policy_out_dim
    # append a single critic value unit to the policy head(s)
    if ps.is_list(policy_out_dim):
        return policy_out_dim + [1]
    return [policy_out_dim, 1]
def init_layers(net, init_fn_name):
    '''Primary method to initialize the weights of the layers of a network'''
    if init_fn_name is None:
        return
    # resolve the activation nonlinearity name used for kaiming/gain args
    nonlinearity = get_nn_name(net.hid_layers_activation).lower()
    if nonlinearity == 'leakyrelu':
        nonlinearity = 'leaky_relu'  # guard name
    init_fn = getattr(nn.init, init_fn_name)
    if 'kaiming' in init_fn_name:  # has 'nonlinearity' as arg
        assert nonlinearity in ['relu', 'leaky_relu'], f'Kaiming initialization not supported for {nonlinearity}'
        init_fn = partial(init_fn, nonlinearity=nonlinearity)
    elif 'orthogonal' in init_fn_name or 'xavier' in init_fn_name:  # has 'gain' as arg
        init_fn = partial(init_fn, gain=nn.init.calculate_gain(nonlinearity))
    # apply init_params to every layer module in the network
    net.apply(partial(init_params, init_fn=init_fn))
def init_params(module, init_fn):
    '''Initialize module's weights using init_fn, and biases to 0.0'''
    bias_init = 0.0
    classname = util.get_class_name(module)
    if 'Net' in classname:  # skip if it's a net, not pytorch layer
        return
    if classname == 'BatchNorm2d':
        return  # can't init BatchNorm2d
    if any(k in classname for k in ('Conv', 'Linear')):
        init_fn(module.weight)
        nn.init.constant_(module.bias, bias_init)
    elif 'GRU' in classname:
        # GRUs carry several weight/bias tensors; initialize each by name
        for name, param in module.named_parameters():
            if 'weight' in name:
                init_fn(param)
            elif 'bias' in name:
                nn.init.constant_(param, bias_init)
# params methods
def save(net, model_path):
    '''Save model weights to path'''
    # state_dict only (no architecture); path resolved via util.smart_path
    torch.save(net.state_dict(), util.smart_path(model_path))
def save_algorithm(algorithm, ckpt=None):
    '''Save all the nets (and their optims) for an algorithm; ckpt appends a checkpoint suffix to the file prefix'''
    agent = algorithm.agent
    net_names = algorithm.net_names
    model_prepath = agent.spec['meta']['model_prepath']
    if ckpt is not None:
        model_prepath += f'_ckpt-{ckpt}'
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        model_path = f'{model_prepath}_{net_name}_model.pt'
        save(net, model_path)
        # the optim attribute name mirrors the net name ('net' -> 'optim')
        optim_name = net_name.replace('net', 'optim')
        optim = getattr(algorithm, optim_name, None)
        if optim is not None:  # only trainable net has optim
            optim_path = f'{model_prepath}_{net_name}_optim.pt'
            save(optim, optim_path)
    logger.debug(f'Saved algorithm {util.get_class_name(algorithm)} nets {net_names} to {model_prepath}_*.pt')
def load(net, model_path):
    '''Load model weights from a path into a net module'''
    # map to CPU when CUDA is unavailable so GPU-trained weights still load
    device = None if torch.cuda.is_available() else 'cpu'
    net.load_state_dict(torch.load(util.smart_path(model_path), map_location=device))
def load_algorithm(algorithm):
    '''Load all the nets (and their optims) for an algorithm'''
    agent = algorithm.agent
    net_names = algorithm.net_names
    model_prepath = agent.spec['meta']['model_prepath']
    if util.get_lab_mode() == 'enjoy':
        # enjoy mode always replays the best checkpoint
        model_prepath += '_ckpt-best'
    logger.info(f'Loading algorithm {util.get_class_name(algorithm)} nets {net_names} from {model_prepath}_*.pt')
    for net_name in net_names:
        net = getattr(algorithm, net_name)
        model_path = f'{model_prepath}_{net_name}_model.pt'
        load(net, model_path)
        # the optim attribute name mirrors the net name ('net' -> 'optim')
        optim_name = net_name.replace('net', 'optim')
        optim = getattr(algorithm, optim_name, None)
        if optim is not None:  # only trainable net has optim
            optim_path = f'{model_prepath}_{net_name}_optim.pt'
            load(optim, optim_path)
def copy(src_net, tar_net):
    '''Copy model weights from src to target. NOTE: shadows the builtin/stdlib name `copy` in this module'''
    tar_net.load_state_dict(src_net.state_dict())
def polyak_update(src_net, tar_net, old_ratio=0.5):
    '''
    Polyak weight update to update a target tar_net, retain old weights by its ratio, i.e.
    target <- old_ratio * source + (1 - old_ratio) * target
    '''
    new_ratio = 1.0 - old_ratio
    for src_param, tar_param in zip(src_net.parameters(), tar_net.parameters()):
        blended = old_ratio * src_param.data + new_ratio * tar_param.data
        tar_param.data.copy_(blended)
def to_check_train_step():
    '''Condition for running assert_trained'''
    # short-circuit on the env var first, then fall back to lab mode
    if os.environ.get('PY_ENV') == 'test':
        return True
    return util.get_lab_mode() == 'dev'
def dev_check_train_step(fn):
    '''
    Decorator to check if net.train_step actually updates the network weights properly
    Triggers only if to_check_train_step is True (dev/test mode)
    @example

    @net_util.dev_check_train_step
    def train_step(self, ...):
        ...
    '''
    @wraps(fn)
    def check_fn(*args, **kwargs):
        if not to_check_train_step():
            # production path: no instrumentation overhead
            return fn(*args, **kwargs)

        net = args[0]  # first arg self
        # get pre-update parameters to compare
        pre_params = [param.clone() for param in net.parameters()]

        # run train_step, get loss
        loss = fn(*args, **kwargs)
        assert not torch.isnan(loss).any(), loss

        # get post-update parameters to compare
        post_params = [param.clone() for param in net.parameters()]
        if loss == 0.0:
            # if loss is 0, there should be no updates
            # TODO if without momentum, parameters should not change too
            for p_name, param in net.named_parameters():
                assert param.grad.norm() == 0
        else:
            # check parameter updates
            try:
                assert not all(torch.equal(w1, w2) for w1, w2 in zip(pre_params, post_params)), f'Model parameter is not updated in train_step(), check if your tensor is detached from graph. Loss: {loss:g}'
            except Exception as e:
                # only fatal under unit test; dev mode just logs the failure
                logger.error(e)
                if os.environ.get('PY_ENV') == 'test':
                    # raise error if in unit test
                    raise(e)

            # check grad norms
            min_norm, max_norm = 0.0, 1e5
            for p_name, param in net.named_parameters():
                try:
                    grad_norm = param.grad.norm()
                    assert min_norm < grad_norm < max_norm, f'Gradient norm for {p_name} is {grad_norm:g}, fails the extreme value check {min_norm} < grad_norm < {max_norm}. Loss: {loss:g}. Check your network and loss computation.'
                except Exception as e:
                    # extreme norms are logged, never fatal
                    logger.warning(e)
            logger.debug('Passed network parameter update check.')
        # store grad norms for debugging
        net.store_grad_norms()
        return loss
    return check_fn
def get_grad_norms(algorithm):
    '''Gather all the net's grad norms of an algorithm for debugging'''
    grad_norms = []
    for net_name in algorithm.net_names:
        net_grad_norms = getattr(algorithm, net_name).grad_norms
        if net_grad_norms is not None:
            grad_norms.extend(net_grad_norms)
    return grad_norms
def init_global_nets(algorithm):
    '''
    Initialize global_nets for Hogwild using an identical instance of an algorithm from an isolated Session
    in spec.meta.distributed, specify either:
    - 'shared': global network parameter is shared all the time. In this mode, algorithm local network will be replaced directly by global_net via overriding by identify attribute name
    - 'synced': global network parameter is periodically synced to local network after each gradient push. In this mode, algorithm will keep a separate reference to `global_{net}` for each of its network
    '''
    dist_mode = algorithm.agent.spec['meta']['distributed']
    assert dist_mode in ('shared', 'synced'), f'Unrecognized distributed mode'
    global_nets = {}
    for net_name in algorithm.net_names:
        optim_name = net_name.replace('net', 'optim')
        if not hasattr(algorithm, optim_name):  # only for trainable network, i.e. has an optim
            continue
        g_net = getattr(algorithm, net_name)
        g_net.share_memory()  # make net global
        if dist_mode == 'shared':  # use the same name to override the local net
            global_nets[net_name] = g_net
        else:  # keep a separate reference for syncing
            global_nets[f'global_{net_name}'] = g_net
        # if optim is Global, set to override the local optim and its scheduler
        optim = getattr(algorithm, optim_name)
        if hasattr(optim, 'share_memory'):
            optim.share_memory()  # make optim global
            global_nets[optim_name] = optim
            if hasattr(optim, 'optimizer'):  # for Lookahead with an inner optimizer
                # stored flat here; set_global_nets() re-attaches it to the optim
                global_nets[f'{optim_name}_optimizer'] = optim.optimizer
            lr_scheduler_name = net_name.replace('net', 'lr_scheduler')
            lr_scheduler = getattr(algorithm, lr_scheduler_name)
            global_nets[lr_scheduler_name] = lr_scheduler
    logger.info(f'Initialized global_nets attr {list(global_nets.keys())} for Hogwild')
    return global_nets
def set_global_nets(algorithm, global_nets):
    '''For Hogwild, set attr built in init_global_nets above. Use in algorithm init.'''
    # set attr first so algorithm always has self.global_{net} to pass into train_step
    for net_name in algorithm.net_names:
        setattr(algorithm, f'global_{net_name}', None)
    # set attr created in init_global_nets
    if global_nets is not None:
        # handle inner-optimizer recovery
        inner_opt_keys = [k for k in global_nets if k.endswith('_optimizer')]
        for inner_opt_key in inner_opt_keys:
            # '{optim}_optimizer' was flattened by init_global_nets;
            # re-attach it to its owning optimizer and drop the flat entry
            opt = global_nets[inner_opt_key.replace('_optimizer', '')]  # optimizer which has a inner optimizer
            setattr(opt, 'optimizer', global_nets.pop(inner_opt_key))
        # set global nets and optims
        util.set_attr(algorithm, global_nets)
        logger.info(f'Set global_nets attr {list(global_nets.keys())} for Hogwild')
def push_global_grads(net, global_net):
    '''Push gradients to global_net, call inside train_step between loss.backward() and optim.step()'''
    param_pairs = zip(net.parameters(), global_net.parameters())
    for local_param, global_param in param_pairs:
        if global_param.grad is not None:
            # gradients already present globally; skip the whole push
            return
        global_param._grad = local_param.grad
| {
"content_hash": "9ac5b10a1af36abc6a8cbc0177c9d451",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 231,
"avg_line_length": 40.24657534246575,
"alnum_prop": 0.6365554799183117,
"repo_name": "kengz/Unity-Lab",
"id": "79244ad4ee49621e5aec239858951f0fc6311fb3",
"size": "14690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slm_lab/agent/net/net_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22638"
},
{
"name": "Shell",
"bytes": "6829"
}
],
"symlink_target": ""
} |
"""
Default settings for the ``mezzanine.blog`` app. Each of these can be
overridden in your project's settings module, just like regular
Django settings. The ``editable`` argument for each controls whether
the setting is editable via Django's admin.
Thought should be given to how a setting is actually used before
making it editable, as it may be inappropriate - for example settings
that are only read during startup shouldn't be editable, since changing
them would require an application reload.
"""
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import register_setting
# bit.ly credentials used when shortening blog post URLs.
register_setting(
    name="BLOG_BITLY_USER",
    label=_("bit.ly username"),
    description=_("Username for http://bit.ly URL shortening service."),
    editable=True,
    default="",
)
register_setting(
    name="BLOG_BITLY_KEY",
    label=_("bit.ly key"),
    description=_("Key for http://bit.ly URL shortening service."),
    editable=True,
    default="",
)
# Read at startup, hence not admin-editable.
register_setting(
    name="BLOG_USE_FEATURED_IMAGE",
    description=_("Enable featured images in blog posts"),
    editable=False,
    default=False,
)
# Affects URL routing, hence not admin-editable.
register_setting(
    name="BLOG_URLS_USE_DATE",
    label=_("Use date URLs"),
    description=_("If ``True``, URLs for blog post include the month and "
        "year. Eg: /blog/yyyy/mm/slug/"),
    editable=False,
    default=False,
)
# Pagination controls for blog listing pages.
register_setting(
    name="BLOG_POST_PER_PAGE",
    label=_("Blog posts per page"),
    description=_("Number of blog posts shown on a blog listing page."),
    editable=True,
    default=5,
)
register_setting(
    name="BLOG_MAX_POST_PER_PAGE",
    label=_("Max blog posts per page"),
    description=_("Maximum number of blog posts shown on a blog listing page."),
    editable=True,
    default=20,
)
# Affects URL routing, hence not admin-editable.
register_setting(
    name="BLOG_SLUG",
    description=_("Slug of the page object for the blog."),
    editable=False,
    default="blog",
)
| {
"content_hash": "b00eff835683db572c8b16d5324e7478",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 80,
"avg_line_length": 27.014084507042252,
"alnum_prop": 0.6882168925964547,
"repo_name": "guibernardino/mezzanine",
"id": "c8e1eb7ca826f53fbaad489520b2995958472e55",
"size": "1918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/blog/defaults.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "22201"
},
{
"name": "JavaScript",
"bytes": "61430"
},
{
"name": "Python",
"bytes": "832496"
}
],
"symlink_target": ""
} |
"""Statistical functions."""
# pylint: disable=unused-import,line-too-long,g-importing-member
from tensorflow_probability.python.internal import all_util
from tensorflow_probability.python.stats.calibration import brier_decomposition
from tensorflow_probability.python.stats.calibration import brier_score
from tensorflow_probability.python.stats.calibration import expected_calibration_error
from tensorflow_probability.python.stats.calibration import expected_calibration_error_quantiles
from tensorflow_probability.python.stats.kendalls_tau import kendalls_tau
from tensorflow_probability.python.stats.leave_one_out import log_loomean_exp
from tensorflow_probability.python.stats.leave_one_out import log_loosum_exp
from tensorflow_probability.python.stats.leave_one_out import log_soomean_exp
from tensorflow_probability.python.stats.leave_one_out import log_soosum_exp
from tensorflow_probability.python.stats.moving_stats import assign_log_moving_mean_exp
from tensorflow_probability.python.stats.moving_stats import assign_moving_mean_variance
from tensorflow_probability.python.stats.moving_stats import moving_mean_variance_zero_debiased
from tensorflow_probability.python.stats.quantiles import count_integers
from tensorflow_probability.python.stats.quantiles import find_bins
from tensorflow_probability.python.stats.quantiles import histogram
from tensorflow_probability.python.stats.quantiles import percentile
from tensorflow_probability.python.stats.quantiles import quantiles
from tensorflow_probability.python.stats.ranking import quantile_auc
from tensorflow_probability.python.stats.sample_stats import auto_correlation
from tensorflow_probability.python.stats.sample_stats import cholesky_covariance
from tensorflow_probability.python.stats.sample_stats import correlation
from tensorflow_probability.python.stats.sample_stats import covariance
from tensorflow_probability.python.stats.sample_stats import cumulative_variance
from tensorflow_probability.python.stats.sample_stats import log_average_probs
from tensorflow_probability.python.stats.sample_stats import stddev
from tensorflow_probability.python.stats.sample_stats import variance
from tensorflow_probability.python.stats.sample_stats import windowed_mean
from tensorflow_probability.python.stats.sample_stats import windowed_variance
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
# Public API of tfp.stats; kept in sync with the imports above and
# enforced by remove_undocumented below.
__all__ = [
    'assign_log_moving_mean_exp',
    'assign_moving_mean_variance',
    'auto_correlation',
    'brier_decomposition',
    'brier_score',
    'cholesky_covariance',
    'correlation',
    'count_integers',
    'covariance',
    'cumulative_variance',
    'expected_calibration_error',
    'expected_calibration_error_quantiles',
    'find_bins',
    'histogram',
    'kendalls_tau',
    'log_average_probs',
    'log_loomean_exp',
    'log_loosum_exp',
    'log_soomean_exp',
    'log_soosum_exp',
    'moving_mean_variance_zero_debiased',
    'percentile',
    'quantile_auc',
    'quantiles',
    'stddev',
    'variance',
    'windowed_mean',
    'windowed_variance',
]

# Strip anything not listed in __all__ from the module's public surface.
all_util.remove_undocumented(__name__, __all__)
| {
"content_hash": "c1b333555cbe821b206677cb0670f5d5",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 96,
"avg_line_length": 46.13235294117647,
"alnum_prop": 0.8020401657634683,
"repo_name": "tensorflow/probability",
"id": "dd75111e11719433441662168c745b71ee98c8ce",
"size": "3815",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_probability/python/stats/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55552121"
},
{
"name": "Python",
"bytes": "17339674"
},
{
"name": "Shell",
"bytes": "24852"
},
{
"name": "Starlark",
"bytes": "663851"
}
],
"symlink_target": ""
} |
from settings import CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET
import oauth2 as oauth
class SingletonTwAuthentication(object):
    """
    Singleton Pattern for OAUTH Twitter Authentication Process

    The OAuth client is built once on first instantiation and shared by
    every subsequent instance via the class attributes below.
    """
    _instance = None
    _client = None

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            # first construction: build the shared OAuth client
            consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
            token = oauth.Token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
            cls._instance = object.__new__(cls)
            cls._client = oauth.Client(consumer, token)
        return cls._instance
class TwAuthentication(SingletonTwAuthentication):
    # Public accessor over the singleton: always hands back the shared client.
    def get_client(self):
        return self._client
| {
"content_hash": "9a23d951eab188a0f5b359283afb97fc",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 85,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.6648351648351648,
"repo_name": "Raul-diffindo/TwRestApiPlaces",
"id": "1b8c4d8a5bae9a1a1e066b93281c4dd4975f86c8",
"size": "729",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "TwRestApiPlaces/tw_authentication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43400"
}
],
"symlink_target": ""
} |
import requests
import warnings
import sys
class Monitor:
    """Thin wrapper around the OANDA practice-API pricing endpoint."""

    def __init__(self, accountID, token):
        """Store the bearer token and build the pricing URL for accountID."""
        self.token = token
        base = "https://api-fxpractice.oanda.com/v3/accounts/"
        self.pricingApi = base + accountID + "/pricing"

    #currencies is a list of strings
    def getBidsAndAsks(self, currencies):
        """Fetch current pricing for `currencies`; returns the first price
        dict on HTTP 200, otherwise None."""
        headers = {"Authorization": "Bearer " + self.token}
        params = {"instruments": currencies}
        response = requests.get(self.pricingApi, headers=headers, params=params)
        if response.status_code != 200:
            return None
        return response.json()["prices"][0]
| {
"content_hash": "94921cbf648b43ea3b7c7db92157301c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 29.68421052631579,
"alnum_prop": 0.723404255319149,
"repo_name": "atanasAV/forex-trading-api",
"id": "15c597c94667a96ae9ab7a9f03b32cb2b1c65f1c",
"size": "564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tradingModule/monitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2546"
}
],
"symlink_target": ""
} |
import random
import json
# Seed once at import from system entropy/time (no fixed seed).
random.seed()
def d6():
    """Simulate the roll of a 6 sided die (uniform integer in 1..6)."""
    return random.randint(1, 6)
def two_d6():
    """Simulate the roll of two 6 sided die (sum of two independent 1..6 draws, i.e. 2..12)."""
    return random.randint(1, 6) + random.randint(1, 6)
def d3():
    """Simulate the roll of a 3 sided die (uniform integer in 1..3)."""
    return random.randint(1, 3)
# World-generation lookup tables, loaded from JSON in the working directory.
# Keys are die-roll values as strings (JSON object keys).
with open('atmospheres.json', 'r') as f:
    atmospheres = json.load(f)
with open('temperatures.json', "r") as f:
    temperatures = json.load(f)
with open('governments.json', "r") as f:
    governments = json.load(f)
with open('factionstrength.json', "r") as f:
    fac_strength = json.load(f)
with open('cultures.json', "r") as f:
    cultures = json.load(f)
with open('starportclass.json', "r") as f:
    starport_classes = json.load(f)
with open('starporttype.json', "r") as f:
    starport_types = json.load(f)
class Hex:
    """Hex Class to contain all the data for each hex of the subsector.

    A non-empty hex rolls up a complete primary world using the Traveller
    style generation sequence.  The gen_* methods must run in the order used
    by __init__, because later rolls are modified by earlier statistics.
    """
    def __init__(self, empty=True, name=""):
        """Create a hex; when ``empty`` is False, generate a full world."""
        self.is_empty = empty
        self.name = name
        # World statistics; None / empty until the generation methods run.
        self.size = None
        self.atmos_type = None
        self.atmos_num = None
        self.gravity = None
        self.temperature = None
        self.temp_num = None
        self.hydro = None
        self.pop = None
        self.gov_num = None
        self.gov_type = None
        self.factions = []
        self.culture_num = None
        self.culture_type = None
        self.law = None
        self.starport_class = None
        self.starport_type = None
        self.tech = None
        self.bases = []
        self.travel_code = None
        self.trade_codes = []
        self.gas_giant = False
        if not self.is_empty:
            # Generation order matters: later rolls depend on earlier stats.
            self.gen_size()
            self.gen_atmos()
            self.gen_temp()
            self.gen_hydro()
            self.gen_pop()
            self.gen_gov()
            self.gen_factions()
            self.gen_culture()
            self.gen_law()
            self.gen_starport()
            self.gen_tech()
            self.gen_bases()
            self.set_travel_code()
            self.set_trade_codes()
            self.gen_gas_giant()
    def gen_size(self):
        """Calculate the primary world's size (2d6 - 2, range 0-10)"""
        self.size = two_d6() - 2
        assert (self.size in range(11)), "Size set improperly"
        self.set_grav()
    def set_grav(self):
        """Set the gravity string for the primary world based on size"""
        assert (self.size in range(11)), "Size not set"
        if self.size <= 6:
            self.gravity = "low"
        elif self.size >= 10:
            self.gravity = "high"
        else:
            self.gravity = "normal"
    def gen_atmos(self):
        """Calculate the primary world's atmosphere based on size"""
        assert (self.size in range(11)), "Size not set"
        self.atmos_num = two_d6() - 7 + self.size
        if self.atmos_num < 0:
            self.atmos_num = 0
        self.set_atmos_type()
    def set_atmos_type(self):
        """Set the atmosphere type string based on the atmosphere"""
        assert (self.atmos_num in range(16)), "Atmosphere not set" + str(self.atmos_num)
        self.atmos_type = atmospheres[str(self.atmos_num)]
    def gen_temp(self):
        """Calculate the primary world's temperature"""
        self.temp_num = two_d6()
        self.set_temp_name()
    def set_temp_name(self):
        """Set the temperature name string based on the temperature"""
        assert (self.temp_num in range(-2, 17)), "Temperature not set"
        self.temperature = temperatures[str(self.temp_num)]
    def gen_hydro(self):
        """Calculate the amount of water on the primary world based on size atmosphere and temperature"""
        assert (self.size in range(11)), "Size not set"
        assert (self.atmos_num in range(16)), "Atmosphere not set"
        assert (self.temp_num in range(-2, 17)), "Temperature not set"
        # BUGFIX: was `self.size > 2`, which made every world larger than
        # size 2 a waterless desert and left the dice branches almost
        # unreachable.  Per the Traveller world-creation rules only
        # size 0-1 worlds automatically have hydrographics 0.
        if self.size < 2:
            self.hydro = 0
        elif self.atmos_num in [0, 1, 10, 11, 12]:
            self.hydro = two_d6() - 4
        elif self.atmos_num not in [13, 15]:
            # Hot/boiling worlds boil off surface water.
            if self.temperature == "Hot":
                self.hydro = two_d6() - 9
            elif self.temperature == "Boiling":
                self.hydro = two_d6() - 13
            else:
                self.hydro = two_d6() - 7
        else:
            self.hydro = two_d6() - 7
        if self.hydro < 0:
            self.hydro = 0
    def gen_pop(self):
        """Calculate the primary world's population (2d6 - 2)"""
        self.pop = two_d6() - 2
    def gen_gov(self):
        """Calculate the primary world's government type based on population"""
        assert (self.pop in range(16)), "Population not set"
        self.gov_num = two_d6() - 7 + self.pop
        if self.gov_num < 0:
            self.gov_num = 0
        self.set_gov_name()
    def set_gov_name(self):
        """Set the government name string based on government"""
        assert (self.gov_num in range(16)), "Government not set"
        self.gov_type = governments[str(self.gov_num)]
    def gen_factions(self):
        """Generate the primary world's number of factions and type of factions using government"""
        assert (self.gov_num in range(16)), "Government not set"
        fac_count = d3()
        # Anarchies and balkanized worlds breed factions; strong
        # authoritarian governments suppress them.
        if self.gov_num in [0, 7]:
            fac_count += 1
        if self.gov_num >= 10:
            fac_count -= 1
        if fac_count > 0:
            for _ in range(fac_count):
                gov = two_d6() - 7 + self.pop
                if gov < 0:
                    gov = 0
                strength = two_d6()
                # Each faction: [gov roll, strength roll, gov name, strength name].
                self.factions.append([gov, strength, governments[str(gov)], fac_strength[str(strength)]])
    def gen_culture(self):
        """Generate the primary world's culture (d66 roll: 11-66)"""
        self.culture_num = d6() * 10 + d6()
        self.set_culture_type()
    def set_culture_type(self):
        """Set the culture type string based on culture"""
        assert (self.culture_num in range(11, 67)), "Culture not set"
        self.culture_type = cultures[str(self.culture_num)]
    def gen_law(self):
        """Calculate the primary world's law level based on government"""
        assert (self.gov_num in range(16)), "Government not set"
        self.law = two_d6() - 7 + self.gov_num
        if self.law < 0:
            self.law = 0
        if self.law > 9:
            self.law = 9
    def gen_starport(self):
        """Calculate the primary world's starport based on population"""
        assert (self.pop in range(16)), "Population not set"
        starport_num = two_d6()
        if self.pop >= 10:
            starport_num += 2
        elif self.pop >= 8:
            starport_num += 1
        elif self.pop <= 2:
            starport_num -= 2
        elif self.pop <= 4:
            starport_num -= 1
        # Clamp to the table's range.
        if starport_num < 2:
            starport_num = 2
        elif starport_num > 11:
            starport_num = 11
        self.starport_class = starport_classes[str(starport_num)]
        self.set_starport_type()
    def set_starport_type(self):
        """Set starport type string based on starport class"""
        assert (self.starport_class in ["X", "A", "B", "C", "D", "E"]), "Starport class not set"
        self.starport_type = starport_types[str(self.starport_class)]
    # Backward-compatible alias for the original (misspelled) method name.
    set_starprot_type = set_starport_type
    def gen_tech(self):
        """Calculate the primary world's tech level"""
        assert (self.starport_class in ["X", "A", "B", "C", "D", "E"]), "Starport class not set"
        assert (self.size in range(11)), "Size not set"
        assert (self.atmos_num in range(16)), "Atmosphere not set"
        assert (self.hydro in range(16)), "Hydrographics not set"
        assert (self.pop in range(16)), "Population not set"
        assert (self.gov_num in range(16)), "Government not set"
        self.tech = d6()
        # Starport quality is the largest tech modifier (+6 .. -4).
        if self.starport_class == "X":
            self.tech -= 4
        elif self.starport_class == "A":
            self.tech += 6
        elif self.starport_class == "B":
            self.tech += 4
        elif self.starport_class == "C":
            self.tech += 2
        if self.size < 2:
            self.tech += 2
        elif self.size < 5:
            self.tech += 1
        if self.atmos_num in [0, 1, 2, 3, 10, 11, 12, 13, 14, 15]:
            self.tech += 1
        if self.hydro in [0, 9]:
            self.tech += 1
        elif self.hydro == 10:
            self.tech += 2
        if self.pop in [1, 2, 3, 4, 5, 8]:
            self.tech += 1
        elif self.pop == 9:
            self.tech += 2
        elif self.pop == 10:
            self.tech += 4
        if self.gov_num in [0, 5]:
            self.tech += 1
        elif self.gov_num == 7:
            self.tech += 2
        elif self.gov_num in [13, 14]:
            self.tech -= 2
        # Maximum achievable value here is 23 (6+6+2+1+2+4+2).
        if self.tech < 0:
            self.tech = 0
    def gen_bases(self):
        """Calculate the primary world's bases (Naval/Scout/Research/TAS)"""
        assert (self.starport_class in ["X", "A", "B", "C", "D", "E"]), "Starport class not set"
        if self.starport_class == "A":
            if two_d6() > 8:
                self.bases.append("N")
            if two_d6() > 10:
                self.bases.append("S")
            if two_d6() > 8:
                self.bases.append("R")
            self.bases.append("T")
        elif self.starport_class == "B":
            if two_d6() > 8:
                self.bases.append("N")
            if two_d6() > 8:
                self.bases.append("S")
            if two_d6() > 10:
                self.bases.append("R")
            self.bases.append("T")
        elif self.starport_class == "C":
            if two_d6() > 8:
                self.bases.append("S")
            if two_d6() > 10:
                self.bases.append("R")
            if two_d6() > 10:
                self.bases.append("T")
        elif self.starport_class == "D":
            if two_d6() > 7:
                self.bases.append("S")
    def set_travel_code(self):
        """Set the travel code based on atmosphere, government, and law."""
        assert (self.atmos_num in range(16)), "Atmosphere not set"
        assert (self.gov_num in range(16)), "Government not set"
        assert (self.law in range(10)), "Law not set"
        if self.atmos_num >= 10:
            self.travel_code = "A"
        elif self.gov_num in {0, 7, 10}:
            self.travel_code = "A"
        elif self.law in {0, 9}:
            self.travel_code = "A"
    def set_trade_codes(self):
        """Set the trade codes based on size, atmosphere, hydrographics, population, government, law, and tech."""
        assert (self.size in range(11)), "Size not set"
        assert (self.atmos_num in range(16)), "Atmosphere not set"
        assert (self.hydro in range(16)), "Hydrographics not set"
        assert (self.pop in range(16)), "Population not set"
        assert (self.gov_num in range(16)), "Government not set"
        assert (self.law in range(10)), "Law not set"
        assert (self.tech in range(26)), "Tech not set " + str(self.tech)
        if self.atmos_num in range(4, 10) and self.hydro in range(4, 9) and self.pop in range(5, 8):
            self.trade_codes.append("Ag")
        if self.size == 0 and self.atmos_num == 0 and self.hydro == 0:
            self.trade_codes.append("As")
        if self.pop == 0 and self.gov_num == 0 and self.law == 0:
            self.trade_codes.append("Ba")
        if self.atmos_num >= 2 and self.hydro == 0:
            self.trade_codes.append("De")
        if self.atmos_num >= 10 and self.hydro >= 1:
            self.trade_codes.append("Fl")
        if self.size in range(6, 8) and self.atmos_num in [5, 6, 8] and self.hydro in range(5, 8):
            self.trade_codes.append("Ga")
        if self.pop >= 9:
            self.trade_codes.append("Hi")
        if self.tech >= 12:
            self.trade_codes.append("Ht")
        if self.atmos_num in [0, 1] and self.hydro >= 1:
            self.trade_codes.append("Ie")
        if self.atmos_num in [0, 1, 2, 4, 7, 9] and self.pop >= 9:
            self.trade_codes.append("In")
        if self.pop <= 3:
            self.trade_codes.append("Lo")
        if self.tech <= 5:
            self.trade_codes.append("Lt")
        if self.atmos_num in range(0, 4) and self.hydro in range(0, 4) and self.pop >= 6:
            self.trade_codes.append("Na")
        if self.pop <= 6:
            self.trade_codes.append("NI")
        if self.atmos_num in range(2, 5) and self.hydro in range(0, 4):
            self.trade_codes.append("Po")
        if self.atmos_num in [6, 8] and self.pop in range(6, 9) and self.gov_num in range(4, 10):
            self.trade_codes.append("Ri")
        if self.atmos_num == 0:
            self.trade_codes.append("Va")
        if self.hydro >= 10:
            self.trade_codes.append("Wa")
    def gen_gas_giant(self):
        """Calculate the presence of a gas giant in the system"""
        if two_d6() <= 10:
            self.gas_giant = True
    def __str__(self):
        if self.is_empty:
            return ""
        assert (self.starport_class in ["X", "A", "B", "C", "D", "E"]), "Starport class not set " + self.starport_class
        assert (self.size in range(11)), "Size not set"
        # BUGFIX: was range(21); gen_tech can legitimately produce up to 23
        # (and set_trade_codes already accepts range(26)), so the old bound
        # raised AssertionError on valid high-tech worlds.
        assert (self.tech in range(26)), "Tech not set " + str(self.tech)
        assert (self.atmos_num in range(16)), "Atmosphere not set"
        assert (self.hydro in range(16)), "Hydrographics not set"
        assert (self.pop in range(16)), "Population not set"
        assert (self.gov_num in range(16)), "Government not set"
        assert (self.law in range(10)), "Law not set"
        # UWP-style code: starport + hex digits, then decimal tech level.
        code_str = "{0}{1:X}{2:X}{3:X}{4:X}{5:X}{6:X}-{7:<2}".format(self.starport_class, self.size, self.atmos_num,
                                                                     self.hydro, self.pop, self.gov_num, self.law, self.tech)
        for base in self.bases:
            code_str += " {0}".format(base)
        for trade in self.trade_codes:
            code_str += " {0}".format(trade)
        if self.travel_code:
            code_str = "{0:30} {1}".format(code_str, self.travel_code)
        return code_str
| {
"content_hash": "6c370775e5279abb78a9901b3e294aee",
"timestamp": "",
"source": "github",
"line_count": 387,
"max_line_length": 120,
"avg_line_length": 36.47803617571059,
"alnum_prop": 0.5341078132747751,
"repo_name": "Galidron/SectorBuild",
"id": "58b6740a36d4b07bec3543ac6006cfa2d4a9f864",
"size": "14117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16912"
}
],
"symlink_target": ""
} |
"""This module contains Google BigQuery to MSSQL operator."""
from typing import TYPE_CHECKING, List, Optional, Sequence, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.providers.google.cloud.utils.bigquery_get_data import bigquery_get_data
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class BigQueryToMsSqlOperator(BaseOperator):
    """
    Copy data from a BigQuery table into an MSSQL table.

    Rows are fetched from BigQuery in batches of ``batch_size`` (optionally
    restricted to ``selected_fields``) and inserted into ``mssql_table``.

    .. note::
        ``selected_fields`` does not reorder the output: rows always come
        back in the column order of the BigQuery table.  For a BQ table with
        columns ``[A,B,C]``, passing ``'B,A'`` still yields data in the
        ``'A,B'`` order, and that is what gets written to MSSQL.
    **Example**: ::

        transfer_data = BigQueryToMsSqlOperator(
            task_id='task_id',
            source_project_dataset_table='my-project.mydataset.mytable',
            mssql_table='dest_table_name',
            replace=True,
        )

    :param source_project_dataset_table: dotted ``<project>.<dataset>.<table>``
        reference to the BigQuery table of origin
    :param selected_fields: comma-separated list of fields to return; all
        fields when unspecified
    :param gcp_conn_id: reference to a specific Google Cloud hook
    :param delegate_to: account to impersonate using domain-wide delegation of
        authority, if any; the requesting service account must have
        domain-wide delegation enabled
    :param mssql_conn_id: reference to a specific mssql hook
    :param database: database name overriding the one defined in the connection
    :param replace: whether to replace instead of insert
    :param batch_size: number of rows fetched/inserted per batch
    :param location: location used for the operation
    :param impersonation_chain: optional service account (or chained list of
        accounts) to impersonate with short-term credentials; a sequence means
        each identity grants Service Account Token Creator to the next, with
        the first granted by the originating account (templated)
    """

    template_fields: Sequence[str] = ('source_project_dataset_table', 'mssql_table', 'impersonation_chain')

    def __init__(
        self,
        *,
        source_project_dataset_table: str,
        mssql_table: str,
        selected_fields: Optional[Union[List[str], str]] = None,
        gcp_conn_id: str = 'google_cloud_default',
        mssql_conn_id: str = 'mssql_default',
        database: Optional[str] = None,
        delegate_to: Optional[str] = None,
        replace: bool = False,
        batch_size: int = 1000,
        location: Optional[str] = None,
        impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Validate the table reference up front so a bad value fails at
        # DAG-parse time instead of at execution time.
        parts = source_project_dataset_table.split('.')
        if len(parts) != 3:
            raise ValueError(
                f'Could not parse {source_project_dataset_table} as <project>.<dataset>.<table>'
            )
        _, self.dataset_id, self.table_id = parts
        self.source_project_dataset_table = source_project_dataset_table
        self.mssql_table = mssql_table
        self.selected_fields = selected_fields
        self.replace = replace
        self.batch_size = batch_size
        self.gcp_conn_id = gcp_conn_id
        self.mssql_conn_id = mssql_conn_id
        self.database = database
        self.delegate_to = delegate_to
        self.location = location
        self.impersonation_chain = impersonation_chain

    def execute(self, context: 'Context') -> None:
        """Stream batches of BigQuery rows into the MSSQL target table."""
        bq_hook = BigQueryHook(
            gcp_conn_id=self.gcp_conn_id,
            delegate_to=self.delegate_to,
            location=self.location,
            impersonation_chain=self.impersonation_chain,
        )
        target_hook = MsSqlHook(mssql_conn_id=self.mssql_conn_id, schema=self.database)
        batches = bigquery_get_data(
            self.log,
            self.dataset_id,
            self.table_id,
            bq_hook,
            self.batch_size,
            self.selected_fields,
        )
        for batch in batches:
            target_hook.insert_rows(
                table=self.mssql_table,
                rows=batch,
                target_fields=self.selected_fields,
                replace=self.replace,
            )
| {
"content_hash": "504e3a150cec0e7eb4763549b163dc2e",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 107,
"avg_line_length": 42.95762711864407,
"alnum_prop": 0.648254093509568,
"repo_name": "lyft/incubator-airflow",
"id": "ca63ff0d99b500dc024f463336e053ca602f1ea3",
"size": "5856",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/providers/google/cloud/transfers/bigquery_to_mssql.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "161328"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jinja",
"bytes": "8565"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10019710"
},
{
"name": "Shell",
"bytes": "220780"
}
],
"symlink_target": ""
} |
"""
This will generate a movie data set of 1100 records.
These are the first 1100 movies which appear when querying the Freebase of type '/film/film'.
Here is the link to the freebase page - https://www.freebase.com/film/film?schema=
Usage - python3 film_data_generator.py
"""
import csv
import copy
import json
import codecs
import datetime
import urllib.parse
import urllib.request
import xml.etree.cElementTree as ET
from xml.dom import minidom
MAX_ITERATIONS=10 #10 limits it to 1100 docs
# You need an API Key by Google to run this
API_KEY = '<insert your Google developer API key>'
service_url = 'https://www.googleapis.com/freebase/v1/mqlread'
query = [{
"id": None,
"name": None,
"initial_release_date": None,
"directed_by": [],
"genre": [],
"type": "/film/film",
"initial_release_date>" : "2000"
}]
def gen_csv(filmlist):
    """Write filmlist to films.csv, joining list-valued fields with '|'.

    The input list is deep-copied first, so callers' dicts are not mutated.
    """
    flattened = copy.deepcopy(filmlist)
    for film in flattened:
        for key, value in film.items():
            if isinstance(value, list):
                film[key] = '|'.join(value)
    fieldnames = ['name', 'directed_by', 'genre', 'type', 'id', 'initial_release_date']
    with open('films.csv', 'w', newline='', encoding='utf8') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames)
        writer.writeheader()
        writer.writerows(flattened)
def gen_json(filmlist):
    """Write filmlist to films.json as pretty-printed JSON.

    BUGFIX: the original made a deep copy of the list and never used it;
    json.dumps does not mutate its argument, so no copy is needed.
    """
    with open('films.json', 'w') as jsonfile:
        jsonfile.write(json.dumps(filmlist, indent=2))
def gen_xml(filmlist):
    """Write filmlist to films.xml in Solr <add><doc><field/>... format.

    Each film becomes one <doc>; list-valued fields produce one <field>
    element per value, scalar fields produce a single <field>.
    """
    root = ET.Element("add")
    for film in filmlist:
        doc = ET.SubElement(root, "doc")
        for key, value in film.items():
            values = value if isinstance(value, list) else [value]
            for item in values:
                field = ET.SubElement(doc, "field")
                field.set("name", key)
                field.text = item
    tree = ET.ElementTree(root)
    pretty = minidom.parseString(ET.tostring(tree.getroot(), 'utf-8')).toprettyxml(indent=" ")
    with open('films.xml', 'w') as f:
        f.write(pretty)
def do_query(filmlist, cursor=""):
params = {
'query': json.dumps(query),
'key': API_KEY,
'cursor': cursor
}
url = service_url + '?' + urllib.parse.urlencode(params)
data = urllib.request.urlopen(url).read().decode('utf-8')
response = json.loads(data)
for item in response['result']:
del item['type'] # It's always /film/film. No point of adding this.
try:
datetime.datetime.strptime(item['initial_release_date'], "%Y-%m-%d")
except ValueError:
#Date time not formatted properly. Keeping it simple by removing the date field from that doc
del item['initial_release_date']
filmlist.append(item)
return response.get("cursor")
if __name__ == "__main__":
filmlist = []
cursor = do_query(filmlist)
i=0
while(cursor):
cursor = do_query(filmlist, cursor)
i = i+1
if i==MAX_ITERATIONS:
break
gen_json(filmlist)
gen_csv(filmlist)
gen_xml(filmlist)
| {
"content_hash": "9f69d1bff4409a8c0b3c186f71e47b48",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 99,
"avg_line_length": 30.601941747572816,
"alnum_prop": 0.629758883248731,
"repo_name": "koneksys/KLD",
"id": "c36ba24b000a90dcf8d82e578c42d3c91b57c3d6",
"size": "3947",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "middleware/solr/example/films/film_data_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "291"
},
{
"name": "Batchfile",
"bytes": "51479"
},
{
"name": "CSS",
"bytes": "591037"
},
{
"name": "HTML",
"bytes": "64940813"
},
{
"name": "Java",
"bytes": "2813"
},
{
"name": "JavaScript",
"bytes": "2587494"
},
{
"name": "Python",
"bytes": "3947"
},
{
"name": "Ruby",
"bytes": "181387"
},
{
"name": "Shell",
"bytes": "99038"
},
{
"name": "Smarty",
"bytes": "12475"
},
{
"name": "XSLT",
"bytes": "128198"
}
],
"symlink_target": ""
} |
"""Interact with a REST API."""
import json
import logging
from urllib.parse import urljoin
from aiohttp import ClientHttpProcessingError, ClientSession
class API(ClientSession):
    """Interact with a REST API.

    Thin wrapper over aiohttp's ClientSession that resolves endpoints
    against a base URL and logs each request/response pair.
    """
    # Base URL for the API; subclasses are expected to override this.
    URL = None
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.logger = logging.getLogger(__name__)
    def _build(self, endpoint):
        # lstrip('/') keeps urljoin from discarding the base URL's own path
        # when the endpoint starts with a slash.
        return urljoin(self.URL, endpoint.lstrip('/'))
    async def request(self, method, endpoint, **kwargs):
        """Send HTTP request to an endpoint."""
        url = self._build(endpoint)
        async with super().request(method, url, **kwargs) as response:
            try:
                text = await response.text()
            # NOTE(review): response.text() returns the raw body and does not
            # parse JSON, so it is unclear this handler can ever fire --
            # presumably response.json() was intended; confirm against the
            # aiohttp docs.
            except json.decoder.JSONDecodeError:
                self.logger.warning("Response was not JSON!")
                # NOTE(review): this logs the bound text() method object, not
                # the body text -- likely meant the `text` variable.
                self.logger.debug(response.text)
                raise ClientHttpProcessingError("Response was not JSON!")
            else:
                self.logger.debug(
                    "{method} {endpoint} {data}:\n{code} {text}".format(
                        method=method, endpoint=endpoint, data=kwargs,
                        code=response.status, text=text))
        return response
    async def get(self, endpoint, **kwargs):
        """Send a GET request to an endpoint."""
        return await self.request("GET", endpoint, **kwargs)
    async def options(self, endpoint, **kwargs):
        """Send an OPTIONS request to an endpoint."""
        return await self.request("OPTIONS", endpoint, **kwargs)
    async def head(self, endpoint, **kwargs):
        """Send a HEAD request to an endpoint."""
        return await self.request("HEAD", endpoint, **kwargs)
    async def post(self, endpoint, **kwargs):
        """Send a POST request to an endpoint."""
        return await self.request("POST", endpoint, **kwargs)
    async def put(self, endpoint, **kwargs):
        """Send a PUT request to an endpoint."""
        return await self.request("PUT", endpoint, **kwargs)
    async def patch(self, endpoint, **kwargs):
        """Send a PATCH request to an endpoint."""
        return await self.request("PATCH", endpoint, **kwargs)
    async def delete(self, endpoint, **kwargs):
        """Send a DELETE request to an endpoint."""
        return await self.request("DELETE", endpoint, **kwargs)
| {
"content_hash": "eb0bd8d8de6bf5dea1feaf45d6203fad",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 73,
"avg_line_length": 33,
"alnum_prop": 0.604073522106309,
"repo_name": "CactusDev/CactusBot",
"id": "d40b932702b67d5f9c5c2f1115fa84de145b1377",
"size": "2013",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cactusbot/services/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "153245"
}
],
"symlink_target": ""
} |
"""
This module contains Google BigQuery to BigQuery operator.
"""
import warnings
from typing import Dict, List, Optional, Union
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
from airflow.utils.decorators import apply_defaults
class BigQueryToBigQueryOperator(BaseOperator):
    """
    Copies data from one BigQuery table to another.
    .. seealso::
        For more details about these parameters:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
    :param source_project_dataset_tables: One or more
        dotted ``(project:|project.)<dataset>.<table>`` BigQuery tables to use as the
        source data. If ``<project>`` is not included, project will be the
        project defined in the connection json. Use a list if there are multiple
        source tables. (templated)
    :type source_project_dataset_tables: list|string
    :param destination_project_dataset_table: The destination BigQuery
        table. Format is: ``(project:|project.)<dataset>.<table>`` (templated)
    :type destination_project_dataset_table: str
    :param write_disposition: The write disposition if the table already exists.
    :type write_disposition: str
    :param create_disposition: The create disposition if the table doesn't exist.
    :type create_disposition: str
    :param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
    :type gcp_conn_id: str
    :param bigquery_conn_id: (Deprecated) The connection ID used to connect to Google Cloud Platform.
        This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
    :type bigquery_conn_id: str
    :param delegate_to: The account to impersonate, if any.
        For this to work, the service account making the request must have domain-wide
        delegation enabled.
    :type delegate_to: str
    :param labels: a dictionary containing labels for the job/query,
        passed to BigQuery
    :type labels: dict
    :param encryption_configuration: [Optional] Custom encryption configuration (e.g., Cloud KMS keys).
        **Example**: ::
            encryption_configuration = {
                "kmsKeyName": "projects/testp/locations/us/keyRings/test-kr/cryptoKeys/test-key"
            }
    :type encryption_configuration: dict
    :param location: The location used for the operation.
    :type location: str
    """
    template_fields = ('source_project_dataset_tables',
                       'destination_project_dataset_table', 'labels')
    template_ext = ('.sql',)
    ui_color = '#e6f0e4'
    @apply_defaults
    def __init__(self,  # pylint: disable=too-many-arguments
                 source_project_dataset_tables: Union[List[str], str],
                 destination_project_dataset_table: str,
                 write_disposition: str = 'WRITE_EMPTY',
                 create_disposition: str = 'CREATE_IF_NEEDED',
                 gcp_conn_id: str = 'google_cloud_default',
                 bigquery_conn_id: Optional[str] = None,
                 delegate_to: Optional[str] = None,
                 labels: Optional[Dict] = None,
                 encryption_configuration: Optional[Dict] = None,
                 location: Optional[str] = None,
                 *args,
                 **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Backward compatibility: honour the deprecated bigquery_conn_id but
        # warn the caller to migrate to gcp_conn_id.
        if bigquery_conn_id:
            warnings.warn(
                "The bigquery_conn_id parameter has been deprecated. You should pass "
                "the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
            gcp_conn_id = bigquery_conn_id
        self.source_project_dataset_tables = source_project_dataset_tables
        self.destination_project_dataset_table = destination_project_dataset_table
        self.write_disposition = write_disposition
        self.create_disposition = create_disposition
        self.gcp_conn_id = gcp_conn_id
        self.delegate_to = delegate_to
        self.labels = labels
        self.encryption_configuration = encryption_configuration
        self.location = location
    def execute(self, context):
        """Run a BigQuery copy job from the source table(s) to the destination table."""
        self.log.info(
            'Executing copy of %s into: %s',
            self.source_project_dataset_tables, self.destination_project_dataset_table
        )
        hook = BigQueryHook(bigquery_conn_id=self.gcp_conn_id,
                            delegate_to=self.delegate_to,
                            location=self.location)
        conn = hook.get_conn()
        cursor = conn.cursor()
        # run_copy submits a BigQuery copy job with the configured
        # write/create dispositions, labels, and optional CMEK settings.
        cursor.run_copy(
            source_project_dataset_tables=self.source_project_dataset_tables,
            destination_project_dataset_table=self.destination_project_dataset_table,
            write_disposition=self.write_disposition,
            create_disposition=self.create_disposition,
            labels=self.labels,
            encryption_configuration=self.encryption_configuration)
| {
"content_hash": "8ab042dcb8f58fd6424f887d4b2fe284",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 103,
"avg_line_length": 45.96296296296296,
"alnum_prop": 0.6518936341659952,
"repo_name": "mtagle/airflow",
"id": "f164cf2ce20a009a8cabca8e331e43f28442f455",
"size": "5751",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/providers/google/cloud/operators/bigquery_to_bigquery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "17280"
},
{
"name": "HTML",
"bytes": "148492"
},
{
"name": "JavaScript",
"bytes": "25360"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10006634"
},
{
"name": "Shell",
"bytes": "217011"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from robot.errors import DataError
from robot.utils import XmlWriter, NullMarkupWriter, get_timestamp, unic
from robot.version import get_full_version
from .loggerhelper import IsLogged
class XmlLogger(object):
    """Write Robot Framework execution results as an XML document.

    Listener-style callbacks (start/end suite, test, keyword, messages,
    statistics) are serialized into a ``<robot>`` element.  Messages at WARN
    level or above are additionally buffered and written into an ``<errors>``
    element when the logger is closed.
    """

    def __init__(self, path, log_level='TRACE', generator='Robot'):
        self._log_message_is_logged = IsLogged(log_level)
        self._error_message_is_logged = IsLogged('WARN')
        self._writer = self._get_writer(path, generator)
        self._errors = []

    def _get_writer(self, path, generator):
        """Open an XML writer for path; 'NONE' disables output entirely."""
        if path == 'NONE':
            return NullMarkupWriter()
        try:
            writer = XmlWriter(path, encoding='UTF-8')
        # FIX: use the `except ... as ...` form (Python 2.6+) instead of the
        # legacy comma form, which is a syntax error on Python 3.
        except EnvironmentError as err:
            raise DataError("Opening output file '%s' failed: %s" %
                            (path, err.strerror))
        writer.start('robot', {'generator': get_full_version(generator),
                               'generated': get_timestamp()})
        return writer

    def close(self):
        """Flush buffered errors and close the <robot> document."""
        self.start_errors()
        for msg in self._errors:
            self._write_message(msg)
        self.end_errors()
        self._writer.end('robot')
        self._writer.close()

    def set_log_level(self, level):
        """Change the logged message level; returns the previous level."""
        return self._log_message_is_logged.set_level(level)

    def message(self, msg):
        # Buffer WARN+ messages for the <errors> section written at close().
        if self._error_message_is_logged(msg.level):
            self._errors.append(msg)

    def log_message(self, msg):
        if self._log_message_is_logged(msg.level):
            self._write_message(msg)

    def _write_message(self, msg):
        attrs = {'timestamp': msg.timestamp or 'N/A', 'level': msg.level}
        if msg.html:
            attrs['html'] = 'yes'
        self._writer.element('msg', msg.message, attrs)

    def start_keyword(self, kw):
        self._writer.start('kw', {'name': kw.name, 'type': kw.type,
                                  'timeout': str(kw.timeout)})
        self._writer.element('doc', kw.doc)
        self._write_list('arguments', 'arg', (unic(a) for a in kw.args))

    def end_keyword(self, kw):
        self._write_status(kw)
        self._writer.end('kw')

    def start_test(self, test):
        self._writer.start('test', {'id': test.id, 'name': test.name,
                                    'timeout': str(test.timeout)})

    def end_test(self, test):
        self._writer.element('doc', test.doc)
        self._write_list('tags', 'tag', test.tags)
        self._write_status(test, test.message,
                           {'critical': 'yes' if test.critical else 'no'})
        self._writer.end('test')

    def start_suite(self, suite):
        attrs = {'id': suite.id, 'name': suite.name}
        if suite.source:
            attrs['source'] = suite.source
        self._writer.start('suite', attrs)

    def end_suite(self, suite):
        self._writer.element('doc', suite.doc)
        self._writer.start('metadata')
        for name, value in suite.metadata.items():
            self._writer.element('item', value, {'name': name})
        self._writer.end('metadata')
        self._write_status(suite, suite.message)
        self._writer.end('suite')

    def start_statistics(self, stats):
        self._writer.start('statistics')

    def end_statistics(self, stats):
        self._writer.end('statistics')

    def start_total_stats(self, total_stats):
        self._writer.start('total')

    def end_total_stats(self, total_stats):
        self._writer.end('total')

    def start_tag_stats(self, tag_stats):
        self._writer.start('tag')

    def end_tag_stats(self, tag_stats):
        self._writer.end('tag')

    def start_suite_stats(self, tag_stats):
        self._writer.start('suite')

    def end_suite_stats(self, tag_stats):
        self._writer.end('suite')

    def total_stat(self, stat):
        self._stat(stat)

    def suite_stat(self, stat):
        self._stat(stat, stat.longname,
                   attrs={'id': stat.id, 'name': stat.name})

    def tag_stat(self, stat):
        self._stat(stat, attrs={'info': self._get_tag_stat_info(stat),
                                'links': self._get_tag_links(stat),
                                'doc': stat.doc,
                                'combined': stat.combined})

    def _get_tag_links(self, stat):
        # Format: "title:url:::title:url..." -- ':::' separates links.
        return ':::'.join(':'.join([title, url]) for url, title in stat.links)

    def _stat(self, stat, name=None, attrs=None):
        attrs = attrs or {}
        attrs['pass'] = str(stat.passed)
        attrs['fail'] = str(stat.failed)
        self._writer.element('stat', name or stat.name, attrs)

    def _get_tag_stat_info(self, stat):
        if stat.critical:
            return 'critical'
        if stat.non_critical:
            return 'non-critical'
        if stat.combined:
            return 'combined'
        return ''

    def start_errors(self):
        self._writer.start('errors')

    def end_errors(self):
        self._writer.end('errors')

    def _write_list(self, container_tag, item_tag, items):
        self._writer.start(container_tag)
        for item in items:
            self._writer.element(item_tag, item)
        self._writer.end(container_tag)

    def _write_status(self, item, message=None, extra_attrs=None):
        attrs = {'status': item.status, 'starttime': item.starttime or 'N/A',
                 'endtime': item.endtime or 'N/A'}
        # Only record elapsed time explicitly when it cannot be derived
        # from the start/end timestamps.
        if not (item.starttime and item.endtime):
            attrs['elapsedtime'] = str(item.elapsedtime)
        if extra_attrs:
            attrs.update(extra_attrs)
        self._writer.element('status', message, attrs)
| {
"content_hash": "3c35c5d47e710e3b4a9ea3b864165231",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 78,
"avg_line_length": 34.122699386503065,
"alnum_prop": 0.565623876303488,
"repo_name": "shellderp/sublime-robot-plugin",
"id": "d6a6c26469b8ab76cb30007130e49523adfacea8",
"size": "6168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/robot/output/xmllogger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10046"
},
{
"name": "HTML",
"bytes": "43202"
},
{
"name": "JavaScript",
"bytes": "44593"
},
{
"name": "Python",
"bytes": "1062356"
},
{
"name": "Ruby",
"bytes": "1983"
}
],
"symlink_target": ""
} |
'''Neural style transfer with Keras.
Before running this script, download the weights for the VGG16 model at:
https://drive.google.com/file/d/0Bz7KyqmuGsilT0J5dmRCM0ROVHc/view?usp=sharing
(source: https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3)
and make sure the variable `weights_path` in this script matches the location of the file.
Run the script with:
```
python neural_style_transfer.py path_to_your_base_image.jpg path_to_your_reference.jpg prefix_for_results
```
e.g.:
```
python neural_style_transfer.py img/tuebingen.jpg img/starry_night.jpg results/my_result
```
It is preferable to run this script on GPU, for speed.
If running on CPU, prefer the TensorFlow backend (much faster).
Example result: https://twitter.com/fchollet/status/686631033085677568
# Details
Style transfer consists in generating an image
with the same "content" as a base image, but with the
"style" of a different picture (typically artistic).
This is achieved through the optimization of a loss function
that has 3 components: "style loss", "content loss",
and "total variation loss":
- The total variation loss imposes local spatial continuity between
the pixels of the combination image, giving it visual coherence.
- The style loss is where the deep learning keeps in --that one is defined
using a deep convolutional neural network. Precisely, it consists in a sum of
L2 distances between the Gram matrices of the representations of
the base image and the style reference image, extracted from
different layers of a convnet (trained on ImageNet). The general idea
is to capture color/texture information at different spatial
scales (fairly large scales --defined by the depth of the layer considered).
- The content loss is a L2 distance between the features of the base
image (extracted from a deep layer) and the features of the combination image,
keeping the generated image close enough to the original one.
# References
- [A Neural Algorithm of Artistic Style](http://arxiv.org/abs/1508.06576)
'''
from __future__ import print_function
from scipy.misc import imread, imresize, imsave
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import time
import os
import argparse
import h5py
from keras.models import Sequential
from keras.layers import Convolution2D, ZeroPadding2D, MaxPooling2D
from keras import backend as K
# Command-line interface: three required positional arguments
# (base image, style reference image, output filename prefix).
parser = argparse.ArgumentParser(description='Neural style transfer with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('style_reference_image_path', metavar='ref', type=str,
                    help='Path to the style reference image.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
args = parser.parse_args()
base_image_path = args.base_image_path
style_reference_image_path = args.style_reference_image_path
result_prefix = args.result_prefix
# Pre-trained VGG16 weights in the old Keras HDF5 layout (see module docstring
# for the download link).
weights_path = 'vgg16_weights.h5'
# these are the weights of the different loss components
total_variation_weight = 1.
style_weight = 1.
content_weight = 0.025
# dimensions of the generated picture.
img_width = 400
img_height = 400
# The style loss flattens feature maps into Gram matrices; those only line up
# between images when both spatial dimensions agree.
assert img_height == img_width, 'Due to the use of the Gram matrix, width and height must match.'
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path):
    """Load an image and turn it into the tensor VGG16 expects.

    Resizes to (img_width, img_height), converts RGB -> BGR, subtracts the
    ImageNet mean pixel, moves channels first and adds a batch axis, giving
    a float64 array of shape (1, 3, img_width, img_height).
    """
    img = imresize(imread(image_path), (img_width, img_height))
    # RGB -> BGR, and switch to floats before mean subtraction.
    img = img[:, :, ::-1].astype('float64')
    # Subtract the ImageNet mean pixel (BGR order).
    for channel, mean in enumerate((103.939, 116.779, 123.68)):
        img[:, :, channel] -= mean
    # channels-last -> channels-first, then prepend the batch dimension.
    return np.expand_dims(img.transpose((2, 0, 1)), axis=0)
# util function to convert a tensor into a valid image
def deprocess_image(x):
    """Convert a (3, img_width, img_height) float tensor into a uint8 RGB image.

    Reverses ``preprocess_image``: restores channels-last layout, adds back
    the ImageNet mean pixel (BGR order), flips BGR -> RGB and clips to the
    displayable [0, 255] range.

    Fix: the original transposed to a *view* and applied ``+=`` to it, which
    silently mutated the caller's array.  We now work on a copy, so the
    input is left untouched.
    """
    img = x.transpose((1, 2, 0)).copy()  # copy: don't mutate the caller's data
    # Add back the ImageNet mean pixel (BGR order).
    img[:, :, 0] += 103.939
    img[:, :, 1] += 116.779
    img[:, :, 2] += 123.68
    # 'BGR' -> 'RGB'
    img = img[:, :, ::-1]
    return np.clip(img, 0, 255).astype('uint8')
# get tensor representations of our images
base_image = K.variable(preprocess_image(base_image_path))
style_reference_image = K.variable(preprocess_image(style_reference_image_path))
# this will contain our generated image
combination_image = K.placeholder((1, 3, img_width, img_height))
# combine the 3 images into a single Keras tensor
# (batch index 0 = base, 1 = style reference, 2 = generated image; the loss
# terms below rely on this ordering when slicing layer outputs)
input_tensor = K.concatenate([base_image,
                              style_reference_image,
                              combination_image], axis=0)
# build the VGG16 network with our 3 images as input
first_layer = ZeroPadding2D((1, 1))
first_layer.set_input(input_tensor, shape=(3, 3, img_width, img_height))
model = Sequential()
model.add(first_layer)
# Only the first conv of each block is named: those are the layers the style
# and content losses read from (see feature_layers / 'conv4_2' below).
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
f = h5py.File(weights_path)
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        # we don't look at the last (fully-connected) layers in the savefile
        break
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
outputs_dict = dict([(layer.name, layer.output) for layer in model.layers])
# compute the neural style loss
# first we need to define 4 util functions
# the gram matrix of an image tensor (feature-wise outer product)
def gram_matrix(x):
    """Gram matrix (feature-wise outer product) of a 3-D feature tensor."""
    assert K.ndim(x) == 3
    flat_features = K.batch_flatten(x)
    return K.dot(flat_features, K.transpose(flat_features))
# the "style loss" is designed to maintain
# the style of the reference image in the generated image.
# It is based on the gram matrices (which capture style) of
# feature maps from the style reference image
# and from the generated image
def style_loss(style, combination):
    """Style loss between two 3-D feature tensors.

    Scaled squared Frobenius distance between the Gram matrices of the
    style-reference features and the generated-image features; keeping the
    Gram statistics close transfers the reference image's texture.
    """
    assert K.ndim(style) == 3
    assert K.ndim(combination) == 3
    gram_style = gram_matrix(style)
    gram_combination = gram_matrix(combination)
    channels = 3
    size = img_width * img_height
    normalizer = 4. * (channels ** 2) * (size ** 2)
    return K.sum(K.square(gram_style - gram_combination)) / normalizer
# an auxiliary loss function
# designed to maintain the "content" of the
# base image in the generated image
def content_loss(base, combination):
    """L2 distance between base and combination feature tensors; keeps the
    generated image close to the original content."""
    difference = combination - base
    return K.sum(K.square(difference))
# the 3rd loss function, total variation loss,
# designed to keep the generated image locally coherent
def total_variation_loss(x):
    """Total variation loss of a (1, 3, img_width, img_height) tensor.

    Penalizes differences between neighbouring pixels (along both spatial
    axes) to keep the generated image locally coherent.
    """
    assert K.ndim(x) == 4
    core = x[:, :, :img_width - 1, :img_height - 1]
    shifted_rows = x[:, :, 1:, :img_height - 1]
    shifted_cols = x[:, :, :img_width - 1, 1:]
    variation = K.square(core - shifted_rows) + K.square(core - shifted_cols)
    return K.sum(K.pow(variation, 1.25))
# combine these loss functions into a single scalar
loss = K.variable(0.)
# content loss: compare base (batch index 0) and generated (batch index 2)
# activations at a single deep layer.
layer_features = outputs_dict['conv4_2']
base_image_features = layer_features[0, :, :, :]
combination_features = layer_features[2, :, :, :]
loss += content_weight * content_loss(base_image_features,
                                      combination_features)
# style loss: averaged over the first conv layer of each VGG16 block,
# comparing style reference (index 1) against the generated image (index 2).
feature_layers = ['conv1_1', 'conv2_1', 'conv3_1', 'conv4_1', 'conv5_1']
for layer_name in feature_layers:
    layer_features = outputs_dict[layer_name]
    style_reference_features = layer_features[1, :, :, :]
    combination_features = layer_features[2, :, :, :]
    sl = style_loss(style_reference_features, combination_features)
    loss += (style_weight / len(feature_layers)) * sl
loss += total_variation_weight * total_variation_loss(combination_image)
# get the gradients of the generated image wrt the loss
grads = K.gradients(loss, combination_image)
outputs = [loss]
# K.gradients may return a single tensor or a list, depending on the backend.
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)
f_outputs = K.function([combination_image], outputs)
def eval_loss_and_grads(x):
    """Run the compiled Keras function once on a flattened image.

    Returns a (loss value, flattened float64 gradient array) pair, handling
    both the single-tensor and list-of-tensors gradient layouts produced by
    different backends.
    """
    batch = x.reshape((1, 3, img_width, img_height))
    outs = f_outputs([batch])
    loss_value = outs[0]
    grad_parts = outs[1:]
    if len(grad_parts) == 1:
        grad_values = grad_parts[0].flatten().astype('float64')
    else:
        grad_values = np.array(grad_parts).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Cache loss and gradients computed in one pass.

    ``scipy.optimize.fmin_l_bfgs_b`` requires separate callables for the
    loss and the gradients, but ``eval_loss_and_grads`` computes both at
    once (computing them separately would be inefficient).  ``loss`` runs
    the computation and caches the gradients; the following ``grads`` call
    returns the cached value and resets the cache.
    """

    def __init__(self):
        self.loss_value = None
        # Bug fix: this was misspelled ``grads_values``, so ``grad_values``
        # did not exist until the first ``loss()`` call and the initialised
        # attribute was dead.
        self.grad_values = None

    def loss(self, x):
        """Compute the loss for flattened image ``x`` and cache its gradients."""
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradients cached by the preceding ``loss`` call."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
evaluator = Evaluator()
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the neural style loss
# Start from random noise shifted by the ImageNet mean pixel (BGR order) so it
# lives in the same range as the preprocessed images.
x = np.random.uniform(0, 255, (1, 3, img_width, img_height))
x[0, 0, :, :] -= 103.939
x[0, 1, :, :] -= 116.779
x[0, 2, :, :] -= 123.68
for i in range(10):
    print('Start of iteration', i)
    start_time = time.time()
    # maxfun=20: cap loss/gradient evaluations per outer iteration so an
    # intermediate image can be saved after each one.
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=20)
    print('Current loss value:', min_val)
    # save current generated image
    img = deprocess_image(x.copy().reshape((3, img_width, img_height)))
    fname = result_prefix + '_at_iteration_%d.png' % i
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
| {
"content_hash": "b5334ea765f050ffdac01c45436818e8",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 105,
"avg_line_length": 37.996688741721854,
"alnum_prop": 0.6918518518518518,
"repo_name": "relh/keras",
"id": "e457b72ecbd79b042e52c53908a72a9805aba3c7",
"size": "11475",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/neural_style_transfer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "697"
},
{
"name": "Python",
"bytes": "905485"
}
],
"symlink_target": ""
} |
from unittest.mock import patch
from django.contrib.auth.models import AnonymousUser, User
from django.test import RequestFactory, TestCase
from logger.middleware import LogAuthenticatedRequestMiddleware
class TestLogRequestMiddleware(TestCase):
    """Exercise LogAuthenticatedRequestMiddleware's request logging."""

    def setUp(self):
        self.factory = RequestFactory()
        self.middleware = LogAuthenticatedRequestMiddleware()

    def _process(self, user):
        """Send a GET /admin request from ``user`` through the middleware.

        Returns (middleware result, patched from_request mock, request).
        """
        request = self.factory.get('/admin')
        request.user = user
        with patch('logger.models.LogManager.from_request') as from_request:
            result = self.middleware.process_request(request)
        return result, from_request, request

    def test_authenticated(self):
        """Requests from a logged-in user are recorded."""
        result, from_request, request = self._process(User.objects.create())
        from_request.assert_called_once_with(request)
        self.assertIsNone(result)

    def test_anonymous(self):
        """Requests from anonymous users are not recorded."""
        result, from_request, _ = self._process(AnonymousUser())
        self.assertFalse(from_request.called)
        self.assertIsNone(result)
| {
"content_hash": "aa2fd4279c6eba340bbb356689c6a946",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 76,
"avg_line_length": 33.46875,
"alnum_prop": 0.7105508870214753,
"repo_name": "incuna/incuna-request-logging",
"id": "58bd9fa426835a2e5d2eb40baa8806c8e9134e39",
"size": "1071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_logger/test_middleware.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "160"
},
{
"name": "Python",
"bytes": "6220"
}
],
"symlink_target": ""
} |
"""Token bucket implementation for rate limiting."""
from __future__ import absolute_import, unicode_literals
from collections import deque
from kombu.five import monotonic
__all__ = ('TokenBucket',)
class TokenBucket(object):
    """Token Bucket rate limiting.

    Tokens drip into the bucket at ``fill_rate`` per second, up to
    ``capacity``; consumers take tokens out in whole requested amounts.
    Based on the token bucket algorithm
    (https://en.wikipedia.org/wiki/Token_Bucket) and an ASPN Python
    Cookbook recipe (https://code.activestate.com/recipes/511490/).

    Warning:
        Not thread safe: when sharing an instance between threads, guard
        every access with your own critical section.
    """

    #: The rate in tokens/second that the bucket will be refilled.
    fill_rate = None

    #: Maximum number of tokens in the bucket.
    capacity = 1

    #: Timestamp of the last time a token was taken out of the bucket.
    timestamp = None

    def __init__(self, fill_rate, capacity=1):
        self.capacity = float(capacity)
        self._tokens = capacity
        self.fill_rate = float(fill_rate)
        self.timestamp = monotonic()
        self.contents = deque()

    def add(self, item):
        """Queue an item to be delivered when tokens allow."""
        self.contents.append(item)

    def pop(self):
        """Dequeue and return the oldest pending item."""
        return self.contents.popleft()

    def clear_pending(self):
        """Drop all pending items."""
        self.contents.clear()

    def can_consume(self, tokens=1):
        """Try to consume ``tokens`` from the bucket.

        Returns:
            bool: True (and the tokens are removed) when the bucket holds
                at least ``tokens``.  Consumption is all-or-nothing: either
                the full amount is taken or nothing is.
        """
        if tokens > self._get_tokens():
            return False
        self._tokens -= tokens
        return True

    def expected_time(self, tokens=1):
        """Estimate how long until ``tokens`` are available.

        Returns:
            float: seconds to wait; zero when the bucket already holds
                enough tokens.
        """
        available = self._get_tokens()
        shortfall = max(tokens, available) - available
        return shortfall / self.fill_rate

    def _get_tokens(self):
        """Refill for the elapsed time, then return the current level."""
        if self._tokens < self.capacity:
            now = monotonic()
            refill = self.fill_rate * (now - self.timestamp)
            self._tokens = min(self.capacity, self._tokens + refill)
            self.timestamp = now
        return self._tokens
| {
"content_hash": "f7b4a16df07f0958242d8b48a247cac8",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 79,
"avg_line_length": 30.518072289156628,
"alnum_prop": 0.608764311093565,
"repo_name": "urbn/kombu",
"id": "4c84834040ac1db83ec959014b6152ef67667322",
"size": "2533",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kombu/utils/limits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1844"
},
{
"name": "Makefile",
"bytes": "3788"
},
{
"name": "Python",
"bytes": "1082894"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from datetime import date
from unittest import skipUnless
from django.apps import apps
from django.conf import settings
from django.contrib.sitemaps import Sitemap, GenericSitemap
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import ignore_warnings, modify_settings, override_settings
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.formats import localize
from django.utils._os import upath
from django.utils.translation import activate, deactivate
from .base import TestModel, SitemapTestsBase
class HTTPSitemapTests(SitemapTestsBase):
    """Tests for sitemap views served over plain HTTP: index and section
    rendering, custom templates, Last-Modified headers, localization and
    behavior without the sites framework."""

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_simple_sitemap_index(self):
        "A simple sitemap index can be rendered"
        # The URL for views.sitemap in tests/urls/http.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/simple/index.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @ignore_warnings(category=RemovedInDjango20Warning)
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
    }])
    def test_simple_sitemap_custom_index(self):
        "A simple sitemap index can be rendered with a custom template"
        # The URL for views.sitemap in tests/urls/http.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/simple/custom-index.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/simple/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_simple_sitemap_section(self):
        "A simple sitemap section can be rendered"
        response = self.client.get('/simple/sitemap-simple.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_simple_sitemap(self):
        "A simple sitemap can be rendered"
        response = self.client.get('/simple/sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
    }])
    def test_simple_custom_sitemap(self):
        "A simple sitemap can be rendered with a custom template"
        response = self.client.get('/simple/custom-sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This is a customised template -->
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>%s/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % (self.base_url, date.today())
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    def test_sitemap_last_modified(self):
        "Tests that Last-Modified header is set correctly"
        response = self.client.get('/lastmod/sitemap.xml')
        self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 10:00:00 GMT')

    def test_sitemap_last_modified_date(self):
        """
        The Last-Modified header should support dates (without time).
        """
        response = self.client.get('/lastmod/date-sitemap.xml')
        self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 00:00:00 GMT')

    def test_sitemap_last_modified_tz(self):
        """
        The Last-Modified header should be converted from timezone aware dates
        to GMT.
        """
        response = self.client.get('/lastmod/tz-sitemap.xml')
        self.assertEqual(response['Last-Modified'], 'Wed, 13 Mar 2013 15:00:00 GMT')

    def test_sitemap_last_modified_missing(self):
        "Tests that Last-Modified header is missing when sitemap has no lastmod"
        response = self.client.get('/generic/sitemap.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    def test_sitemap_last_modified_mixed(self):
        "Tests that Last-Modified header is omitted when lastmod not on all items"
        response = self.client.get('/lastmod-mixed/sitemap.xml')
        self.assertFalse(response.has_header('Last-Modified'))

    @skipUnless(settings.USE_I18N, "Internationalization is not enabled")
    @override_settings(USE_L10N=True)
    def test_localized_priority(self):
        "The priority value should not be localized (Refs #14164)"
        activate('fr')
        # Sanity check: under 'fr', localize() would render 0.3 as '0,3'.
        self.assertEqual('0,3', localize(0.3))
        # Retrieve the sitemap. Check that priorities
        # haven't been rendered in localized format
        response = self.client.get('/simple/sitemap.xml')
        self.assertContains(response, '<priority>0.5</priority>')
        self.assertContains(response, '<lastmod>%s</lastmod>' % date.today())
        deactivate()

    @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
    def test_requestsite_sitemap(self):
        # Make sure hitting the flatpages sitemap without the sites framework
        # installed doesn't raise an exception.
        response = self.client.get('/simple/sitemap.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>http://testserver/location/</loc><lastmod>%s</lastmod><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""" % date.today()
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @skipUnless(apps.is_installed('django.contrib.sites'),
                "django.contrib.sites app not installed.")
    def test_sitemap_get_urls_no_site_1(self):
        """
        Check we get ImproperlyConfigured if we don't pass a site object to
        Sitemap.get_urls and no Site objects exist
        """
        Site.objects.all().delete()
        self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)

    @modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
    def test_sitemap_get_urls_no_site_2(self):
        """
        Check we get ImproperlyConfigured when we don't pass a site object to
        Sitemap.get_urls if Site objects exists, but the sites framework is not
        actually installed.
        """
        self.assertRaises(ImproperlyConfigured, Sitemap().get_urls)

    def test_sitemap_item(self):
        """
        Check to make sure that the raw item is included with each
        Sitemap.get_url() url result.
        """
        test_sitemap = GenericSitemap({'queryset': TestModel.objects.all()})

        def is_testmodel(url):
            # Each url dict should carry the originating model instance.
            return isinstance(url['item'], TestModel)
        item_in_url_info = all(map(is_testmodel, test_sitemap.get_urls()))
        self.assertTrue(item_in_url_info)

    def test_cached_sitemap_index(self):
        """
        Check that a cached sitemap index can be rendered (#2713).
        """
        response = self.client.get('/cached/index.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<sitemapindex xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<sitemap><loc>%s/cached/sitemap-simple.xml</loc></sitemap>
</sitemapindex>
""" % self.base_url
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_x_robots_sitemap(self):
        # The URL for views.sitemap in tests/urls/http.py has been updated
        # with a name but since reversing by Python path is tried first
        # before reversing by name and works since we're giving
        # name='django.contrib.sitemaps.views.sitemap', we need to silence
        # the erroneous warning until reversing by dotted path is removed.
        # The test will work without modification when it's removed.
        response = self.client.get('/simple/index.xml')
        self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')

        response = self.client.get('/simple/sitemap.xml')
        self.assertEqual(response['X-Robots-Tag'], 'noindex, noodp, noarchive')

    def test_empty_sitemap(self):
        # An empty sitemap should still render (with no url entries).
        response = self.client.get('/empty/sitemap.xml')
        self.assertEqual(response.status_code, 200)

    @override_settings(LANGUAGES=(('en', 'English'), ('pt', 'Portuguese')))
    def test_simple_i18nsitemap_index(self):
        "A simple i18n sitemap index can be rendered"
        response = self.client.get('/simple/i18n.xml')
        expected_content = """<?xml version="1.0" encoding="UTF-8"?>
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">
<url><loc>{0}/en/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url><url><loc>{0}/pt/i18n/testmodel/{1}/</loc><changefreq>never</changefreq><priority>0.5</priority></url>
</urlset>
""".format(self.base_url, self.i18n_model.pk)
        self.assertXMLEqual(response.content.decode('utf-8'), expected_content)
| {
"content_hash": "863b48c7156443d595d6ff7e4521d98c",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 204,
"avg_line_length": 48.01793721973094,
"alnum_prop": 0.6804258498319014,
"repo_name": "iambibhas/django",
"id": "dde9e97c01077e5758e71371bd7bf2cac6c763ee",
"size": "10708",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "django/contrib/sitemaps/tests/test_http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53429"
},
{
"name": "JavaScript",
"bytes": "103687"
},
{
"name": "Makefile",
"bytes": "5765"
},
{
"name": "Python",
"bytes": "10481639"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
} |
"""
"""
from __future__ import unicode_literals
import re
from .selection import SelectionType
__all__ = ('Document',)
# Regex for finding the "words" in documents. (We consider a group of alnum
# characters a word, but also a group of special characters a word, as long as
# it doesn't contain a space.)
_FIND_WORD_RE = re.compile('([a-zA-Z0-9_]+|[^a-zA-Z0-9_\s]+)')
_FIND_CURRENT_WORD_RE = re.compile('^([a-zA-Z0-9_]+|[^a-zA-Z0-9_\s]+)')
class Document(object):
"""
This is a immutable class around the text and cursor position, and contains
methods for querying this data, e.g. to give the text before the cursor.
This class is usually instantiated by a :class:`~prompt_toolkit.line.Line`
object, and accessed as the `document` property of that class.
:param text: string
:param cursor_position: int
:param selection: :class:`SelectionState`
"""
__slots__ = ('text', 'cursor_position', 'selection')
def __init__(self, text='', cursor_position=0, selection=None):
self.text = text
self.cursor_position = cursor_position
self.selection = selection
@property
def current_char(self):
""" Return character under cursor, or None """
return self._get_char_relative_to_cursor(0)
@property
def char_before_cursor(self):
""" Return character before the cursor, or None """
return self._get_char_relative_to_cursor(-1)
@property
def text_before_cursor(self):
return self.text[:self.cursor_position:]
@property
def text_after_cursor(self):
return self.text[self.cursor_position:]
@property
def current_line_before_cursor(self):
""" Text from the start of the line until the cursor. """
return self.text_before_cursor.split('\n')[-1]
@property
def current_line_after_cursor(self):
""" Text from the cursor until the end of the line. """
return self.text_after_cursor.split('\n')[0]
@property
def lines(self):
"""
Array of all the lines.
"""
return self.text.split('\n')
@property
def lines_from_current(self):
"""
Array of the lines starting from the current line, until the last line.
"""
return self.lines[self.cursor_position_row:]
@property
def line_count(self):
""" Return the number of lines in this document. If the document ends
with a trailing \n, that counts as the beginning of a new line. """
return len(self.lines)
@property
def current_line(self):
""" Return the text on the line where the cursor is. (when the input
consists of just one line, it equals `text`. """
return self.current_line_before_cursor + self.current_line_after_cursor
@property
def leading_whitespace_in_current_line(self):
""" The leading whitespace in the left margin of the current line. """
current_line = self.current_line
length = len(current_line) - len(current_line.lstrip())
return current_line[:length]
def _get_char_relative_to_cursor(self, offset=0):
"""
Return character relative to cursor position, or empty string
"""
try:
return self.text[self.cursor_position + offset]
except IndexError:
return ''
@property
def cursor_position_row(self):
"""
Current row. (0-based.)
"""
return len(self.text_before_cursor.split('\n')) - 1
@property
def cursor_position_col(self):
"""
Current column. (0-based.)
"""
return len(self.current_line_before_cursor)
def translate_index_to_position(self, index): # TODO: make this 0-based indexed!!!
"""
Given an index for the text, return the corresponding (row, col) tuple.
"""
text_before_position = self.text[:index]
row = len(text_before_position.split('\n'))
col = len(text_before_position.split('\n')[-1])
return row, col
def translate_row_col_to_index(self, row, col):
"""
Given a (row, col) tuple, return the corresponding index.
(Row and col params are 0-based.)
"""
return len('\n'.join(self.lines[:row])) + len('\n') + col
@property
def is_cursor_at_the_end(self):
""" True when the cursor is at the end of the text. """
return self.cursor_position == len(self.text)
@property
def is_cursor_at_the_end_of_line(self):
""" True when the cursor is at the end of this line. """
return self.cursor_position_col == len(self.current_line)
def has_match_at_current_position(self, sub):
"""
`True` when this substring is found at the cursor position.
"""
return self.text[self.cursor_position:].find(sub) == 0
def find(self, sub, in_current_line=False, include_current_position=False, count=1): # TODO: rename to `find_forwards`
"""
Find `text` after the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurance.
"""
if in_current_line:
text = self.current_line_after_cursor
else:
text = self.text_after_cursor
if not include_current_position:
if len(text) == 0:
return # (Otherwise, we always get a match for the empty string.)
else:
text = text[1:]
iterator = re.finditer(re.escape(sub), text)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
if include_current_position:
return match.start(0)
else:
return match.start(0) + 1
except StopIteration:
pass
def find_all(self, sub):
"""
Find all occurances of the substring. Return a list of absolute
positions in the document.
"""
return [a.start() for a in re.finditer(re.escape(sub), self.text)]
def find_backwards(self, sub, in_current_line=False, count=1):
"""
Find `text` before the cursor, return position relative to the cursor
position. Return `None` if nothing was found.
:param count: Find the n-th occurance.
"""
if in_current_line:
before_cursor = self.current_line_before_cursor[::-1]
else:
before_cursor = self.text_before_cursor[::-1]
iterator = re.finditer(re.escape(sub[::-1]), before_cursor)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
return - match.start(0) - len(sub)
except StopIteration:
pass
def get_word_before_cursor(self):
"""
Give the word before the cursor.
If we have whitespace before the cursor this returns an empty string.
"""
if self.text_before_cursor[-1:].isspace():
return ''
else:
return self.text_before_cursor[self.find_start_of_previous_word():]
def find_start_of_previous_word(self, count=1):
"""
Return an index relative to the cursor position pointing to the start
of the previous word. Return `None` if nothing was found.
"""
# Reverse the text before the cursor, in order to do an efficient
# backwards search.
text_before_cursor = self.text_before_cursor[::-1]
iterator = _FIND_WORD_RE.finditer(text_before_cursor)
try:
for i, match in enumerate(iterator):
if i + 1 == count:
return - match.end(1)
except StopIteration:
pass
def find_boundaries_of_current_word(self):
"""
Return the relative boundaries (startpos, endpos) of the current word under the
cursor. (This is at the current line, because line boundaries obviously
don't belong to any word.)
If not on a word, this returns (0,0)
"""
text_before_cursor = self.current_line_before_cursor[::-1]
text_after_cursor = self.current_line_after_cursor
match_before = _FIND_CURRENT_WORD_RE.search(text_before_cursor)
match_after = _FIND_CURRENT_WORD_RE.search(text_after_cursor)
return (
- match_before.end(1) if match_before else 0,
match_after.end(1) if match_after else 0
)
def find_next_word_beginning(self, count=1):
"""
Return an index relative to the cursor position pointing to the start
of the next word. Return `None` if nothing was found.
"""
iterator = _FIND_WORD_RE.finditer(self.text_after_cursor)
try:
for i, match in enumerate(iterator):
# Take first match, unless it's the word on which we're right now.
if i == 0 and match.start(1) == 0:
count += 1
if i + 1 == count:
return match.start(1)
except StopIteration:
pass
def find_next_word_ending(self, include_current_position=False, count=1):
"""
Return an index relative to the cursor position pointing to the end
of the next word. Return `None` if nothing was found.
"""
if include_current_position:
text = self.text_after_cursor
else:
text = self.text_after_cursor[1:]
iterable = _FIND_WORD_RE.finditer(text)
try:
for i, match in enumerate(iterable):
if i + 1 == count:
value = match.end(1)
if include_current_position:
return value
else:
return value + 1
except StopIteration:
pass
def find_previous_word_beginning(self, count=1):
"""
Return an index relative to the cursor position pointing to the start
of the next word. Return `None` if nothing was found.
"""
iterator = _FIND_WORD_RE.finditer(self.text_before_cursor[::-1])
try:
for i, match in enumerate(iterator):
if i + 1 == count:
return - match.end(1)
except StopIteration:
pass
def find_next_matching_line(self, match_func):
"""
Look downwards for empty lines.
Return the line index, relative to the current line.
"""
for index, line in enumerate(self.lines[self.cursor_position_row + 1:]):
if match_func(line):
return 1 + index
def find_previous_matching_line(self, match_func):
"""
Look upwards for empty lines.
Return the line index, relative to the current line.
"""
for index, line in enumerate(self.lines[:self.cursor_position_row][::-1]):
if match_func(line):
return -1 - index
def get_cursor_left_position(self, count=1):
"""
Relative position for cursor left.
"""
return - min(self.cursor_position_col, count)
def get_cursor_right_position(self, count=1):
"""
Relative position for cursor_right.
"""
return min(count, len(self.current_line_after_cursor))
    def get_cursor_up_position(self, count=1):  # TODO: implement `count`
        """
        Return the relative cursor position (character index) where we would be if the
        user pressed the arrow-up button.
        """
        assert count >= 1
        # Clamp so we never move above the first line.
        count = min(self.text_before_cursor.count('\n'), count)
        if count:
            pos = self.cursor_position_col
            lines = self.text_before_cursor.split('\n')
            # `text_before_cursor` ends exactly at the cursor, so joining its
            # last count+1 lines gives the text from the start of the target
            # line up to the cursor; -len(skip_lines) is therefore the
            # relative offset of the target line's first character.
            skip_lines = '\n'.join(lines[-count-1:])
            new_line = lines[-count-1]
            # When the current line is longer then the previous, move to the
            # last character of the previous line.
            if pos > len(new_line):
                return - len(skip_lines) + len(new_line)
            # Otherwise find the corresponding position in the previous line.
            else:
                return - len(skip_lines) + pos
        # Already on the first line: no movement.
        return 0
    def get_cursor_down_position(self, count=1):
        """
        Return the relative cursor position (character index) where we would be if the
        user pressed the arrow-down button.
        """
        assert count >= 1
        # Clamp so we never move below the last line.
        count = min(self.text_after_cursor.count('\n'), count)
        if count:
            pos = self.cursor_position_col
            lines = self.text_after_cursor.split('\n')
            # Text between the cursor and the end of the line just before
            # the target line; the `+ 1` terms below account for the final
            # newline separating it from the target line.
            skip_lines = '\n'.join(lines[:count])
            new_line = lines[count]
            # When the current line is longer then the previous, move to the
            # last character of the next line.
            if pos > len(new_line):
                return len(skip_lines) + len(new_line) + 1
            # Otherwise find the corresponding position in the next line.
            else:
                return len(skip_lines) + pos + 1
        # Already on the last line: no movement.
        return 0
    @property
    def matching_bracket_position(self):
        """
        Return relative cursor position of matching [, (, { or < bracket.

        Returns 0 when the current character is not a bracket or when no
        matching bracket is found.
        """
        # Depth counter: starts at 1 for the bracket under the cursor.
        stack = 1
        for A, B in '()', '[]', '{}', '<>':
            if self.current_char == A:
                # On an opening bracket: scan forwards for the matching
                # closing bracket, tracking nesting depth.
                for i, c in enumerate(self.text_after_cursor[1:]):
                    if c == A: stack += 1
                    elif c == B: stack -= 1
                    if stack == 0:
                        return i + 1
            elif self.current_char == B:
                # On a closing bracket: scan backwards through the prefix.
                for i, c in enumerate(reversed(self.text_before_cursor)):
                    if c == B: stack += 1
                    elif c == A: stack -= 1
                    if stack == 0:
                        return - (i + 1)
        return 0
@property
def home_position(self):
""" Relative position for the start of the document. """
return - self.cursor_position
@property
def end_position(self):
""" Relative position for the end of the document. """
return len(self.text) - self.cursor_position
def get_start_of_line_position(self, after_whitespace=False):
""" Relative position for the end of this line. """
if after_whitespace:
current_line = self.current_line
return len(current_line) - len(current_line.lstrip()) - self.cursor_position_col
else:
return - len(self.current_line_before_cursor)
def get_end_of_line_position(self):
""" Relative position for the end of this line. """
return len(self.current_line_after_cursor)
def get_column_cursor_position(self, column):
"""
Return the relative cursor position for this column at the current
line. (It will stay between the boundaries of the line in case of a
larger number.)
"""
line_length = len(self.current_line)
current_column = self.cursor_position_col
column = max(0, min(line_length, column))
return column - current_column
def selection_range(self):
"""
Return (from, to) tuple of the selection or `None` if nothing was selected.
start and end position are always included in the selection.
"""
if self.selection:
from_, to = sorted([self.cursor_position, self.selection.original_cursor_position])
# In case of a LINES selection, go to the start/end of the lines.
if self.selection.type == SelectionType.LINES:
from_ = max(0, self.text[:from_].rfind('\n') + 1)
if self.text[to:].find('\n') >= 0:
to += self.text[to:].find('\n')
else:
to = len(self.text)
return from_, to
def empty_line_count_at_the_end(self):
"""
Return number of empty lines at the end of the document.
"""
count = 0
for line in self.lines[::-1]:
if not line or line.isspace():
count += 1
else:
break
return count
| {
"content_hash": "6dd778b9e40f5b2e87ba5119c8f05116",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 123,
"avg_line_length": 33.27902240325866,
"alnum_prop": 0.5644430844553243,
"repo_name": "Carreau/python-prompt-toolkit",
"id": "20e7ef946c3047db7f073a3ef8972bf3135116b6",
"size": "16340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prompt_toolkit/document.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "354355"
},
{
"name": "Shell",
"bytes": "6699"
}
],
"symlink_target": ""
} |
import rospy
from std_msgs.msg import (
Empty,
String,
)
from geometry_msgs.msg import PoseStamped, Pose
from jsk_rviz_plugins.msg import OverlayText
from jsk_2015_05_baxter_apc.msg import *
from jsk_2015_05_baxter_apc.srv import *
class DemoReal(object):
    """Demo for video sending to Amazon Official to get Kiva & Items"""
    # NOTE: Python 2 code (uses the old ``except Exc, e`` syntax and rospy
    # publishers without queue_size). Kept byte-identical.

    def __init__(self):
        rospy.init_node('demo_real')
        # publishers
        self.pb_rviz_msg = rospy.Publisher('/semi/rviz_msg', OverlayText)
        # self.pb_get_item = rospy.Publisher('/semi/get_item', Empty)
        # properties
        self.qrcode_info = {}  # maps QR-code label -> pose stamp (filled by cl_qrcode_reader)
        self.target_bin = ''   # name of the bin currently being handled

    def cl_qrcode_reader(self):
        """QR code reader to get the position of bins"""
        rospy.logwarn("======================== cl_qrcode_reader ========================")
        rospy.wait_for_service('/semi/qrcode_pos')
        try:
            qrcode_reader = rospy.ServiceProxy('/semi/qrcode_pos', QrStampsrv)
            # NOTE(review): this passes the message *class* ``Empty`` rather
            # than an instance -- confirm against the service definition.
            resp = qrcode_reader(Empty)
            # Cache each detected QR-code pose, keyed by its label.
            for stamp in resp.qrstamps.qrcode_stampes:
                rospy.logwarn(stamp.label.data)
                self.qrcode_info[stamp.label.data] = stamp.qrcode_pose_stamp
            return resp
        except rospy.ServiceException, e:
            rospy.logwarn('/semi/qrcode_pos Service call failed: {0}'.format(e))

    def cl_get_item(self):
        # Move the right arm to the pose previously recorded for target_bin.
        rospy.logwarn('move to ' + self.target_bin + " =================================")
        rospy.wait_for_service('/move_right_arm_service')
        try:
            get_item = rospy.ServiceProxy('/move_right_arm_service', MoveArm)
            # get_item = rospy.ServiceProxy('/semi/move_right_arm', MoveArm)
            get_item(self.qrcode_info[self.target_bin])
            # rospy.logwarn(self.qrcode_info)
            # rospy.logwarn(get_item)
            # rospy.logwarn(self.qrcode_info[(self.target_bin)])
            # resp = get_item(self.qrcode_info[(self.target_bin)])
            # rospy.logwarn('get_item ========================================================')
            # rospy.logwarn(resp)
            # rospy.wait_for_service('/semi/get_item')
            # get_item = rospy.ServiceProxy('/semi/get_item', Cue)
            # resp = get_item()
            # resp = get_item(PoseStamped(self.qrcode_info[self.target_bin]))
            # if resp.succeeded is False:
            #     rospy.logwarn('move arm to {0} is failed'.format(self.target_bin))
        except rospy.ServiceException, e:
            rospy.logwarn('/semi/get_item Service call failed: {0}'.format(e))

    def cl_release_item(self):
        rospy.wait_for_service('/semi/release_item')
        rospy.logwarn("===============================release item =====================================")
        try:
            release_item = rospy.ServiceProxy('/semi/release_item', ReleaseItem)
            # NOTE(review): again passes the ``Empty`` class, not an instance.
            resp = release_item(Empty)
            if resp.succeeded is False:
                # NOTE(review): the .format() call below has no placeholder,
                # so target_bin is never interpolated into the message.
                rospy.logwarn('release item is failed'.format(self.target_bin))
        except rospy.ServiceException, e:
            rospy.logwarn('/semi/release_item Service call failed: {0}'.format(e))

    def main(self):
        # Demo sequence: locate bins via QR codes, pick from bin_E, release.
        # read QR code
        self.pb_rviz_msg.publish(OverlayText(text='Started reading QR code and get position of each bins.'))
        succeeded = self.cl_qrcode_reader()
        # Get item
        self.target_bin = 'bin_E'
        self.pb_rviz_msg.publish(OverlayText(text='Getting item in bin name: {0}.'.format(self.target_bin)))
        succeeded = self.cl_get_item()
        # Release item
        self.pb_rviz_msg.publish(OverlayText(text='Releasing item.'))
        self.cl_release_item()
        self.pb_rviz_msg.publish(OverlayText(text="baxter waiting"))
if __name__ == '__main__':
    # Entry point: run the full QR-read / pick / release demo sequence.
    DemoReal().main()
| {
"content_hash": "68ba25d96965757d30de2cfb6bdde227",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 108,
"avg_line_length": 42.79775280898876,
"alnum_prop": 0.5707534786033079,
"repo_name": "pazeshun/jsk_apc",
"id": "6ed3096b057c0b35cb3c5842211a18f84d5e2c21",
"size": "3923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "archives/demo_real.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "101871"
},
{
"name": "CMake",
"bytes": "42995"
},
{
"name": "Common Lisp",
"bytes": "695864"
},
{
"name": "Dockerfile",
"bytes": "1503"
},
{
"name": "HTML",
"bytes": "6364"
},
{
"name": "Python",
"bytes": "406153"
},
{
"name": "Shell",
"bytes": "4475"
}
],
"symlink_target": ""
} |
from base64 import b64encode
from flask_pluginengine import current_plugin
from indico_piwik.queries.base import PiwikQueryReportEventBase
class PiwikQueryReportEventGraphBase(PiwikQueryReportEventBase):
    """Base Piwik query for retrieving PNG graphs."""

    def call(self, apiModule, apiAction, height=None, width=None, graphType='verticalBar', **query_params):
        # Only forward the size parameters that were actually supplied.
        if height is not None:
            query_params['height'] = height
        if width is not None:
            query_params['width'] = width
        return super().call(method='ImageGraph.get', apiModule=apiModule, apiAction=apiAction,
                            aliasedGraph='1', graphType=graphType, **query_params)

    def get_result(self):
        """Perform the call and return the graph data.

        :return: Encoded PNG graph data string to be inserted in a `src`
                 attribute of a HTML img tag, or ``None`` on failure.
        """
        png = self.call()
        if png is None:
            return
        # Piwik answers with this plain-text error when PHP-GD is missing.
        if png.startswith(b'GD extension must be loaded'):
            current_plugin.logger.warning('Piwik server answered on ImageGraph.get: %s', png)
            return
        encoded = b64encode(png).decode()
        return 'data:image/png;base64,{}'.format(encoded)
class PiwikQueryReportEventGraphCountries(PiwikQueryReportEventGraphBase):
    """Horizontal-bar PNG graph of visits per country over the event period."""

    def call(self, **query_params):
        return super().call(apiModule='UserCountry', apiAction='getCountry',
                            period='range', width=490, height=260,
                            graphType='horizontalBar', **query_params)
class PiwikQueryReportEventGraphDevices(PiwikQueryReportEventGraphBase):
    """Horizontal-bar PNG graph of visits per operating system."""

    def call(self, **query_params):
        return super().call(apiModule='UserSettings', apiAction='getOS',
                            period='range', width=320, height=260,
                            graphType='horizontalBar', **query_params)
| {
"content_hash": "cb6d896c1855b594f8ca0988a5ccc1e9",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 115,
"avg_line_length": 42.23255813953488,
"alnum_prop": 0.6607929515418502,
"repo_name": "ThiefMaster/indico-plugins",
"id": "823399b4ede2b188aa6384c07b6a6ef6a3b13257",
"size": "2059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "piwik/indico_piwik/queries/graphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4662"
},
{
"name": "HTML",
"bytes": "48203"
},
{
"name": "JavaScript",
"bytes": "15189"
},
{
"name": "Python",
"bytes": "307878"
},
{
"name": "Shell",
"bytes": "2172"
}
],
"symlink_target": ""
} |
from scout.constants import USER_DEFAULT_TRACKS
from scout.models import User
def build_user(user_info):
    """Build a user object

    Args:
        user_info(dict): A dictionary with user information

    Returns:
        user_obj(scout.models.User)

    Raises:
        KeyError: if the mandatory "email" or "name" keys are missing
    """
    try:
        email = user_info["email"]
    except KeyError as err:
        # Chain the original error so the traceback shows the failed lookup.
        # (Also fixes the grammar of the message: "a email" -> "an email".)
        raise KeyError("A user has to have an email") from err

    try:
        name = user_info["name"]
    except KeyError as err:
        raise KeyError("A user has to have a name") from err

    user_obj = User(email=email, name=name, id=user_info.get("id"), igv_tracks=USER_DEFAULT_TRACKS)

    ##TODO check that these are on the correct format
    # Copy the optional fields that are present, without repeating the
    # same if-block three times.
    for key in ("roles", "location", "institutes"):
        if key in user_info:
            user_obj[key] = user_info[key]

    return user_obj
| {
"content_hash": "fab83bd42e1c14acc5ddf53465754c39",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 99,
"avg_line_length": 26.166666666666668,
"alnum_prop": 0.6284501061571125,
"repo_name": "Clinical-Genomics/scout",
"id": "69b28b11629f5f793a7057d02b8503c129d16f25",
"size": "942",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scout/build/user.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12516"
},
{
"name": "Dockerfile",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "911931"
},
{
"name": "JavaScript",
"bytes": "32692"
},
{
"name": "Makefile",
"bytes": "1046"
},
{
"name": "Python",
"bytes": "2419990"
}
],
"symlink_target": ""
} |
import diventi.accounts.models
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the custom DiventiUserManager as
    # the default manager for DiventiUser. State-only change -- it alters no
    # database schema.

    dependencies = [
        ('accounts', '0231_auto_20190820_2207'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='diventiuser',
            managers=[
                ('objects', diventi.accounts.models.DiventiUserManager()),
            ],
        ),
    ]
| {
"content_hash": "e3ab958e94400060a59163d73796a656",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 22.38888888888889,
"alnum_prop": 0.5831265508684863,
"repo_name": "flavoi/diventi",
"id": "c1db4b44fcd65d27c85655efc451cc1b64e59ca7",
"size": "452",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/accounts/migrations/0232_auto_20191006_1625.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function, division, absolute_import
from shutil import rmtree
from tempfile import mkdtemp
class WithTempDir(object):
    """Test-case mixin providing a class-level temporary directory.

    The directory is created once per class in ``setUpClass`` (exposed as
    ``cls.tempdir``) and deleted again in ``tearDownClass``.
    """

    tempdir = None  # path of the directory, set by setUpClass

    @classmethod
    def setUpClass(cls):
        super(WithTempDir, cls).setUpClass()
        cls.tempdir = mkdtemp()

    @classmethod
    def tearDownClass(cls):
        tempdir = cls.tempdir
        if tempdir:
            rmtree(tempdir)
        super(WithTempDir, cls).tearDownClass()
| {
"content_hash": "4ab23fda6918550c084521593700b3af",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 82,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.6776315789473685,
"repo_name": "climapulse/dj-bgfiles",
"id": "c15bafd635bbe1d09e267858ba2edb0c218f26a7",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1247"
},
{
"name": "Python",
"bytes": "89794"
}
],
"symlink_target": ""
} |
# Python 2 maintenance script for HP Comware switches: tears down an EVI
# (Ethernet Virtual Interconnect) "cloud burst" configuration.
__author__ = 'Chuck Dilts'

import comware

# Remove the EVI tunnel interface and its site/designated-VLAN settings.
comware.CLI('system-view ; undo int tun 1 ; undo evi site-id ; undo evi designated-vlan ; return ')
# Disable EVI on the uplink interface.
comware.CLI('system-view ; interface GigabitEthernet2/0 ; undo evi enable ; return ')

print 'EVI has been removed'
| {
"content_hash": "aab686ce64a63a3083ff06165321a1e7",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 99,
"avg_line_length": 22.083333333333332,
"alnum_prop": 0.7056603773584905,
"repo_name": "networkingdvi/HPN-Scripting",
"id": "989dfab3e788026103080de915b054efe28ab6db",
"size": "372",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "remove-cloud-burst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "108117"
}
],
"symlink_target": ""
} |
"""
Test of basic math operations on the Tensors and compare with numpy results
The Tensor types includes GPU and CPU Tensors
"""
from neon.backends import gen_backend
import numpy as np
def init_helper(lib, inA, inB, dtype):
    """Build backend tensors A and B from the inputs plus an empty output C.

    ``lib`` is either numpy itself or a neon backend exposing the same
    ``array``/``empty`` API.
    """
    tensor_a = lib.array(inA, dtype=dtype)
    tensor_b = lib.array(inB, dtype=dtype)
    out = lib.empty(inB.shape, dtype=dtype)
    return tensor_a, tensor_b, out
def math_helper(lib, op, inA, inB, dtype):
    """Apply the binary operation named by ``op`` ('+', '-', '*', '/', '>',
    '>=', '<' or '<=') element-wise on the given backend and return the
    result tensor C (left untouched for an unknown ``op``)."""
    A, B, C = init_helper(lib, inA, inB, dtype)
    operations = {
        '+': lambda x, y: x + y,
        '-': lambda x, y: x - y,
        '*': lambda x, y: x * y,
        '/': lambda x, y: x / y,
        '>': lambda x, y: x > y,
        '>=': lambda x, y: x >= y,
        '<': lambda x, y: x < y,
        '<=': lambda x, y: x <= y,
    }
    if op in operations:
        C[:] = operations[op](A, B)
    return C
def compare_helper(op, inA, inB, dtype):
    """Run ``op`` through numpy and the neon GPU/CPU backends and check the
    backend results against the numpy reference.

    For integer dtypes the reference is rounded, clipped to the dtype's
    range and cast, mirroring what the backends do.
    """
    numpy_result = math_helper(np, op, inA, inB, dtype=np.float32)

    if np.dtype(dtype).kind == 'i' or np.dtype(dtype).kind == 'u':
        numpy_result = np.around(numpy_result)
        numpy_result = numpy_result.clip(np.iinfo(dtype).min, np.iinfo(dtype).max)
        numpy_result = numpy_result.astype(dtype)

    if dtype in (np.float32, np.float16):
        gpu = gen_backend(backend='gpu', default_dtype=dtype)
        nervanaGPU_result = math_helper(gpu, op, inA, inB, dtype=dtype)
        nervanaGPU_result = nervanaGPU_result.get()
        # BUG FIX: the original discarded np.allclose's return value, so a
        # mismatch could never fail the test. Assert it instead.
        assert np.allclose(numpy_result, nervanaGPU_result, rtol=0, atol=1e-5)

    cpu = gen_backend(backend='cpu', default_dtype=dtype)
    nervanaCPU_result = math_helper(cpu, op, inA, inB, dtype=dtype)
    nervanaCPU_result = nervanaCPU_result.get()
    assert np.allclose(numpy_result, nervanaCPU_result, rtol=0, atol=1e-5)
def rand_unif(dtype, dims):
    """Return a random array of shape ``dims`` with the requested ``dtype``.

    Floats are drawn uniformly from [-1, 1]; integers uniformly from the
    full range of the integer type.
    """
    if np.dtype(dtype).kind == 'f':
        return np.random.uniform(-1, 1, dims).astype(dtype)
    else:
        iinfo = np.iinfo(dtype)
        samples = np.around(np.random.uniform(iinfo.min, iinfo.max, dims))
        # BUG FIX: the original returned the clipped float64 samples without
        # casting, so integer dtypes silently yielded float arrays.
        return samples.clip(iinfo.min, iinfo.max).astype(dtype)
def test_math():
    """Compare all supported element-wise ops between numpy and the neon
    backends, for float32 and float16 inputs."""
    dims = (1024, 1024)

    for dtype in (np.float32, np.float16):
        randA = rand_unif(dtype, dims)
        randB = rand_unif(dtype, dims)

        # Same ops, in the same order, as the original explicit calls.
        for op in ('+', '-', '*', '>', '>=', '<', '<='):
            compare_helper(op, randA, randB, dtype)
| {
"content_hash": "c632debdd99a3c6bbaa8602c29f80d2b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 98,
"avg_line_length": 31.31645569620253,
"alnum_prop": 0.5844785772029103,
"repo_name": "misko/neon",
"id": "466b814ddde7eaae7927f744dc04f5929d292bd5",
"size": "3236",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neon/backends/tests/test_tensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6534"
},
{
"name": "C++",
"bytes": "13448"
},
{
"name": "CSS",
"bytes": "810211"
},
{
"name": "Cuda",
"bytes": "87750"
},
{
"name": "Makefile",
"bytes": "8982"
},
{
"name": "Python",
"bytes": "777025"
}
],
"symlink_target": ""
} |
from canvas_sdk import client, utils
def get_kaltura_config(request_ctx, **request_kwargs):
    """
    Return the config information for the Kaltura plugin in json format.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :return: Get Kaltura config
    :rtype: requests.Response (with void data)

    """
    # The path contains no placeholders, so the original ``.format()`` call
    # was a no-op and has been dropped.
    url = request_ctx.base_api_url + '/v1/services/kaltura'
    return client.get(request_ctx, url, **request_kwargs)
def start_kaltura_session(request_ctx, **request_kwargs):
    """
    Start a new Kaltura session, so that new media can be recorded and uploaded
    to this Canvas instance's Kaltura instance.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :return: Start Kaltura session
    :rtype: requests.Response (with void data)

    """
    # The path contains no placeholders, so the original ``.format()`` call
    # was a no-op and has been dropped.
    url = request_ctx.base_api_url + '/v1/services/kaltura_session'
    return client.post(request_ctx, url, **request_kwargs)
| {
"content_hash": "5217581deafaef0157eeafa364461329",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 28.46153846153846,
"alnum_prop": 0.6702702702702703,
"repo_name": "penzance/canvas_python_sdk",
"id": "6530fb4d54ec6f25a4644f27e2f1475cc4c67e15",
"size": "1110",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "canvas_sdk/methods/services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1066725"
}
],
"symlink_target": ""
} |
"""Embedding result protobuf."""
import dataclasses
from typing import Any, List
import numpy as np
from tensorflow_lite_support.cc.task.processor.proto import embedding_pb2
from tensorflow_lite_support.python.task.core.optional_dependencies import doc_controls
# Short private aliases for the generated protobuf message types.
_FeatureVectorProto = embedding_pb2.FeatureVector
_EmbeddingProto = embedding_pb2.Embedding
_EmbeddingResultProto = embedding_pb2.EmbeddingResult
@dataclasses.dataclass
class FeatureVector:
  """A dense feature vector.

  Only one of the two fields is ever present.
  Feature vectors are assumed to be one-dimensional and L2-normalized.

  Attributes:
    value: A NumPy array indidcating the raw output of the embedding layer. The
      datatype of elements in the array can be either float or uint8 if
      `quantize` is set to True in `EmbeddingOptions`.
  """

  value: np.ndarray

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _FeatureVectorProto:
    """Generates a protobuf object to pass to the C++ layer."""
    # Float vectors go into value_float; quantized uint8 vectors are packed
    # as raw bytes into value_string.
    if self.value.dtype == float:
      return _FeatureVectorProto(value_float=self.value)

    elif self.value.dtype == np.uint8:
      return _FeatureVectorProto(value_string=bytes(self.value))

    else:
      raise ValueError("Invalid dtype. Only float and np.uint8 are supported.")

  @classmethod
  @doc_controls.do_not_generate_docs
  def create_from_pb2(cls, pb2_obj: _FeatureVectorProto) -> "FeatureVector":
    """Creates a `FeatureVector` object from the given protobuf object."""
    # NOTE(review): these truthiness checks mean an *empty* feature vector
    # (both fields empty/falsy) falls through to the ValueError below --
    # confirm that zero-length embeddings cannot occur upstream.
    if pb2_obj.value_float:
      return FeatureVector(
          value=np.array(pb2_obj.value_float, dtype=float))

    elif pb2_obj.value_string:
      return FeatureVector(
          value=np.array(bytearray(pb2_obj.value_string), dtype=np.uint8))

    else:
      raise ValueError("Either value_float or value_string must exist.")

  def __eq__(self, other: Any) -> bool:
    """Checks if this object is equal to the given object.

    Args:
      other: The object to be compared with.

    Returns:
      True if the objects are equal.
    """
    if not isinstance(other, FeatureVector):
      return False

    # Compare via the proto form so float/uint8 payloads compare uniformly.
    return self.to_pb2().__eq__(other.to_pb2())
@dataclasses.dataclass
class Embedding:
  """Result produced by one of the embedder model output layers.

  Attributes:
    feature_vector: The output feature vector.
    output_index: The index of the model output layer that produced this feature
      vector.
  """

  feature_vector: FeatureVector
  output_index: int

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _EmbeddingProto:
    """Generates a protobuf object to pass to the C++ layer."""
    vector_proto = self.feature_vector.to_pb2()
    return _EmbeddingProto(
        feature_vector=vector_proto, output_index=self.output_index)

  @classmethod
  @doc_controls.do_not_generate_docs
  def create_from_pb2(cls, pb2_obj: _EmbeddingProto) -> "Embedding":
    """Creates a `Embedding` object from the given protobuf object."""
    vector = FeatureVector.create_from_pb2(pb2_obj.feature_vector)
    return Embedding(feature_vector=vector, output_index=pb2_obj.output_index)

  def __eq__(self, other: Any) -> bool:
    """Checks if this object is equal to the given object.

    Args:
      other: The object to be compared with.

    Returns:
      True if the objects are equal.
    """
    if not isinstance(other, Embedding):
      return False
    # Delegate to the proto representation for the field comparison.
    return self.to_pb2().__eq__(other.to_pb2())
@dataclasses.dataclass
class EmbeddingResult:
  """Embeddings produced by the Embedder.

  Attributes:
    embeddings: The embeddings produced by each of the model output layers.
      Except in advanced cases, the embedding model has a single output layer,
      and this list is thus made of a single element feature vector.
  """

  embeddings: List[Embedding]

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _EmbeddingResultProto:
    """Generates a protobuf object to pass to the C++ layer."""
    embedding_protos = [item.to_pb2() for item in self.embeddings]
    return _EmbeddingResultProto(embeddings=embedding_protos)

  @classmethod
  @doc_controls.do_not_generate_docs
  def create_from_pb2(cls, pb2_obj: _EmbeddingResultProto) -> "EmbeddingResult":
    """Creates a `EmbeddingResult` object from the given protobuf object."""
    converted = [
        Embedding.create_from_pb2(item) for item in pb2_obj.embeddings
    ]
    return EmbeddingResult(embeddings=converted)

  def __eq__(self, other: Any) -> bool:
    """Checks if this object is equal to the given object.

    Args:
      other: The object to be compared with.

    Returns:
      True if the objects are equal.
    """
    if not isinstance(other, EmbeddingResult):
      return False
    # Delegate to the proto representation for the field comparison.
    return self.to_pb2().__eq__(other.to_pb2())
| {
"content_hash": "59856e79db8441b0b16a4d055314b94b",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 87,
"avg_line_length": 30.258064516129032,
"alnum_prop": 0.6978678038379531,
"repo_name": "nwjs/chromium.src",
"id": "3ca5b14c18018a5ab7b28ad0285c47ff21a7e8af",
"size": "5298",
"binary": false,
"copies": "9",
"ref": "refs/heads/nw70",
"path": "third_party/tflite_support/src/tensorflow_lite_support/python/task/processor/proto/embedding_pb2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
__author__ = 'luqitao'
import os
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import log as LOG
# Absolute directory containing this module, resolved at import time.
CURRENT_PATH = os.path.dirname(os.path.abspath(__file__))
def _subprocess_setup():
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect. Restore the OS default so children get
    # killed by SIGPIPE instead of inheriting Python's ignore handler.
    # (Used as Popen's preexec_fn, i.e. runs in the child after fork.)
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None):
    """Open a subprocess with SIGPIPE restored to the default and all
    inherited file descriptors closed.

    Thin wrapper around (eventlet's green) ``subprocess.Popen``.
    """
    return subprocess.Popen(
        args,
        shell=shell,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        preexec_fn=_subprocess_setup,
        close_fds=True,
        env=env,
    )
def create_process(cmd, root_helper=None, addl_env=None):
    """Create a process object for the given command.

    The return value will be a tuple of the process object and the
    list of command arguments used to create it.
    """
    # Prepend the root-helper command (e.g. "sudo ...") when given;
    # shlex.split handles helpers that carry their own arguments.
    if root_helper:
        cmd = shlex.split(root_helper) + cmd
    # Stringify every argument (Python 2 ``map`` returns a list here).
    cmd = map(str, cmd)

    LOG.debug("Running command: %s" % cmd)
    # Run with the parent environment plus any requested additions.
    env = os.environ.copy()
    if addl_env:
        env.update(addl_env)

    obj = subprocess_popen(cmd, shell=False,
                           stdin=subprocess.PIPE,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,
                           env=env)

    return obj, cmd
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
            check_exit_code=True, return_stderr=False, log_fail_as_error=True,
            extra_ok_codes=None):
    """Run a command to completion and return its output.

    Returns stdout, or (stdout, stderr) when ``return_stderr`` is True.
    Raises RuntimeError on a non-zero exit code when ``check_exit_code``
    is True; exit codes listed in ``extra_ok_codes`` are treated as success.
    """
    try:
        obj, cmd = create_process(cmd, root_helper=root_helper,
                                  addl_env=addl_env)
        # Feed optional stdin data and wait for the process to finish.
        _stdout, _stderr = (process_input and
                            obj.communicate(process_input) or
                            obj.communicate())
        obj.stdin.close()
        m = "\nCommand: %s\nExit code: %s\nStdout: %r\n, Stderr: %r" % (cmd, obj.returncode, _stdout, _stderr)

        # Exit codes the caller declared acceptable are normalized to None
        # so neither the error log nor the RuntimeError below triggers.
        extra_ok_codes = extra_ok_codes or []
        if obj.returncode and obj.returncode in extra_ok_codes:
            obj.returncode = None

        if obj.returncode and log_fail_as_error:
            LOG.error(m)
        else:
            LOG.debug(m)

        if obj.returncode and check_exit_code:
            raise RuntimeError(m)
    finally:
        # NOTE(termie): this appears to be necessary to let the subprocess
        #               call clean something up in between calls, without
        #               it two execute calls in a row hangs the second one
        greenthread.sleep(0)

    return return_stderr and (_stdout, _stderr) or _stdout
| {
"content_hash": "860b46bdd6d6361ddebb8c41c8ffd07f",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 111,
"avg_line_length": 33.48148148148148,
"alnum_prop": 0.5859144542772862,
"repo_name": "Hybrid-Cloud/badam",
"id": "ba6ebddd63e855c0d9199c4d3078861ec5502c05",
"size": "2756",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patches_tool/vcloud_patch/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "29372474"
},
{
"name": "Shell",
"bytes": "17334"
}
],
"symlink_target": ""
} |
"""
Tests for the JSON encoder / decoder.
"""
import datetime
import decimal
import sys
import unicodedata
import unittest
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata.value_mapping import json_format
__version__ = "$Revision-Id:$"


# Non-ASCII reference character used to exercise encoding error handling.
_AE = unicodedata.lookup("LATIN SMALL LETTER A WITH DIAERESIS")
class MetadataValueTestCase(unittest.TestCase):
    """Tests for the ``MetadataValue`` wrapper type."""

    def setUp(self):
        self._value = json_format.MetadataValue("None")

    def testComparison(self):
        # NOTE: assertEquals/assertNotEquals are deprecated aliases; replaced
        # by assertEqual/assertNotEqual throughout this class.
        self.assertEqual(self._value, self._value)
        self.assertEqual(hash(self._value), hash(self._value))

        self.assertNotEqual(self._value, json_format.MetadataValue(12))
        self.assertNotEqual(hash(self._value), hash(json_format.MetadataValue(12)))
        self.assertNotEqual(self._value, None)
        self.assertNotEqual(hash(self._value), hash(None))

    def testRepresentation(self):
        self.assertEqual(str(self._value), "'None'")

    def testGuessRepresentation(self):
        self.assertEqual(self._value.guessRepresentation(), ["None"])

    def testValue(self):
        self.assertEqual(self._value.value, "None")
class ConvertFromPersistenceFormatTestCase(unittest.TestCase):
    """Tests decoding of persisted JSON strings into Python values."""
    # NOTE: deprecated assertEquals replaced by assertEqual throughout.

    def testError(self):
        # Invalid JSON
        self.assertRaises(PersistenceError,
                          json_format.convertFromPersistenceFormat, "as")
        # No string
        self.assertRaises(PersistenceError,
                          json_format.convertFromPersistenceFormat, None)

    def testBoolValue(self):
        self.assertTrue(json_format.convertFromPersistenceFormat("true"))
        self.assertFalse(json_format.convertFromPersistenceFormat("false"))

    def testStringValue(self):
        self.assertEqual(json_format.convertFromPersistenceFormat(u'"test"'), u"test")
        self.assertEqual(json_format.convertFromPersistenceFormat('"test"'), "test")

    def testNumericValue(self):
        # Numbers are decoded as Decimal, not float.
        self.assertEqual(json_format.convertFromPersistenceFormat("4.5"),
                         decimal.Decimal("4.5"))
        self.assertEqual(json_format.convertFromPersistenceFormat("5"),
                         decimal.Decimal("5"))

    def testDatetimeValue(self):
        # From Iso8601.
        persistedValue = u'"2006-10-16T08:19:39Z"'
        metdataValue = json_format.convertFromPersistenceFormat(persistedValue)
        self.assertEqual(metdataValue, datetime.datetime(2006, 10, 16, 10, 19, 39))

    def testListValue(self):
        # Empty list
        metdataValue = json_format.convertFromPersistenceFormat("[]")
        self.assertEqual(metdataValue, list())

        # Mixed list
        self.assertEqual(
            json_format.convertFromPersistenceFormat('["a", "b", 1, "2006-10-16T08:19:39Z"]'),
            ["a", "b", decimal.Decimal(1), datetime.datetime(2006, 10, 16, 10, 19, 39)])

        # Nested list
        jsonString = '[[["2006-10-16T08:19:39Z"]]]'
        self.assertEqual(
            json_format.convertFromPersistenceFormat(jsonString),
            [[[datetime.datetime(2006, 10, 16, 10, 19, 39)]]])

    def testDictValues(self):
        # Empty dict
        metdataValue = json_format.convertFromPersistenceFormat("{}")
        self.assertEqual(metdataValue, dict())

        # Mixed and nested dict
        jsonString = '{"name": "me", "age": 30, ' \
                     + '"address":{"street": "there", "number": 1, ' \
                     + '"city": {"name": "there", "build": ["2006-10-16T08:19:39Z"]}}}'
        expectedResult = {"name": "me", "age": decimal.Decimal(30),
                          "address":{"street": "there", "number": decimal.Decimal(1),
                                     "city": {"name": "there",
                                              "build": [datetime.datetime(2006, 10, 16, 10, 19, 39)]}}}
        self.assertEqual(json_format.convertFromPersistenceFormat(jsonString),
                         expectedResult)
class GetPersistenceRepresentationTestCase(unittest.TestCase):
    """Tests encoding of Python values into persisted JSON strings."""
    # NOTE: deprecated assertEquals replaced by assertEqual throughout; the
    # misspelled local `orignalFunction` was also renamed.

    def testError(self):
        # Unsupported type
        self.assertRaises(
            PersistenceError,
            json_format.convertToPersistenceFormat, json_format.MetadataValue(""))

    def testBoolValue(self):
        self.assertEqual(json_format.convertToPersistenceFormat(True), "true")
        self.assertEqual(json_format.convertToPersistenceFormat(False), "false")

    def testNoneValue(self):
        self.assertEqual(json_format.convertToPersistenceFormat(None), "null")

    def testStringValue(self):
        self.assertEqual(json_format.convertToPersistenceFormat(u"test"), '"test"')
        self.assertEqual(json_format.convertToPersistenceFormat("test"), '"test"')

        # Invalid raw string: force encoding detection to fail.
        originalFunction = sys.getdefaultencoding
        sys.getdefaultencoding = lambda: None  # Mock encoding determination
        try:
            self.assertRaises(
                PersistenceError, json_format.convertToPersistenceFormat, _AE.encode("Latin-1)"))
        finally:
            sys.getdefaultencoding = originalFunction

    def testNumericValue(self):
        # Decimals
        persistedValue = decimal.Decimal("4.5")
        self.assertEqual(json_format.convertToPersistenceFormat(persistedValue), u"4.5")
        persistedValue = decimal.Decimal("5")
        self.assertEqual(json_format.convertToPersistenceFormat(persistedValue), u"5")

        # Raw integer
        self.assertEqual(json_format.convertToPersistenceFormat(5), u"5")

        # Raw float
        self.assertEqual(json_format.convertToPersistenceFormat(4.5), u"4.5")

    def testDatetimeValue(self):
        persistedValue = datetime.datetime(2006, 10, 16, 10, 19, 39)
        self.assertEqual(json_format.convertToPersistenceFormat(persistedValue),
                         '"2006-10-16T08:19:39Z"')

    def testListValue(self):
        persistedValue = [decimal.Decimal("2006"), decimal.Decimal("10.0"),
                          decimal.Decimal("16"), decimal.Decimal("10.01")]
        self.assertEqual(json_format.convertToPersistenceFormat(persistedValue),
                         u"[2006, 10.0, 16, 10.01]")
        persistedValue = list()
        self.assertEqual(json_format.convertToPersistenceFormat(persistedValue),
                         u"[]")

    def testDictValue(self):
        self.assertEqual(json_format.convertToPersistenceFormat(dict()), u"{}")
| {
"content_hash": "4d22cfc5c7a5f088947c7022c49e2243",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 97,
"avg_line_length": 41.43558282208589,
"alnum_prop": 0.6103050044418122,
"repo_name": "DLR-SC/DataFinder",
"id": "79256cd657d83a00f3476d7549633f32617afbde",
"size": "8448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unittest/datafinder_test/persistence/metadata/value_mapping/json_format_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |
"""Tests for model.py.
"""
# TODO(crbug.com/1148168): Set up these tests to run on the tryjobs.
import unittest
from model import Model
class ModelTest(unittest.TestCase):
"""Tests for model.py"""
def assert_project(self, project, name, id_, summary, owners):
self.assertEqual(project.name, name)
self.assertEqual(project.id, id_)
self.assertEqual(project.summary.strip(), summary)
self.assertEqual(len(project.owners), len(owners))
for actual, expected in zip(project.owners, owners):
self.assertEqual(actual, expected)
def assert_event(self, event, name, summary):
self.assertEqual(event.name, name)
self.assertEqual(event.summary.strip(), summary)
def assert_metric(self, metric, name, type_, summary):
self.assertEqual(metric.name, name)
self.assertEqual(metric.type, type_)
self.assertEqual(metric.summary.strip(), summary)
def assert_model_raises(self, xml):
raised = False
try:
Model(xml)
except ValueError:
raised = True
self.assertTrue(raised)
def test_valid_xml(self):
xml = """\
<structured-metrics>
<project name="ProjectOne">
<owner>test1@chromium.org</owner>
<owner>test2@chromium.org</owner>
<id>none</id>
<summary> Test project. </summary>
<event name="EventOne">
<summary> Test event. </summary>
<metric name="MetricOne" type="int">
<summary> Test metric. </summary>
</metric>
<metric name="MetricTwo" type="hmac-string">
<summary> Test metric. </summary>
</metric>
</event>
<event name="EventTwo">
<summary> Test event. </summary>
<metric name="MetricThree" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
<project name="ProjectTwo">
<owner>test@chromium.org</owner>
<id>uma</id>
<summary> Test project. </summary>
<event name="EventThree">
<summary> Test event. </summary>
<metric name="MetricFour" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>"""
data = Model(xml)
self.assertEqual(len(data.projects), 2)
project_one, project_two = data.projects
self.assert_project(project_one, 'ProjectOne', 'none', 'Test project.',
('test1@chromium.org', 'test2@chromium.org'))
self.assert_project(project_two, 'ProjectTwo', 'uma', 'Test project.',
('test@chromium.org', ))
self.assertEqual(len(project_one.events), 2)
self.assertEqual(len(project_two.events), 1)
event_one, event_two = project_one.events
event_three, = project_two.events
self.assert_event(event_one, 'EventOne', 'Test event.')
self.assert_event(event_two, 'EventTwo', 'Test event.')
self.assert_event(event_three, 'EventThree', 'Test event.')
self.assertEqual(len(event_one.metrics), 2)
self.assertEqual(len(event_two.metrics), 1)
self.assertEqual(len(event_three.metrics), 1)
metric_one, metric_two = event_one.metrics
metric_three, = event_two.metrics
metric_four, = event_three.metrics
self.assert_metric(metric_one, 'MetricOne', 'int', 'Test metric.')
self.assert_metric(metric_two, 'MetricTwo', 'hmac-string', 'Test metric.')
self.assert_metric(metric_three, 'MetricThree', 'int', 'Test metric.')
self.assert_metric(metric_four, 'MetricFour', 'int', 'Test metric.')
def test_owners_validation(self):
# No owner for project.
self.assert_model_raises("""\
<structured-metrics>
<project name="project">
<id>uma</id>
<summary> Test project. </summary>
<event name="EventThree">
<summary> Test event. </summary>
<metric name="MetricFour" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
# Owner is username not email.
self.assert_model_raises("""\
<structured-metrics>
<project name="project">
<owner>test@</owner>
<id>uma</id>
<summary> Test project. </summary>
<event name="EventThree">
<summary> Test event. </summary>
<metric name="MetricFour" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
def test_id_validation(self):
# Missing ID
self.assert_model_raises("""\
<structured-metrics>
<project name="MyProject">
<owner>test@chromium.org</owner>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="MyMetric" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
# Invalid ID
self.assert_model_raises("""\
<structured-metrics>
<project name="MyProject">
<owner>test@chromium.org</owner>
<id>invalid value</id>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="MyMetric" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
def test_type_validation(self):
# Missing type
self.assert_model_raises("""\
<structured-metrics>
<project name="MyProject">
<owner>test@chromium.org</owner>
<id>none</id>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="MyMetric">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
# Invalid type
self.assert_model_raises("""\
<structured-metrics>
<project name="MyProject">
<owner>test@chromium.org</owner>
<id>none</id>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="MyMetric" type="invalid value">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
def test_duplicate_summaries(self):
self.assert_model_raises("""\
<structured-metrics>
<project name="MyProject">
<owner>test@chromium.org</owner>
<id>none</id>
<summary> Test project. </summary>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="MyMetric" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
def test_duplicate_project_names(self):
# Two projects with name "Duplicate"
self.assert_model_raises("""\
<structured-metrics>
<project name="Duplicate">
<owner>test@</owner>
<id>uma</id>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="MyMetric" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
<project name="Duplicate">
<owner>test@</owner>
<id>uma</id>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="MyMetric" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
def test_duplicate_event_names(self):
# Two events with name "Duplicate"
self.assert_model_raises("""\
<structured-metrics>
<project name="MyProject">
<owner>test@</owner>
<id>uma</id>
<summary> Test project. </summary>
<event name="Duplicate">
<summary> Test event. </summary>
<metric name="MyMetric" type="int">
<summary> Test metric. </summary>
</metric>
</event>
<event name="Duplicate">
<summary> Test event. </summary>
<metric name="MyMetric" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
def test_duplicate_metric_names(self):
# Two metrics with name "Duplicate"
self.assert_model_raises("""\
<structured-metrics>
<project name="MyProject">
<owner>test@</owner>
<id>uma</id>
<summary> Test project. </summary>
<event name="MyEvent">
<summary> Test event. </summary>
<metric name="Duplicate" type="int">
<summary> Test metric. </summary>
</metric>
<metric name="Duplicate" type="int">
<summary> Test metric. </summary>
</metric>
</event>
</project>
</structured-metrics>""")
# Allows running the tests directly: `python model_unittest.py`.
if __name__ == '__main__':
  unittest.main()
| {
"content_hash": "f359a02257ce3541b6741e66085aa236",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 78,
"avg_line_length": 32.95238095238095,
"alnum_prop": 0.5456234516928159,
"repo_name": "scheib/chromium",
"id": "aac8a10b4567b30f07670f1c92ac0b423af8bc3a",
"size": "9897",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/metrics/structured/model_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: drops the ``shot`` and ``is_fixed_to_shot``
    columns from the ``h1ds_usersignal`` table."""

    def forwards(self, orm):
        # Apply: remove both columns from UserSignal.
        # Deleting field 'UserSignal.shot'
        db.delete_column(u'h1ds_usersignal', 'shot')

        # Deleting field 'UserSignal.is_fixed_to_shot'
        db.delete_column(u'h1ds_usersignal', 'is_fixed_to_shot')

    def backwards(self, orm):
        # Revert: re-add both columns with their previous defaults so the
        # migration can be rolled back (existing rows get NULL / True).
        # Adding field 'UserSignal.shot'
        db.add_column(u'h1ds_usersignal', 'shot',
                      self.gf('django.db.models.fields.IntegerField')(null=True, blank=True),
                      keep_default=False)

        # Adding field 'UserSignal.is_fixed_to_shot'
        db.add_column(u'h1ds_usersignal', 'is_fixed_to_shot',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)

    # Frozen ORM state auto-generated by South; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'h1ds.device': {
            'Meta': {'object_name': 'Device'},
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest_shot': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['h1ds.Shot']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        u'h1ds.filter': {
            'Meta': {'object_name': 'Filter'},
            'code': ('python_field.fields.PythonCodeField', [], {}),
            'data_dim': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['h1ds.FilterDim']", 'symmetrical': 'False'}),
            'data_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['h1ds.FilterDtype']", 'symmetrical': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        u'h1ds.filterdim': {
            'Meta': {'object_name': 'FilterDim'},
            'code': ('python_field.fields.PythonCodeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'h1ds.filterdtype': {
            'Meta': {'object_name': 'FilterDtype'},
            'code': ('python_field.fields.PythonCodeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'h1ds.h1dssignal': {
            'Meta': {'object_name': 'H1DSSignal'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'})
        },
        u'h1ds.h1dssignalinstance': {
            'Meta': {'ordering': "('-time',)", 'object_name': 'H1DSSignalInstance'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'signal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.H1DSSignal']"}),
            'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'})
        },
        u'h1ds.node': {
            'Meta': {'object_name': 'Node'},
            'dtype': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'has_data': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'n_channels': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'n_dimensions': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['h1ds.Node']"}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'path_checksum': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'shot': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Shot']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
        },
        u'h1ds.pagelet': {
            'Meta': {'object_name': 'Pagelet'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'pagelet_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '2048'})
        },
        u'h1ds.pageletcoordinates': {
            'Meta': {'object_name': 'PageletCoordinates'},
            'coordinates': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'pagelet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Pagelet']"}),
            'worksheet': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Worksheet']"})
        },
        u'h1ds.shot': {
            'Meta': {'object_name': 'Shot'},
            'device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['h1ds.Device']", 'on_delete': 'models.PROTECT'}),
            'number': ('django.db.models.fields.PositiveIntegerField', [], {'primary_key': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {})
        },
        u'h1ds.usersignal': {
            'Meta': {'object_name': 'UserSignal'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
            'ordering': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '2048'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'h1ds.worksheet': {
            'Meta': {'object_name': 'Worksheet'},
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'pagelets': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['h1ds.Pagelet']", 'through': u"orm['h1ds.PageletCoordinates']", 'symmetrical': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        }
    }
complete_apps = ['h1ds'] | {
"content_hash": "f115c696844c524c36807398bfe9e7f0",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 187,
"avg_line_length": 66.51552795031056,
"alnum_prop": 0.54132038472313,
"repo_name": "h1ds/h1ds",
"id": "5e6f18852e20cf7b5a562e47abcfa93292d075d8",
"size": "10733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h1ds/h1ds/migrations/0006_auto__del_field_usersignal_shot__del_field_usersignal_is_fixed_to_shot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "50883"
},
{
"name": "CSS",
"bytes": "134709"
},
{
"name": "ColdFusion",
"bytes": "146218"
},
{
"name": "HTML",
"bytes": "832336"
},
{
"name": "JavaScript",
"bytes": "2727170"
},
{
"name": "Lasso",
"bytes": "24109"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PHP",
"bytes": "55307"
},
{
"name": "Perl",
"bytes": "44428"
},
{
"name": "Python",
"bytes": "756639"
},
{
"name": "Shell",
"bytes": "1910"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration
# -- -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
#extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
#              'sphinx.ext.todo', 'sphinx.ext.coverage']
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              # 'sphinx.ext.intersphinx',
              'sphinx.ext.coverage']

todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
# (The previous dead `templates_path = []` initializer was removed: both
# branches below assign the list unconditionally.)
if os.getenv('HUDSON_PUBLISH_DOCS'):
    templates_path = ['_ga', '_templates']
else:
    templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'keystone'
copyright = u'2012, OpenStack, LLC'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2012.1'
# The full version, including alpha/beta/rc tags.
release = '2012.1-dev'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['old']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['keystone.']

# -- Options for man page output --------------------------------------------

# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
man_pages = [
    ('man/keystone-manage', 'keystone-manage', u'Keystone Management Utility',
     [u'OpenStack'], 1),
    ('man/keystone-all', 'keystone-all', u'Keystone Startup Command',
     [u'OpenStack'], 1),
]

# -- Options for HTML output

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ["."]
html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static', 'images']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# Stamp pages with the date of the latest git commit instead of a format
# string; os.popen yields '' when git is unavailable.
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'keystonedoc'

# -- Options for LaTeX output

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index', 'keystone.tex', u'Keystone Documentation',
     u'OpenStack', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for Texinfo output

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'keystone', u'Keystone Documentation',
     u'OpenStack', 'keystone', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
                       'nova': ('http://nova.openstack.org', None),
                       'swift': ('http://swift.openstack.org', None),
                       'glance': ('http://glance.openstack.org', None)}
| {
"content_hash": "9482224bd1e03f0d52288aca3c0f326a",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 79,
"avg_line_length": 32.14901960784314,
"alnum_prop": 0.6993169065625763,
"repo_name": "weiyuanke/mykeystone",
"id": "319faf0685749f88691eae3f0c8b5a8cfac426e6",
"size": "8620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "731676"
},
{
"name": "Shell",
"bytes": "5279"
}
],
"symlink_target": ""
} |
__author__ = 'Dima Potekhin'
import shutil
from epicycle.derkonfigurator.WorkspaceEntity import WorkspaceEntity
from epicycle.derkonfigurator.repository import Repository
class Workspace(WorkspaceEntity):
    """The top-level workspace: seeds a local config on first run, then
    discovers and configures every repository found below it."""

    DEFAULT_LOCAL_CONFIG_RESOURCE_NAME = "workspace_config.yaml.local.default"
    LOCAL_CONFIG_FILE_NAME = "workspace_config.yaml.local"

    def __init__(self, path, environment, reporter):
        super(Workspace, self).__init__(path, environment, self, reporter)
        self._repositories = []

    def configure(self):
        """Runs the full workspace configuration pass."""
        self.report("Configuring the workspace")
        with self.report_sub_level():
            if not self._init():
                # First run: a default config was just created; the user has
                # to fill it in before configuration can proceed.
                return
            self._load_repositories()
            self._configure_repositories()

    def _init(self):
        """Loads the local config, seeding a default one on first run.

        Returns True when configuration can proceed, False when the user
        still needs to edit the freshly created local config file.
        """
        self._local_config = self.directory.read_yaml(Workspace.LOCAL_CONFIG_FILE_NAME)
        if self._local_config:
            self._external_repositories_path = self._local_config['external_repositories']
            return True

        self.report("Initializing a fresh workspace!")
        default_config = self.environment.resources.to_full_path(
            Workspace.DEFAULT_LOCAL_CONFIG_RESOURCE_NAME)
        shutil.copy(default_config,
                    self.directory.to_full_path(Workspace.LOCAL_CONFIG_FILE_NAME))
        self.report("Please set-up Der Konfigurator by editing %s" % Workspace.LOCAL_CONFIG_FILE_NAME)
        self.report("Rerun after you finished configuring")
        return False

    def _load_repositories(self):
        """Collects a Repository for every subdirectory holding a config file."""
        config_dirs = self.directory.list_subdirs_with_file(Repository.CONFIG_FILE_NAME)
        self._repositories.extend(Repository(self, d.path) for d in config_dirs)

    def _configure_repositories(self):
        for repo in self._repositories:
            repo.configure()
| {
"content_hash": "b9260173cc73ee1cdbbc058bdc90e01e",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 113,
"avg_line_length": 37.5,
"alnum_prop": 0.6693333333333333,
"repo_name": "open-epicycle/epicycle.derkonfigurator-py",
"id": "a49db97757002bbd74e487cf5aede451e352384d",
"size": "1875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/epicycle.derkonfigurator-py/epicycle/derkonfigurator/workspace/Workspace.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "705"
},
{
"name": "C#",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "54738"
}
],
"symlink_target": ""
} |
"""
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import kodi
import log_utils # @UnusedImport
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants import VIDEO_TYPES
from salts_lib.constants import XHR
import scraper
# Root URL of the fmovie.co site; user-overridable via the addon setting.
BASE_URL = 'https://fmovie.co'
# XHR endpoint that resolves a page's video id into its stream sources.
INFO_URL = BASE_URL + '/video_info/iframe'
class Scraper(scraper.Scraper):
base_url = BASE_URL
def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
self.timeout = timeout
self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))
@classmethod
def provides(cls):
return frozenset([VIDEO_TYPES.MOVIE])
@classmethod
def get_name(cls):
return 'fmovie.co'
def get_sources(self, video):
source_url = self.get_url(video)
hosters = []
if not source_url or source_url == FORCE_NO_MATCH: return hosters
page_url = scraper_utils.urljoin(self.base_url, source_url)
html = self._http_get(page_url, cache_limit=.5)
match = re.search('var\s*video_id="([^"]+)', html)
if not match: return hosters
video_id = match.group(1)
data = {'v': video_id}
headers = {'Referer': page_url}
headers.update(XHR)
html = self._http_get(INFO_URL, data=data, headers=headers, cache_limit=.5)
sources = scraper_utils.parse_json(html, INFO_URL)
for source in sources:
match = re.search('url=(.*)', sources[source])
if not match: continue
stream_url = urllib.unquote(match.group(1))
host = scraper_utils.get_direct_hostname(self, stream_url)
if host == 'gvideo':
quality = scraper_utils.gv_get_quality(stream_url)
else:
quality = scraper_utils.height_get_quality(source)
stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
hosters.append(hoster)
return hosters
def search(self, video_type, title, year, season=''): # @UnusedVariable
results = []
search_url = scraper_utils.urljoin(self.base_url, '/results')
html = self._http_get(search_url, params={'q': title}, cache_limit=1)
pattern = 'class="video_title".*?href="([^"]+)">([^<]+).*?Year</b>:\s*(\d*)'
for match in re.finditer(pattern, html, re.DOTALL):
url, match_title, match_year = match.groups()
if not year or not match_year or year == match_year:
result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(url)}
results.append(result)
return results
| {
"content_hash": "2a837509ffb9be6158c425b20861b7e2",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 157,
"avg_line_length": 40.97701149425287,
"alnum_prop": 0.6286115007012623,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "068a0b3341bbfa20489ab8f218cad02a5b62989e",
"size": "3565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin.video.salts/scrapers/fmovie_scraper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
from app.NLP_LDA import *
def test_read_letters():
    """The example CSV should yield 38 shareholder letters."""
    # Forward slash works on every OS; the original 'test\\example...' path
    # only resolved on Windows (and '\e' is an invalid escape sequence).
    letters = read_letter_from_file("test/example_shareholders_letter.csv")
    assert len(letters) == 38
def test_read_company():
    """The example CSV should yield one company entry per letter row (38)."""
    # Fixed: this test previously called read_letter_from_file, so it merely
    # duplicated test_read_letters instead of covering the company reader.
    company = read_company_from_file("test/example_shareholders_letter.csv")
    assert len(company) == 38
def test_number_of_company():
    """There are 5 distinct companies in the example CSV."""
    # Forward slash instead of the Windows-only backslash path.
    company = read_company_from_file("test/example_shareholders_letter.csv")
    number_of_company = len(set(company))
    assert number_of_company == 5
def test_name_of_company():
    """The distinct company names match the known fixture set (trailing spaces included)."""
    # Forward slash instead of the Windows-only backslash path.
    company = read_company_from_file("test/example_shareholders_letter.csv")
    name_of_company = set(company)
    assert name_of_company == {'Aspen ', 'Adobe ', 'ea ', 'Citrix ', 'Compuware '}
| {
"content_hash": "d1ce85809b544a3bdf58dc423b5da4fe",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 82,
"avg_line_length": 36.73684210526316,
"alnum_prop": 0.6905444126074498,
"repo_name": "lkc9015/freestyle_project",
"id": "ffdf31b8656b130415f96133646ca7dfdafeabd1",
"size": "698",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_NLP_LDA.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10032"
}
],
"symlink_target": ""
} |
"""
Django settings for amoki_music project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import sys
# Environment
# Runtime mode comes from PYTHON_ENV; anything except 'production' behaves
# as development. It also selects which redis DB number ws4redis uses.
PYTHON_ENV = os.environ.get('PYTHON_ENV', 'development')
if PYTHON_ENV == 'production': # pragma: no cover
    DEBUG = False
    WS4REDIS_DB = 1
else:
    DEBUG = True
    WS4REDIS_DB = 0
# ws4redis supplies the runserver WSGI application (websocket support).
WSGI_APPLICATION = 'ws4redis.django_runserver.application'
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# People who receive error notification e-mails.
ADMINS = (
    ('Amoki', 'hugo.duroux@gmail.com'),
    ('Eirika', 'chanove.tristan@gmail.com'),
)
MANAGERS = ADMINS
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control - rotate it and read
# it from the environment for real deployments.
SECRET_KEY = '5h9@)57rjgoe3m_sb12kcp-ku7w!#x86a_k5_59t#g=!e$nhha'
# NOTE(review): '*' accepts any Host header; restrict in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admindocs',
    # Pip lib
    'ws4redis',
    'rest_framework',
    'ordered_model',
    'rest_framework_swagger',
    'django_nose',
    # Our apps
    'player',
    'music',
    'endpoints',
    'sources',
    'website',
)
# Standard Django middleware stack; no project-specific middleware.
MIDDLEWARE = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'amoki_music.urls'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
# Single-file SQLite database next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'fr-FR'
TIME_ZONE = 'Europe/Paris'
USE_I18N = True
USE_L10N = True
# Naive local datetimes are stored (no timezone-aware datetimes).
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, 'collected-static/')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    'website/static',
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Websockets
# ws4redis configuration: redis connection, per-environment key prefix and
# the DB number chosen above from PYTHON_ENV.
WEBSOCKET_URL = '/ws/'
WS4REDIS_SUBSCRIBER = 'player.subscriber.CustomSubscriber'
WS4REDIS_CONNECTION = {
    'host': 'localhost',
    'port': 6379,
    'db': WS4REDIS_DB,
    'password': None,
}
WS4REDIS_EXPIRE = 0
WS4REDIS_PREFIX = 'ws_' + PYTHON_ENV
WS4REDIS_HEARTBEAT = '--heartbeat--'
# Django template engine: project-level templates/ directory plus the
# default context processors and ws4redis's websocket context processor.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates/'),
        ],
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'ws4redis.context_processors.default',
            ],
            'loaders': [
                'django.template.loaders.filesystem.Loader',
                'django.template.loaders.app_directories.Loader',
            ],
            'debug': DEBUG
        },
    },
]
# Sessions are stored in redis as well (django-redis-sessions).
SESSION_ENGINE = 'redis_sessions.session'
SESSION_REDIS_PREFIX = 'session'
#
# Modules
#
# Music source backends enabled for this deployment.
SOURCES = ["youtube", "soundcloud"]
# Youtube
YOUTUBE_LANGUAGE = os.environ.get('YOUTUBE_LANGUAGE', 'FR')
# Google client libraries read credentials from this environment variable.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = 'amoki_music/youtube_key.json'
# Soundcloud
SOUNDCLOUD_KEY = os.environ.get('SOUNDCLOUD_KEY', None)
# django-rest-swagger API documentation UI.
SWAGGER_SETTINGS = {
    'exclude_namespaces': [],
    'api_version': '0.1',
    'api_path': '/',
    'enabled_methods': [
        'get',
        'post',
        'patch',
        'delete'
    ],
    'info': {
        'contact': 'hugo.duroux@gmail.com',
        'title': 'Amoki Music',
    },
    'doc_expansion': 'none',
    'token_type': 'Bearer'
}
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.TokenAuthentication',
    ),
    'TEST_REQUEST_DEFAULT_FORMAT': 'json'
}
# Keep the original host behind a proxy for direct use of ws://
USE_X_FORWARDED_HOST = True
TESTING = False
# Test-only overrides, applied when running `manage.py test`.
if 'test' in sys.argv:
    # Disable migration during testsuite
    class DisableMigrations(object):
        # Claims to contain every app label...
        def __contains__(self, item):
            return True
        # ...and maps each to "no migrations module".
        def __getitem__(self, item):
            return None
    TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
    PASSWORD_HASHERS = (
        'django.contrib.auth.hashers.MD5PasswordHasher', # Replace hasher with a simpler and faster hash method
    )
    DEBUG = False
    TESTING = True
    MIGRATION_MODULES = DisableMigrations() # Disable migrations during tests
| {
"content_hash": "fd029b8c76c626d04c7a3a56c52da0b1",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 112,
"avg_line_length": 25.959349593495936,
"alnum_prop": 0.6570623238333855,
"repo_name": "Amoki/Amoki-Music",
"id": "21eb6adcc7f2223d999901c1c63fdb74c7405dd5",
"size": "6386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amoki_music/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14853"
},
{
"name": "HTML",
"bytes": "30566"
},
{
"name": "JavaScript",
"bytes": "53165"
},
{
"name": "Python",
"bytes": "127142"
}
],
"symlink_target": ""
} |
"""Utils for creating PerfZero benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import REDACTED
from absl import flags
from absl import logging
from absl.testing import flagsaver
import tensorflow as tf # pylint: disable=g-bad-import-order
FLAGS = flags.FLAGS
class PerfZeroBenchmark(tf.test.Benchmark):
  """Shared scaffolding for PerfZero benchmark classes.

  Takes care of resetting absl flags between tests, applying per-class
  default flag values, and resolving the output directory / TPU name from
  the environment. PerfZero (OSS) runs each test in its own process, which
  already isolates most flag state.
  """
  # Saved flag snapshot, shared across instances of the same process.
  local_flags = None
  def __init__(self,
               output_dir=None,
               default_flags=None,
               flag_methods=None,
               tpu=None):
    """Initialize class.

    Args:
      output_dir: Base directory to store all output for the test.
      default_flags: Set of flags to pass to model.
      flag_methods: Set of flag methods to run during setup.
      tpu: (optional) TPU name to use in a TPU benchmark.
    """
    # Environment variables win over constructor arguments; '/tmp' is the
    # final fallback for the output directory.
    self.output_dir = os.getenv('BENCHMARK_OUTPUT_DIR') or output_dir or '/tmp'
    self.default_flags = default_flags or {}
    self.flag_methods = flag_methods or {}
    resolved_tpu = os.getenv('BENCHMARK_TPU') or tpu or None
    if resolved_tpu:
      # TPU models are expected to accept a --tpu=name flag. PerfZero creates
      # the TPU at runtime and passes the TPU's name to this flag.
      self.default_flags['tpu'] = resolved_tpu
  def _get_model_dir(self, folder_name):
    """Returns directory to store info, e.g. saved model and event log."""
    return os.path.join(self.output_dir, folder_name)
  def _setup(self):
    """Sets up and resets flags before each test."""
    logging.set_verbosity(logging.INFO)
    if PerfZeroBenchmark.local_flags is not None:
      # A snapshot exists: restore it instead of re-running flag setup.
      flagsaver.restore_flag_values(PerfZeroBenchmark.local_flags)
      return
    for flag_method in self.flag_methods:
      flag_method()
    # Parse once so flag defaults exist before overriding. Argv list cannot
    # be empty, hence the dummy program name.
    flags.FLAGS(['foo'])
    # Apply this benchmark class's default flag values.
    for flag_name, flag_value in self.default_flags.items():
      setattr(FLAGS, flag_name, flag_value)
    PerfZeroBenchmark.local_flags = flagsaver.save_flag_values()
| {
"content_hash": "eef3ce6b6d90e625b682d54580e39638",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 33.26923076923077,
"alnum_prop": 0.6685934489402697,
"repo_name": "mlperf/training_results_v0.7",
"id": "164683c76eb2877ab7457f51cf43b91563de7e54",
"size": "3284",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Google/benchmarks/bert/implementations/bert-cloud-TF2.0-gpu-v100-8/tf2_common/utils/testing/perfzero_benchmark.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
"""
Django settings for sassafras project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_NAME = 'sassafras'
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): key is committed to source control - rotate before any
# non-development deployment.
SECRET_KEY = '*(^jxc&^d46%8bi)dzq3!kezs=bnnh&lbgalj0%zy5y9w!^voi'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps: dev tooling, bootstrap widgets, SASS compilation,
    # and S3 storage backends.
    'django_extensions',
    'debug_toolbar',
    'bootstrap3',
    'sass_processor',
    'trello_cards',
    'storages'
)
# Default Django 1.8 middleware stack.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'sassafras.urls'
# App-directory template loading with the default context processors.
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'sassafras.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
# Single-file SQLite database next to the project root.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# Collect into <BASE_DIR>/sassafras/static. The previous expression joined
# STATIC_URL ('/static/') directly; os.path.join discards every component
# before an absolute one, so STATIC_ROOT silently became '/static/' at the
# filesystem root. Stripping the slashes keeps the intended relative name.
STATIC_ROOT = os.path.join(BASE_DIR, PROJECT_NAME, STATIC_URL.strip('/'))
# FileSystem/AppDirectories finders plus sass_processor's finder for
# compiled CSS.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'sass_processor.finders.CssFinder',
)
# Serve collected static files and uploaded media from S3 via django-storages.
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# Django storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_STORAGE_BUCKET_NAME = 'django-sassafras-test'
# NOTE(review): django-storages' boto backend reads AWS_ACCESS_KEY_ID, not
# AWS_ACCESS_KEY -- confirm this setting name is actually consumed.
AWS_ACCESS_KEY = os.getenv('AWS_S3_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_S3_SECRET_ACCESS_KEY')
| {
"content_hash": "7d780e221bc5f8e5a656f71fa461a41e",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 71,
"avg_line_length": 25.876923076923077,
"alnum_prop": 0.700653983353151,
"repo_name": "mliudev/sassafras",
"id": "8f8cbc67238882e5aef5639431d5085d5c2d2769",
"size": "3364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sassafras/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42439"
},
{
"name": "JavaScript",
"bytes": "78108"
},
{
"name": "Python",
"bytes": "4197"
}
],
"symlink_target": ""
} |
class PBH5ToolsException(Exception):
    """Error raised by a pbh5tools command, remembering which command failed."""
    def __init__(self, command, msg):
        # Keep both pieces so callers can inspect them separately.
        self.command, self.msg = command, msg
    def __str__(self):
        return "command: {0} produced the following error: {1}".format(
            self.command, self.msg)
| {
"content_hash": "1ea40f1fb2685a7e13f82127db46fe65",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 88,
"avg_line_length": 35.142857142857146,
"alnum_prop": 0.5975609756097561,
"repo_name": "PacificBiosciences/pbh5tools",
"id": "4424276c8628efaec80e2c928c3d5f5d1411422e",
"size": "2076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pbh5tools/PBH5ToolsException.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2264"
},
{
"name": "Makefile",
"bytes": "1278"
},
{
"name": "Perl",
"bytes": "18811"
},
{
"name": "Perl 6",
"bytes": "24948"
},
{
"name": "Python",
"bytes": "126436"
},
{
"name": "Shell",
"bytes": "114"
}
],
"symlink_target": ""
} |
"""Module for wrapper cyber record."""
import collections
import importlib
import os
import sys
from google.protobuf.descriptor_pb2 import FileDescriptorProto
# init vars
# Locate the Cyber RT installation from the environment and extend sys.path
# so the bundled third-party libraries and the compiled extension module can
# be imported.
CYBER_PATH = os.environ['CYBER_PATH']
CYBER_DIR = os.path.split(CYBER_PATH)[0]
sys.path.append(CYBER_PATH + "/third_party/")
sys.path.append(CYBER_PATH + "/lib/")
sys.path.append(CYBER_PATH + "/python/cyber")
sys.path.append(CYBER_PATH + "/python/cyber_py")
sys.path.append(CYBER_DIR + "/python/")
sys.path.append(CYBER_DIR + "/cyber/")
# C extension implementing record reading/writing.
_CYBER_RECORD = importlib.import_module('_cyber_record')
# Tuple yielded by RecordReader.read_messages.
PyBagMessage = collections.namedtuple('PyBagMessage',
                                      'topic message data_type timestamp')
class RecordReader(object):
    """Python wrapper around the C extension's PyRecordReader."""
    def __init__(self, file_name):
        """Open the record file at |file_name| for reading."""
        self.record_reader = _CYBER_RECORD.new_PyRecordReader(file_name)
    def __del__(self):
        # Release the underlying C reader handle.
        _CYBER_RECORD.delete_PyRecordReader(self.record_reader)
    def read_messages(self, start_time=0, end_time=18446744073709551615):
        """Yield messages whose timestamps fall in [start_time, end_time].

        Args:
            start_time: earliest timestamp to read.
            end_time: latest timestamp to read (defaults to uint64 max).

        Yields:
            PyBagMessage(topic, message, data_type, timestamp) tuples.
        """
        while True:
            record = _CYBER_RECORD.PyRecordReader_ReadMessage(
                self.record_reader, start_time, end_time)
            if record["end"]:
                # No more messages in the requested range.
                return
            yield PyBagMessage(record["channel_name"], record["data"],
                               record["data_type"], record["timestamp"])
    def get_messagenumber(self, channel_name):
        """Return the number of messages stored on |channel_name|."""
        return _CYBER_RECORD.PyRecordReader_GetMessageNumber(
            self.record_reader, channel_name)
    def get_messagetype(self, channel_name):
        """Return the message type name of |channel_name| as a string."""
        return _CYBER_RECORD.PyRecordReader_GetMessageType(
            self.record_reader, channel_name)
    def get_protodesc(self, channel_name):
        """Return the proto descriptor of |channel_name|."""
        return _CYBER_RECORD.PyRecordReader_GetProtoDesc(
            self.record_reader, channel_name)
    def get_headerstring(self):
        """Return the record file's header string."""
        return _CYBER_RECORD.PyRecordReader_GetHeaderString(self.record_reader)
    def reset(self):
        """Rewind the reader to the beginning of the record."""
        return _CYBER_RECORD.PyRecordReader_Reset(self.record_reader)
    def get_channellist(self):
        """Return the list of channel names present in this record."""
        return _CYBER_RECORD.PyRecordReader_GetChannelList(self.record_reader)
class RecordWriter(object):
    """Python wrapper around the C extension's PyRecordWriter."""
    def __init__(self, file_segmentation_size_kb=0,
                 file_segmentation_interval_sec=0):
        """Create a writer.

        Args:
            file_segmentation_size_kb: segment size in KB; 0 disables size
                segmentation.
            file_segmentation_interval_sec: segment interval in seconds; 0
                disables interval segmentation.
        """
        self.record_writer = _CYBER_RECORD.new_PyRecordWriter()
        _CYBER_RECORD.PyRecordWriter_SetSizeOfFileSegmentation(
            self.record_writer, file_segmentation_size_kb)
        _CYBER_RECORD.PyRecordWriter_SetIntervalOfFileSegmentation(
            self.record_writer, file_segmentation_interval_sec)
    def __del__(self):
        # Release the underlying C writer handle.
        _CYBER_RECORD.delete_PyRecordWriter(self.record_writer)
    def open(self, path):
        """Open the record file at |path| for writing; True on success."""
        return _CYBER_RECORD.PyRecordWriter_Open(self.record_writer, path)
    def close(self):
        """Close the record file."""
        _CYBER_RECORD.PyRecordWriter_Close(self.record_writer)
    def write_channel(self, channel_name, type_name, proto_desc):
        """Register |channel_name| with its type name and proto descriptor.

        Returns:
            True on success, False otherwise.
        """
        return _CYBER_RECORD.PyRecordWriter_WriteChannel(
            self.record_writer, channel_name, type_name, proto_desc)
    def write_message(self, channel_name, data, time, raw=True):
        """Write a single message to |channel_name|.

        Args:
            channel_name: channel to write to.
            data: raw serialized bytes when |raw| is True, otherwise a
                protobuf message object that is serialized here.
            time: message timestamp.
            raw: whether |data| is already serialized.

        Returns:
            True on success, False otherwise.
        """
        if raw:
            return _CYBER_RECORD.PyRecordWriter_WriteMessage(
                self.record_writer, channel_name, data, time, "")
        # Serialize the message's FileDescriptorProto alongside the payload
        # so readers can decode it without the generated classes.
        descriptor_proto = FileDescriptorProto()
        data.DESCRIPTOR.file.CopyToProto(descriptor_proto)
        descriptor_proto.name = data.DESCRIPTOR.file.name
        return _CYBER_RECORD.PyRecordWriter_WriteMessage(
            self.record_writer, channel_name, data.SerializeToString(), time,
            descriptor_proto.SerializeToString())
    def set_size_fileseg(self, size_kilobytes):
        """Set the file segmentation size in KB."""
        return _CYBER_RECORD.PyRecordWriter_SetSizeOfFileSegmentation(
            self.record_writer, size_kilobytes)
    def set_intervaltime_fileseg(self, time_sec):
        """Set the file segmentation interval in seconds."""
        return _CYBER_RECORD.PyRecordWriter_SetIntervalOfFileSegmentation(
            self.record_writer, time_sec)
    def get_messagenumber(self, channel_name):
        """Return the message count of |channel_name|."""
        return _CYBER_RECORD.PyRecordWriter_GetMessageNumber(
            self.record_writer, channel_name)
    def get_messagetype(self, channel_name):
        """Return the message type name of |channel_name|."""
        return _CYBER_RECORD.PyRecordWriter_GetMessageType(
            self.record_writer, channel_name)
    def get_protodesc(self, channel_name):
        """Return the proto descriptor of |channel_name|."""
        return _CYBER_RECORD.PyRecordWriter_GetProtoDesc(
            self.record_writer, channel_name)
| {
"content_hash": "4d825c759437d730565c16f6cd7e63e0",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 100,
"avg_line_length": 31.90909090909091,
"alnum_prop": 0.6284900284900284,
"repo_name": "wanglei828/apollo",
"id": "95c084596ac42f523503f927641ff0f3f2d040ed",
"size": "7802",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyber/python/cyber_py/record.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1922"
},
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "C",
"bytes": "22662"
},
{
"name": "C++",
"bytes": "17378263"
},
{
"name": "CMake",
"bytes": "3600"
},
{
"name": "CSS",
"bytes": "40785"
},
{
"name": "Cuda",
"bytes": "97324"
},
{
"name": "Dockerfile",
"bytes": "11960"
},
{
"name": "GLSL",
"bytes": "7000"
},
{
"name": "HTML",
"bytes": "21068"
},
{
"name": "JavaScript",
"bytes": "364183"
},
{
"name": "Makefile",
"bytes": "6626"
},
{
"name": "Python",
"bytes": "1902086"
},
{
"name": "Shell",
"bytes": "302902"
},
{
"name": "Smarty",
"bytes": "33258"
}
],
"symlink_target": ""
} |
import json
import numpy as np
import os
import pytest
import sys
import time
import ray
from ray.cluster_utils import Cluster
from ray.test_utils import flat_errors
import ray.ray_constants as ray_constants
@pytest.fixture(params=[1, 4])
def ray_start_reconstruction(request):
    """Start a 1- or 4-node Ray cluster whose object stores split ~0.5 GB.

    Yields (plasma_store_memory, num_nodes, cluster) and tears the cluster
    down after the test.
    """
    num_nodes = request.param
    plasma_store_memory = int(0.5 * 10**9)
    cluster = Cluster(
        initialize_head=True,
        head_node_args={
            "num_cpus": 1,
            "object_store_memory": plasma_store_memory // num_nodes,
            "redis_max_memory": 10**7,
            "_internal_config": json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200
            })
        })
    for i in range(num_nodes - 1):
        cluster.add_node(
            num_cpus=1,
            object_store_memory=plasma_store_memory // num_nodes,
            _internal_config=json.dumps({
                "initial_reconstruction_timeout_milliseconds": 200
            }))
    ray.init(address=cluster.address)
    yield plasma_store_memory, num_nodes, cluster
    # Clean up the Ray cluster.
    ray.shutdown()
    cluster.shutdown()
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_simple(ray_start_reconstruction):
    """Evict and re-fetch objects produced by dependency-free tasks."""
    plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 100
    size = int(plasma_store_memory * 1.5 / (num_objects * 8))
    # Define a remote task with no dependencies, which returns a numpy
    # array of the given size.
    @ray.remote
    def foo(i, size):
        array = np.zeros(size)
        array[0] = i
        return array
    # Launch num_objects instances of the remote task.
    args = []
    for i in range(num_objects):
        args.append(foo.remote(i, size))
    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get values sequentially, in chunks.
    num_chunks = 4 * num_nodes
    chunk = num_objects // num_chunks
    for i in range(num_chunks):
        values = ray.get(args[i * chunk:(i + 1) * chunk])
        del values
    assert cluster.remaining_processes_alive()
def sorted_random_indexes(total, output_num):
    """Return output_num random indexes drawn from [0, total), ascending."""
    return sorted(np.random.randint(total) for _ in range(output_num))
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_recursive(ray_start_reconstruction):
    """Evict and re-fetch objects from a linear chain of dependent tasks."""
    plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 100
    size = int(plasma_store_memory * 1.5 / (num_objects * 8))
    # Define a root task with no dependencies, which returns a numpy array
    # of the given size.
    @ray.remote
    def no_dependency_task(size):
        array = np.zeros(size)
        return array
    # Define a task with a single dependency, which returns its one
    # argument.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg
    # Launch num_objects instances of the remote task, each dependent on
    # the one before it.
    arg = no_dependency_task.remote(size)
    args = []
    for i in range(num_objects):
        arg = single_dependency.remote(i, arg)
        args.append(arg)
    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get 10 values randomly.
    random_indexes = sorted_random_indexes(num_objects, 10)
    for i in random_indexes:
        value = ray.get(args[i])
        assert value[0] == i
    # Get values sequentially, in chunks.
    num_chunks = 4 * num_nodes
    chunk = num_objects // num_chunks
    for i in range(num_chunks):
        values = ray.get(args[i * chunk:(i + 1) * chunk])
        del values
    assert cluster.remaining_processes_alive()
@pytest.mark.skip(reason="This test often hangs or fails in CI.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_multiple_recursive(ray_start_reconstruction):
    """Evict and re-fetch objects from a chain where each task has 3 parents."""
    plasma_store_memory, _, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 100
    size = plasma_store_memory * 2 // (num_objects * 8)
    # Define a root task with no dependencies, which returns a numpy array
    # of the given size.
    @ray.remote
    def no_dependency_task(size):
        array = np.zeros(size)
        return array
    # Define a task with multiple dependencies, which returns its first
    # argument.
    @ray.remote
    def multiple_dependency(i, arg1, arg2, arg3):
        arg1 = np.copy(arg1)
        arg1[0] = i
        return arg1
    # Launch num_args instances of the root task. Then launch num_objects
    # instances of the multi-dependency remote task, each dependent on the
    # num_args tasks before it.
    num_args = 3
    args = []
    for i in range(num_args):
        arg = no_dependency_task.remote(size)
        args.append(arg)
    for i in range(num_objects):
        args.append(multiple_dependency.remote(i, *args[i:i + num_args]))
    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    args = args[num_args:]
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get 10 values randomly.
    random_indexes = sorted_random_indexes(num_objects, 10)
    for i in random_indexes:
        value = ray.get(args[i])
        assert value[0] == i
    assert cluster.remaining_processes_alive()
def wait_for_errors(error_check):
    """Poll the driver's error log until error_check(errors) holds.

    Retries once per second for up to 100 seconds, then asserts the
    condition and returns the collected errors.
    """
    errors = []
    for _ in range(100):
        errors = flat_errors()
        if error_check(errors):
            break
        time.sleep(1)
    # Make sure that enough errors came through.
    assert error_check(errors)
    return errors
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_nondeterministic_task(ray_start_reconstruction):
    """Check that reconstructing a nondeterministic task pushes a
    hash-mismatch error to the driver."""
    plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 1000
    size = plasma_store_memory * 2 // (num_objects * 8)

    # Define a nondeterministic remote task with no dependencies, which
    # returns a random numpy array of the given size. This task should
    # produce an error on the driver if it is ever reexecuted.
    @ray.remote
    def foo(i, size):
        array = np.random.rand(size)
        array[0] = i
        return array

    # Define a deterministic remote task with no dependencies, which
    # returns a numpy array of zeros of the given size.
    @ray.remote
    def bar(i, size):
        array = np.zeros(size)
        array[0] = i
        return array

    # Launch num_objects instances, half deterministic and half
    # nondeterministic.
    args = []
    for i in range(num_objects):
        if i % 2 == 0:
            args.append(foo.remote(i, size))
        else:
            args.append(bar.remote(i, size))

    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i

    def error_check(errors):
        # Predicate for wait_for_errors: how many reconstruction errors we
        # expect depends on the cluster topology.
        if num_nodes == 1:
            # In a single-node setting, each object is evicted and
            # reconstructed exactly once, so exactly half the objects will
            # produce an error during reconstruction.
            min_errors = num_objects // 2
        else:
            # In a multinode setting, each object is evicted zero or one
            # times, so some of the nondeterministic tasks may not be
            # reexecuted.
            min_errors = 1
        return len(errors) >= min_errors

    errors = wait_for_errors(error_check)
    # Make sure all the errors have the correct type.
    assert all(error["type"] == ray_constants.HASH_MISMATCH_PUSH_ERROR
               for error in errors)
    assert cluster.remaining_processes_alive()
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**9], indirect=True)
def test_driver_put_errors(ray_start_object_store_memory):
    """Check that evicting a driver `ray.put` object that cannot be
    reconstructed pushes an error to the driver instead of hanging."""
    plasma_store_memory = ray_start_object_store_memory
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 100
    size = plasma_store_memory * 2 // (num_objects * 8)

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    # Launch num_objects instances of the remote task, each dependent on
    # the one before it. The first instance of the task takes a numpy array
    # as an argument, which is put into the object store.
    args = []
    arg = single_dependency.remote(0, np.zeros(size))
    for i in range(num_objects):
        arg = single_dependency.remote(i, arg)
        args.append(arg)

    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i

    # Get each value starting from the beginning to force reconstruction.
    # Currently, since we're not able to reconstruct `ray.put` objects that
    # were evicted and whose originating tasks are still running, this
    # for-loop should hang on its first iteration and push an error to the
    # driver.
    ray.wait([args[0]], timeout=30)

    def error_check(errors):
        # More than one error indicates the failure was reported.
        return len(errors) > 1

    errors = wait_for_errors(error_check)
    # Every reported error must be a put-reconstruction failure.
    assert all(error["type"] == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
               or "ray.exceptions.UnreconstructableError" in error["message"]
               for error in errors)
# NOTE(swang): This test tries to launch 1000 workers and breaks.
# TODO(rkn): This test needs to be updated to use pytest.
# class WorkerPoolTests(unittest.TestCase):
#
# def tearDown(self):
# ray.shutdown()
#
# def testBlockingTasks(self):
# @ray.remote
# def f(i, j):
# return (i, j)
#
# @ray.remote
# def g(i):
# # Each instance of g submits and blocks on the result of another remote
# # task.
# object_ids = [f.remote(i, j) for j in range(10)]
# return ray.get(object_ids)
#
# ray.init(num_workers=1)
# ray.get([g.remote(i) for i in range(1000)])
# ray.shutdown()
# Allow running this test module directly; delegates to pytest.
if __name__ == "__main__":
    import pytest
    sys.exit(pytest.main(["-v", __file__]))
| {
"content_hash": "6a1870a62289680b63c6505e88c89525",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 79,
"avg_line_length": 33.506738544474395,
"alnum_prop": 0.636553776848202,
"repo_name": "stephanie-wang/ray",
"id": "b307341bc91e48a0b74caf4c42e322e5851da49b",
"size": "12431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tests/test_stress_failure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29882"
},
{
"name": "C++",
"bytes": "2149909"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Dockerfile",
"bytes": "5499"
},
{
"name": "Go",
"bytes": "28481"
},
{
"name": "HTML",
"bytes": "30435"
},
{
"name": "Java",
"bytes": "738348"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "4058862"
},
{
"name": "Shell",
"bytes": "88736"
},
{
"name": "Starlark",
"bytes": "121207"
},
{
"name": "TypeScript",
"bytes": "64161"
}
],
"symlink_target": ""
} |
# Copyright 2002, 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)

# This tests correct handling of "-d1" and "-d2" options.

import BoostBuild

t = BoostBuild.Tester(["-ffile.jam"], pass_d0=False, pass_toolset=0)

# `a` is a normal action; `b` is declared `quietly`, so its name should be
# echoed only at debug level 2 and above.
t.write("file.jam", """\
actions a { }
actions quietly b { }
ALWAYS all ;
a all ;
b all ;
""")

# -d0: build must produce no output at all.
t.run_build_system(["-d0"], stdout="")

# -d1: normal action names are echoed, quiet ones are not.
t.run_build_system(["-d1"])
t.expect_output_lines("a all")
t.expect_output_lines("b all", False)

# -d2: both normal and quiet action names are echoed.
t.run_build_system(["-d2"])
t.expect_output_lines("a all")
t.expect_output_lines("b all")

t.cleanup()
| {
"content_hash": "25cdfb5f53946d546dff354cf4c8cbeb",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 68,
"avg_line_length": 22.838709677419356,
"alnum_prop": 0.6468926553672316,
"repo_name": "ycsoft/FatCat-Server",
"id": "61ff545ebbf85f54ca8b4edea144c4fd0c53f529",
"size": "727",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "LIBS/boost_1_58_0/tools/build/test/core_d12.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "195345"
},
{
"name": "Batchfile",
"bytes": "32367"
},
{
"name": "C",
"bytes": "9529739"
},
{
"name": "C#",
"bytes": "41850"
},
{
"name": "C++",
"bytes": "175536080"
},
{
"name": "CMake",
"bytes": "14812"
},
{
"name": "CSS",
"bytes": "282447"
},
{
"name": "Cuda",
"bytes": "26521"
},
{
"name": "FORTRAN",
"bytes": "1856"
},
{
"name": "Groff",
"bytes": "6163"
},
{
"name": "HTML",
"bytes": "148956564"
},
{
"name": "JavaScript",
"bytes": "174868"
},
{
"name": "Lex",
"bytes": "1290"
},
{
"name": "Makefile",
"bytes": "1045258"
},
{
"name": "Max",
"bytes": "37424"
},
{
"name": "Objective-C",
"bytes": "34644"
},
{
"name": "Objective-C++",
"bytes": "246"
},
{
"name": "PHP",
"bytes": "60249"
},
{
"name": "Perl",
"bytes": "37297"
},
{
"name": "Perl6",
"bytes": "2130"
},
{
"name": "Python",
"bytes": "1717781"
},
{
"name": "QML",
"bytes": "613"
},
{
"name": "QMake",
"bytes": "9450"
},
{
"name": "Rebol",
"bytes": "372"
},
{
"name": "Shell",
"bytes": "372652"
},
{
"name": "Tcl",
"bytes": "1205"
},
{
"name": "TeX",
"bytes": "13819"
},
{
"name": "XSLT",
"bytes": "564356"
},
{
"name": "Yacc",
"bytes": "19612"
}
],
"symlink_target": ""
} |
import urllib2
from bs4 import BeautifulSoup
from xml.dom import minidom
import random
from scrapy.utils.project import get_project_settings
import logging
import getpass
import stem
import stem.connection
from stem import Signal
from stem.control import Controller
class TorProxyMiddleware(object):
    """Scrapy downloader middleware that routes every request through a
    local Tor HTTP proxy and requests a fresh exit node (new IP) once
    MAX_REQ_PER_IP requests have been issued."""

    def __init__(self, *args, **kwargs):
        self.import_settings()
        # Number of requests issued since the last identity change.
        self.req_counter = 0

    def change_ip_addres(self):
        """Ask the Tor control port for a new circuit (NEWNYM signal)."""
        # The Controller context manager closes the connection on exit, so
        # the explicit controller.close() of the previous version was
        # redundant and has been removed.
        with Controller.from_port(port=self.control_port) as controller:
            controller.authenticate(self.password)
            controller.signal(Signal.NEWNYM)

    def import_settings(self):
        """Load Tor/proxy configuration from the Scrapy project settings."""
        settings = get_project_settings()
        self.control_port = settings['CONTROL_PORT']
        self.password = settings['AUTH_PASSWORD']
        self.http_proxy = settings['HTTP_PROXY']
        self.max_req_per_ip = settings['MAX_REQ_PER_IP']

    def process_request(self, request, spider):
        """Tag the outgoing request with the Tor proxy, rotating the exit
        IP after max_req_per_ip requests (when that limit is configured)."""
        self.req_counter += 1
        if self.max_req_per_ip is not None and self.req_counter > self.max_req_per_ip:
            self.req_counter = 0
            self.change_ip_addres()
        request.meta['proxy'] = self.http_proxy
        logging.info('Using proxy: %s' % request.meta['proxy'])
        return None
class HttpProxyMiddleware(object):
    """Scrapy downloader middleware that rotates requests through public
    HTTP proxies scraped from hidemyass.com, dropping proxies that fail."""

    # Shared pool of scraped proxies: dicts with 'address', 'port', 'protocol'.
    proxies = []

    def __init__(self, *args, **kwargs):
        self.query_proxies()

    def query_proxies(self):
        """Scrape up to 100 proxy entries from the hidemyass listing page."""
        request = urllib2.urlopen('http://proxylist.hidemyass.com/search-1311281')
        if request.getcode() == 200:
            soup = BeautifulSoup(request, 'html.parser')
            # get the table with proxy addresses
            data_grid = soup.find('table', id='listable')
            max_proxies = 100
            # iterate through the rows
            for tr in data_grid.tbody.findAll('tr'):
                i = 0
                item = {}
                for td in tr.findAll('td'):
                    if i == 1:  # ip address
                        item['address'] = self.get_ip(td)
                    elif i == 2:
                        item['port'] = td.get_text().strip()
                    elif i == 6:
                        item['protocol'] = td.get_text().lower().strip()
                    i += 1
                # Save only fully scraped rows. The previous check,
                # `if 'ip_address' and 'port' and 'protocol' in item:`,
                # only tested `'protocol' in item` (non-empty string
                # literals are always truthy) and referenced a key name
                # ('ip_address') that is never set — the scraper stores
                # 'address'.
                if all(key in item for key in ('address', 'port', 'protocol')):
                    self.proxies.append(item)
                    max_proxies -= 1
                    if max_proxies < 1:
                        break
        request.close()

    def get_ip(self, td):
        # getting ip address is a little bit tricky but this solution
        # is simple and fast, for more info check
        # https://blueshellgroup.wordpress.com/2013/04/15/creating-a-private-database-of-proxies-part-2/
        span = td.find('span')
        styles = str(span.find('style')).split('.')
        # Remove elements the page hides via CSS classes (obfuscation) ...
        for k in range(1, len(styles)):
            style = styles[k].split('{')
            if 'none' in style[1]:
                [s.extract() for s in span('span', {'class': style[0]})]
        # ... then the inline <style> itself and anything display:none.
        [s.extract() for s in span('style')]
        [s.extract() for s in span(['span', 'div'], style='display:none')]
        return span.get_text().replace('\n', '').replace('\t', '').strip()

    @classmethod
    def from_crawler(cls, crawler):
        # The previous version fetched get_project_settings() into an
        # unused local; the constructor receives crawler.settings directly.
        return cls(crawler.settings)

    def process_request(self, request, spider):
        """Assign a randomly chosen proxy to the outgoing request."""
        item = random.choice(self.proxies)
        request.meta['proxy'] = item['protocol'] + '://' + item['address'] + ':' + item['port']
        logging.info('Using proxy: %s' % request.meta['proxy'])

    def remove_failed_proxy(self, request, spider):
        """Drop the proxy used by `request` from the pool.

        Returns True when a proxy was removed, False otherwise. Refills
        the pool when it becomes empty.
        """
        proxy = request.meta['proxy']
        logging.log(logging.DEBUG, 'Removing failed proxy...')
        try:
            i = 0
            for el in self.proxies:
                if el['address'] in proxy:
                    del self.proxies[i]
                    proxies_num = len(self.proxies)
                    logging.log(logging.DEBUG, 'Removed failed proxy <%s>, %d proxies left' % (proxy, proxies_num))
                    if proxies_num == 0:
                        self.query_proxies()
                    return True
                i += 1
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            logging.log(logging.ERROR, 'Error while removing failed proxy')
        return False

    def process_exception(self, request, exception, spider):
        """On a download error, drop the failing proxy and retry the request."""
        if self.remove_failed_proxy(request, spider):
            return request
        return None

    def process_response(self, request, response, spider):
        # really brutal filter: retry anything that is not a plain 200.
        if response.status == 200:
            return response
        else:
            return request
| {
"content_hash": "b56109f462ef7faa70b81af137278e94",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 115,
"avg_line_length": 36.21212121212121,
"alnum_prop": 0.5548117154811716,
"repo_name": "claneave28/PythonScraper",
"id": "3fc614ded7274528c9416d384a9b14126aca46f7",
"size": "4780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amazon/middlewares/proxy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11528"
}
],
"symlink_target": ""
} |
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TypeAliases(object):
    """Generated flatbuffers table accessor for the `TypeAliases` table.

    Auto-generated by flatc; each accessor reads its field lazily from the
    underlying buffer through the table's vtable (slots 4..26), returning
    the schema default when the field is absent.
    """
    __slots__ = ['_tab']

    @classmethod
    def GetRootAs(cls, buf, offset=0):
        # Resolve the root table offset stored at the start of the buffer.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
        x = TypeAliases()
        x.Init(buf, n + offset)
        return x

    @classmethod
    def GetRootAsTypeAliases(cls, buf, offset=0):
        """This method is deprecated. Please switch to GetRootAs."""
        return cls.GetRootAs(buf, offset)

    @classmethod
    def TypeAliasesBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
        # b"\x4D\x4F\x4E\x53" is the 4-byte file identifier "MONS".
        return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x4D\x4F\x4E\x53", size_prefixed=size_prefixed)

    # TypeAliases
    def Init(self, buf, pos):
        self._tab = flatbuffers.table.Table(buf, pos)

    # TypeAliases
    def I8(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int8Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def U8(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def I16(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def U16(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint16Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def I32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def U32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def I64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Int64Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def U64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
        return 0

    # TypeAliases
    def F32(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float32Flags, o + self._tab.Pos)
        return 0.0

    # TypeAliases
    def F64(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
        if o != 0:
            return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
        return 0.0

    # TypeAliases: element j of the int8 vector (1-byte stride).
    def V8(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Int8Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 1))
        return 0

    # TypeAliases
    def V8AsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Int8Flags, o)
        return 0

    # TypeAliases
    def V8Length(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # TypeAliases
    def V8IsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
        return o == 0

    # TypeAliases: element j of the float64 vector (8-byte stride).
    def Vf64(self, j):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
        if o != 0:
            a = self._tab.Vector(o)
            return self._tab.Get(flatbuffers.number_types.Float64Flags, a + flatbuffers.number_types.UOffsetTFlags.py_type(j * 8))
        return 0

    # TypeAliases
    def Vf64AsNumpy(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
        if o != 0:
            return self._tab.GetVectorAsNumpy(flatbuffers.number_types.Float64Flags, o)
        return 0

    # TypeAliases
    def Vf64Length(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
        if o != 0:
            return self._tab.VectorLen(o)
        return 0

    # TypeAliases
    def Vf64IsNone(self):
        o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
        return o == 0
# Module-level builder helpers generated by flatc. The `TypeAliasesXxx`
# names are the legacy API; the short `Xxx` wrappers are the newer aliases.
def TypeAliasesStart(builder): builder.StartObject(12)
def Start(builder):
    return TypeAliasesStart(builder)
def TypeAliasesAddI8(builder, i8): builder.PrependInt8Slot(0, i8, 0)
def AddI8(builder, i8):
    return TypeAliasesAddI8(builder, i8)
def TypeAliasesAddU8(builder, u8): builder.PrependUint8Slot(1, u8, 0)
def AddU8(builder, u8):
    return TypeAliasesAddU8(builder, u8)
def TypeAliasesAddI16(builder, i16): builder.PrependInt16Slot(2, i16, 0)
def AddI16(builder, i16):
    return TypeAliasesAddI16(builder, i16)
def TypeAliasesAddU16(builder, u16): builder.PrependUint16Slot(3, u16, 0)
def AddU16(builder, u16):
    return TypeAliasesAddU16(builder, u16)
def TypeAliasesAddI32(builder, i32): builder.PrependInt32Slot(4, i32, 0)
def AddI32(builder, i32):
    return TypeAliasesAddI32(builder, i32)
def TypeAliasesAddU32(builder, u32): builder.PrependUint32Slot(5, u32, 0)
def AddU32(builder, u32):
    return TypeAliasesAddU32(builder, u32)
def TypeAliasesAddI64(builder, i64): builder.PrependInt64Slot(6, i64, 0)
def AddI64(builder, i64):
    return TypeAliasesAddI64(builder, i64)
def TypeAliasesAddU64(builder, u64): builder.PrependUint64Slot(7, u64, 0)
def AddU64(builder, u64):
    return TypeAliasesAddU64(builder, u64)
def TypeAliasesAddF32(builder, f32): builder.PrependFloat32Slot(8, f32, 0.0)
def AddF32(builder, f32):
    return TypeAliasesAddF32(builder, f32)
def TypeAliasesAddF64(builder, f64): builder.PrependFloat64Slot(9, f64, 0.0)
def AddF64(builder, f64):
    return TypeAliasesAddF64(builder, f64)
def TypeAliasesAddV8(builder, v8): builder.PrependUOffsetTRelativeSlot(10, flatbuffers.number_types.UOffsetTFlags.py_type(v8), 0)
def AddV8(builder, v8):
    return TypeAliasesAddV8(builder, v8)
def TypeAliasesStartV8Vector(builder, numElems): return builder.StartVector(1, numElems, 1)
def StartV8Vector(builder, numElems):
    return TypeAliasesStartV8Vector(builder, numElems)
def TypeAliasesAddVf64(builder, vf64): builder.PrependUOffsetTRelativeSlot(11, flatbuffers.number_types.UOffsetTFlags.py_type(vf64), 0)
def AddVf64(builder, vf64):
    return TypeAliasesAddVf64(builder, vf64)
def TypeAliasesStartVf64Vector(builder, numElems): return builder.StartVector(8, numElems, 8)
def StartVf64Vector(builder, numElems):
    return TypeAliasesStartVf64Vector(builder, numElems)
def TypeAliasesEnd(builder): return builder.EndObject()
def End(builder):
    return TypeAliasesEnd(builder)
# typing is optional at runtime; only the comments below reference List.
try:
    from typing import List
except:
    pass
class TypeAliasesT(object):
    """Generated object API (mutable Python mirror) for `TypeAliases`.

    Auto-generated by flatc: unpacks a flatbuffer into plain attributes
    and packs itself back into a builder.
    """

    # TypeAliasesT
    def __init__(self):
        self.i8 = 0  # type: int
        self.u8 = 0  # type: int
        self.i16 = 0  # type: int
        self.u16 = 0  # type: int
        self.i32 = 0  # type: int
        self.u32 = 0  # type: int
        self.i64 = 0  # type: int
        self.u64 = 0  # type: int
        self.f32 = 0.0  # type: float
        self.f64 = 0.0  # type: float
        self.v8 = None  # type: List[int]
        self.vf64 = None  # type: List[float]

    @classmethod
    def InitFromBuf(cls, buf, pos):
        # Wrap the raw buffer in a table accessor, then unpack it.
        typeAliases = TypeAliases()
        typeAliases.Init(buf, pos)
        return cls.InitFromObj(typeAliases)

    @classmethod
    def InitFromPackedBuf(cls, buf, pos=0):
        # Resolve the root offset first, then delegate to InitFromBuf.
        n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
        return cls.InitFromBuf(buf, pos+n)

    @classmethod
    def InitFromObj(cls, typeAliases):
        x = TypeAliasesT()
        x._UnPack(typeAliases)
        return x

    # TypeAliasesT
    def _UnPack(self, typeAliases):
        """Copy every field from a TypeAliases accessor into this object."""
        if typeAliases is None:
            return
        self.i8 = typeAliases.I8()
        self.u8 = typeAliases.U8()
        self.i16 = typeAliases.I16()
        self.u16 = typeAliases.U16()
        self.i32 = typeAliases.I32()
        self.u32 = typeAliases.U32()
        self.i64 = typeAliases.I64()
        self.u64 = typeAliases.U64()
        self.f32 = typeAliases.F32()
        self.f64 = typeAliases.F64()
        if not typeAliases.V8IsNone():
            # Prefer a zero-copy numpy view when numpy is available.
            if np is None:
                self.v8 = []
                for i in range(typeAliases.V8Length()):
                    self.v8.append(typeAliases.V8(i))
            else:
                self.v8 = typeAliases.V8AsNumpy()
        if not typeAliases.Vf64IsNone():
            if np is None:
                self.vf64 = []
                for i in range(typeAliases.Vf64Length()):
                    self.vf64.append(typeAliases.Vf64(i))
            else:
                self.vf64 = typeAliases.Vf64AsNumpy()

    # TypeAliasesT
    def Pack(self, builder):
        """Serialize this object into `builder`; returns the table offset."""
        # Vectors must be written before the table that references them.
        if self.v8 is not None:
            if np is not None and type(self.v8) is np.ndarray:
                v8 = builder.CreateNumpyVector(self.v8)
            else:
                TypeAliasesStartV8Vector(builder, len(self.v8))
                for i in reversed(range(len(self.v8))):
                    builder.PrependByte(self.v8[i])
                v8 = builder.EndVector()
        if self.vf64 is not None:
            if np is not None and type(self.vf64) is np.ndarray:
                vf64 = builder.CreateNumpyVector(self.vf64)
            else:
                TypeAliasesStartVf64Vector(builder, len(self.vf64))
                for i in reversed(range(len(self.vf64))):
                    builder.PrependFloat64(self.vf64[i])
                vf64 = builder.EndVector()
        TypeAliasesStart(builder)
        TypeAliasesAddI8(builder, self.i8)
        TypeAliasesAddU8(builder, self.u8)
        TypeAliasesAddI16(builder, self.i16)
        TypeAliasesAddU16(builder, self.u16)
        TypeAliasesAddI32(builder, self.i32)
        TypeAliasesAddU32(builder, self.u32)
        TypeAliasesAddI64(builder, self.i64)
        TypeAliasesAddU64(builder, self.u64)
        TypeAliasesAddF32(builder, self.f32)
        TypeAliasesAddF64(builder, self.f64)
        if self.v8 is not None:
            TypeAliasesAddV8(builder, v8)
        if self.vf64 is not None:
            TypeAliasesAddVf64(builder, vf64)
        typeAliases = TypeAliasesEnd(builder)
        return typeAliases
| {
"content_hash": "89f78e2993a7dcd3e4805783285f3253",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 135,
"avg_line_length": 37.056478405315616,
"alnum_prop": 0.6383360229514076,
"repo_name": "alexames/flatbuffers",
"id": "b3020490a0b63c151d6040d5726c954d99c27efa",
"size": "11246",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/MyGame/Example/TypeAliases.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1863"
},
{
"name": "C",
"bytes": "1110"
},
{
"name": "C#",
"bytes": "492386"
},
{
"name": "C++",
"bytes": "3622366"
},
{
"name": "CMake",
"bytes": "99988"
},
{
"name": "Dart",
"bytes": "405876"
},
{
"name": "Go",
"bytes": "139255"
},
{
"name": "Java",
"bytes": "510309"
},
{
"name": "JavaScript",
"bytes": "450564"
},
{
"name": "Kotlin",
"bytes": "353519"
},
{
"name": "Lua",
"bytes": "94919"
},
{
"name": "Makefile",
"bytes": "481"
},
{
"name": "Nim",
"bytes": "95777"
},
{
"name": "PHP",
"bytes": "183510"
},
{
"name": "Python",
"bytes": "677942"
},
{
"name": "Roff",
"bytes": "648"
},
{
"name": "Ruby",
"bytes": "913"
},
{
"name": "Rust",
"bytes": "994801"
},
{
"name": "Shell",
"bytes": "24822"
},
{
"name": "Starlark",
"bytes": "35596"
},
{
"name": "Swift",
"bytes": "649283"
},
{
"name": "TypeScript",
"bytes": "598886"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Remove the obsolete `tag` field from List and restrict `style`
    to a fixed set of list templates (ul/ol)."""

    dependencies = [
        ('widgetbox', '0016_list_style'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='list',
            name='tag',
        ),
        migrations.AlterField(
            model_name='list',
            name='style',
            field=models.CharField(default=b'widgetbox/ul.html', help_text=b'Choose list style (template)', max_length=100, choices=[(b'widgetbox/ul.html', b'Unordered list (ul)'), (b'widgetbox/ol.html', b'Ordered list (ol)')]),
        ),
    ]
| {
"content_hash": "162c77f0ad877f0a9f77c740a343a723",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 228,
"avg_line_length": 29.318181818181817,
"alnum_prop": 0.5906976744186047,
"repo_name": "logithr/djangocms-widgetbox",
"id": "a9e7748b26eadccbdd08e1c186badc5aab10e210",
"size": "669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "widgetbox/migrations/0017_auto_20160303_1551.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4733"
},
{
"name": "Python",
"bytes": "33610"
}
],
"symlink_target": ""
} |
from msrest.paging import Paged
class OperationMetadataPaged(Paged):
    """
    A paging container for iterating over a list of :class:`OperationMetadata <azure.mgmt.devtestlabs.models.OperationMetadata>` objects.
    """

    # Maps msrest paging attributes onto the service response's JSON keys.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[OperationMetadata]'}
    }

    def __init__(self, *args, **kwargs):
        super(OperationMetadataPaged, self).__init__(*args, **kwargs)
| {
"content_hash": "b2c94f767ea4f78ea55e5d510e50c1e2",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 135,
"avg_line_length": 30.9375,
"alnum_prop": 0.6323232323232323,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "f0fa25a37beda8d4388ed94af9f8b4ac4d39f006",
"size": "969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/operation_metadata_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
__author__ = 'cadu'
from models import Feed, Magazine, Category, User, UserFavorite, Administrator, CoverArticle, PROFILE_OPTIONS
from serializers import AdministratorSerializer, MagazineSerializer, CategorySerializer, CoverArticleSerializer
from django.http import HttpResponse
from rest_framework.views import APIView
from rest_framework.parsers import JSONParser, MultiPartParser, FileUploadParser
from django.utils.decorators import decorator_from_middleware
from django.utils.six import BytesIO
from django.core import serializers
from django.core.files.storage import default_storage
from django.db import transaction
from middleware import BackofficeAuthMiddleware, MobileMiddleware
import solr_service
import pprint
import json
import Queue
import threading
import thread
import sys
import services
import traceback
import settings
import logging
import uuid
import jwt_util
from custom_exception import CustomException, CustomErrorMessages
import validator
import email_sender
import bcrypt
import choices
# Get logger.
logger = logging.getLogger(__name__)
# region Mobile.
def get_user_by_header_request(request):
    """Resolve the authenticated User from the social-id request headers.

    Looks the user up by the FACEBOOKID header when it is present,
    otherwise by the GOOGLEID header. Returns None when no matching
    account exists.
    """
    facebook_id = request.META.get('HTTP_FACEBOOKID', None)
    google_id = request.META.get('HTTP_GOOGLEID', None)
    # Both branches of the previous version were identical apart from the
    # lookup field (and one used a Python 2 `print` where the other used
    # the logger); collapse them into a single lookup.
    if facebook_id is not None:
        lookup = {'facebook_id': facebook_id}
    else:
        lookup = {'google_id': google_id}
    try:
        return User.objects.get(**lookup)
    except User.DoesNotExist:
        # No account for these credentials yet; callers handle None.
        return None
    except Exception:
        logger.critical(traceback.format_exc())
        raise
@decorator_from_middleware(MobileMiddleware)
def login_mobile(self):
    """Log a mobile user in (creating the account on first login) and
    return the user profile, Solr version, feeds and favorites.

    Identification is by `email` in the POST body, falling back to the
    FACEBOOKID / GOOGLEID request headers; a social id missing from an
    existing account is attached to it on login.
    """
    try:
        logger.info('Handling /login.')
        if self.method == 'POST':
            data = json.loads(self.body)
            email = data.get('email', None)
            name = data.get('name', None)
            language = data.get('language', None)
            font_size = data.get('font_size', None)
            facebook_id = self.META.get('HTTP_FACEBOOKID', None)
            google_id = self.META.get('HTTP_GOOGLEID', None)
            # Try get by email. If user was not found, user = None for now.
            if email is not None:
                try:
                    user = User.objects.get(email=email)
                except User.DoesNotExist:
                    user = None
            # If email or user is None, try to get by social id.
            # NOTE(review): if email is None and BOTH social-id headers are
            # absent, `user` is never bound and the check below raises
            # NameError (caught by the bare except) — confirm the
            # middleware guarantees at least one social id header.
            if email is None or user is None:
                if facebook_id is not None:
                    try:
                        user = User.objects.get(facebook_id=facebook_id)
                    except User.DoesNotExist:
                        user = None
                elif google_id is not None:
                    try:
                        user = User.objects.get(google_id=google_id)
                    except User.DoesNotExist:
                        user = None
            if user is None:
                # First login: create the account from the posted profile.
                user = User(email=email, name=name, facebook_id=facebook_id, google_id=google_id, language=language,
                            font_size=font_size)
                user.save()
            else:
                # Attach whichever social id the account is still missing.
                if user.facebook_id is None and facebook_id is not None:
                    user.facebook_id = facebook_id
                    user.save()
                elif user.google_id is None and google_id is not None:
                    user.google_id = google_id
                    user.save()
            # Assemble the login payload: profile, feeds (with magazine
            # ids per feed) and favorite article ids.
            response = dict()
            response['user'] = user.to_dict()
            response['solr_version'] = services.solr_repository_version()
            response["feeds"] = dict();
            feeds = Feed.objects.filter(user=user.id)
            for feed in feeds:
                feed_response = dict()
                feed_response["feed_name"] = feed.feed_name
                feed_response["magazines"] = list(
                    Magazine.objects.order_by('magazine_name').values_list('id', flat=True).filter(feed=feed))
                response["feeds"][feed.id] = feed_response
            response['favorites'] = list(UserFavorite.objects.values_list('article_id', flat=True).filter(user=user))
            return HttpResponse(json.dumps(response), status=200, content_type="application/json")
        else:
            return HttpResponse('', status=405)
    except:
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def collection(self):
    """Return the static list of collections as JSON (GET only)."""
    try:
        logger.info('Handling /collection.')
        if self.method != 'GET':
            return HttpResponse('', status=405)
        return HttpResponse(json.dumps(choices.collections), status=200)
    except:
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def home(self):
    """Build the home-screen payload: articles for (up to) the first three
    magazines of each of the user's feeds, fetched concurrently."""
    try:
        logger.info('Handling /home.')
        if self.method == 'GET':
            user = get_user_by_header_request(self)
            user_id = user.id
            feed_ids = list(Feed.objects.filter(user=user_id).values_list('id', flat=True))
            # Collect at most the first three magazines (alphabetically)
            # from each feed; the set de-duplicates shared magazines.
            magazine_ids = set()
            for feed_id in feed_ids:
                magazines = list(
                    Magazine.objects.filter(feeds=feed_id).order_by('magazine_name').values_list('id', flat=True))
                i = 0
                for magazine_id in magazines:
                    if i < 3:
                        magazine_ids.add(magazine_id)
                        i += 1
                    else:
                        break
            # Fan out one article lookup per magazine; each worker thread
            # pushes a {'magazine_id': ..., 'response': ...} dict onto q.
            q = Queue.Queue()
            count_calls = 0
            for magazine_id in magazine_ids:
                count_calls += 1
                t = threading.Thread(target=services.article_find_by_magazine_id, args=(magazine_id, q))
                t.daemon = True
                t.start()
            # Drain exactly one result per launched thread (q.get blocks).
            response = dict()
            i = 0
            while i < count_calls:
                s = q.get()
                response[s['magazine_id']] = s['response']
                i += 1
            return HttpResponse(json.dumps(response), status=200, content_type="application/json")
        else:
            return HttpResponse('', status=405)
    except:
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def read_favorite(self):
    """Return the requesting user's favorite articles (GET only)."""
    logger.info('Handling /favorite/read.')
    try:
        if self.method != 'GET':
            return HttpResponse(status=405)
        user = get_user_by_header_request(self)
        favorites = services.article_find_favorite_by_user_id(user.id)
        return HttpResponse(json.dumps(favorites), status=200)
    except:
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def create_favorite(self):
    """Mark an article as a favorite for the requesting user (POST only).

    Expects a JSON body with `article_id`. Idempotent: favoriting the
    same article twice is a no-op.
    """
    logger.info('Handling /favorite/create.')
    try:
        if self.method != 'POST':
            return HttpResponse(status=405)
        user = get_user_by_header_request(self)
        data = json.loads(self.body)
        article_id = data.get('article_id', None)
        if article_id is None:
            return HttpResponse('You should provide article_id parameter.', status=400)
        # get_or_create avoids the check-then-insert race of the previous
        # get()/save() implementation (two concurrent requests could both
        # miss the get() and insert duplicates).
        UserFavorite.objects.get_or_create(user_id=user.id, article_id=article_id)
        return HttpResponse(json.dumps({}), status=200)
    except:
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def delete_favorite(self):
    """Remove an article from the requesting user's favorites (POST only).

    Expects a JSON body with `article_id`; deleting a non-existent
    favorite is a silent no-op, matching the previous behavior.
    """
    logger.info('Handling /favorite/delete.')
    try:
        if self.method != 'POST':
            return HttpResponse(status=405)
        user = get_user_by_header_request(self)
        data = json.loads(self.body)
        article_id = data.get('article_id', None)
        # filter().delete() is a no-op when the row is absent and, unlike
        # the previous get()/delete(), cannot raise MultipleObjectsReturned
        # if duplicate favorites ever exist.
        UserFavorite.objects.filter(user_id=user.id, article_id=article_id).delete()
        return HttpResponse(json.dumps({}), status=200)
    except:
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def search(self):
    """Case-insensitive magazine-name search; returns matching ids (POST)."""
    logger.info('Handling /magazine/search.')
    try:
        if self.method != 'POST':
            return HttpResponse('', status=405)
        payload = json.loads(self.body)
        query = payload.get('q', '')
        matches = Magazine.objects.filter(magazine_name__icontains=query)
        magazine_ids = list(matches.order_by('magazine_name').values_list('id', flat=True))
        return HttpResponse(json.dumps(magazine_ids), status=200)
    except:
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
@transaction.atomic
def create_feed(self):
    """POST /feed/create: create a named feed holding a list of magazines.

    Body: JSON with `feed_name` and `magazines` (list of magazine ids).
    400 when parameters are missing or the name is already taken by this
    user; the whole creation runs in a transaction.
    """
    logger.info('Handling /feed/create.')
    try:
        if self.method == 'POST':
            with transaction.atomic():
                user = get_user_by_header_request(self)
                data = json.loads(self.body)
                feed_name = data.get('feed_name', None)
                magazine_ids = data.get('magazines', None)
                if feed_name is None or magazine_ids is None:
                    return HttpResponse('You should provide feed_name and magazines parameters.', status=400)
                try:
                    Feed.objects.get(user=user.id, feed_name=feed_name)
                    return HttpResponse('Already exists a feed with this name.', status=400)
                except Feed.DoesNotExist:
                    # Positional args: (pk, feed_name, user_id).
                    feed = Feed(None, feed_name, user.id)
                    feed.save()
                    for magazine_id in magazine_ids:
                        # Magazine.DoesNotExist for a bad id rolls the
                        # transaction back via the outer handler (500).
                        feed.magazines.add(Magazine.objects.get(id=magazine_id))
                    feed.save()
                response = dict()
                response["feed_id"] = feed.id
                response["feed_name"] = feed.feed_name
                response["magazines"] = list(
                    Magazine.objects.filter(feeds=feed.id).order_by('magazine_name').values_list('id', flat=True))
                return HttpResponse(json.dumps(response), status=200)
        else:
            return HttpResponse('', status=405)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def update_feed(self):
    """POST /feed/update: add/remove magazines in a feed owned by the user.

    Body: JSON with `feed_id`, `add` and `remove` (lists of magazine ids).
    403 when the feed belongs to another user.
    """
    logger.info('Handling /feed/update.')
    try:
        if self.method == 'POST':
            user = get_user_by_header_request(self)
            data = json.loads(self.body)
            feed_id = data.get('feed_id', None)
            add = data.get('add', None)
            remove = data.get('remove', None)
            if feed_id is None or add is None or remove is None:
                return HttpResponse('You should provide feed_id, add and remove parameters.', status=400)
            # NOTE(review): an unknown feed_id surfaces as Feed.DoesNotExist
            # and thus a 500; consider a 404 — confirm with API clients.
            feed = Feed.objects.get(id=feed_id)
            if feed.user.id != user.id:
                return HttpResponse('You don\'t have permission to change this resource.', status=403)
            for magazine_id in add:
                feed.magazines.add(magazine_id)
            for magazine_id in remove:
                feed.magazines.remove(magazine_id)
            feed.save()
            response = dict()
            response["feed_id"] = feed.id
            response["feed_name"] = feed.feed_name
            response["magazines"] = list(
                Magazine.objects.filter(feeds=feed.id).order_by('magazine_name').values_list('id', flat=True))
            return HttpResponse(json.dumps(response), status=200)
        else:
            return HttpResponse('', status=405)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def delete_feed(self):
    """POST /feed/delete: delete a feed owned by the requesting user."""
    logger.info('Handling /feed/delete.')
    if self.method == 'POST':
        try:
            user = get_user_by_header_request(self)
            data = json.loads(self.body)
            feed_id = data.get('feed_id', None)
            feed = Feed.objects.get(id=feed_id)
            if feed.user.id != user.id:
                return HttpResponse('You don\'t have permission to change this resource.', status=403)
            feed.delete()
            return HttpResponse(json.dumps({}), status=200)
        except Exception:
            # BUG FIX: the original printed to stdout (py2 `print`) and
            # fell through, returning None — an invalid view response.
            # Log and answer 500 like every other handler in this module.
            logger.critical(traceback.format_exc())
            return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
    else:
        return HttpResponse('', status=405)
@decorator_from_middleware(MobileMiddleware)
def read_user(self):
    """GET: return the general application state for the requesting user."""
    try:
        if self.method == 'GET':
            user = get_user_by_header_request(self)
            response = services.user_get_general_state(user.id)
            return HttpResponse(json.dumps(response), status=200)
        else:
            return HttpResponse('', status=405)
    except Exception:
        # Added for consistency with the other handlers; previously an
        # unexpected error propagated to Django's generic 500 handler.
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(MobileMiddleware)
def user_change_language(self):
    """POST /user/language: persist the user's UI language.

    Accepted values are 'pt', 'en' and 'es' (lowercase — the 400 message
    text says "PT, EN or ES"; kept as-is for client compatibility, but
    NOTE(review): the message/validation case mismatch should be fixed).
    """
    logger.info('Handling /user/language.')
    if self.method == 'POST':
        user = get_user_by_header_request(self)
        data = json.loads(self.body)
        language = data.get('language', None)
        if language is None:
            return HttpResponse('You should provide language parameter: PT, EN or ES.', status=400)
        if language not in ('pt', 'en', 'es'):
            return HttpResponse('Malformed syntax in language parameter.', status=400)
        user.language = language
        user.save()
        return HttpResponse(json.dumps({}), status=200)
    else:
        return HttpResponse('', status=405)
@decorator_from_middleware(MobileMiddleware)
def user_change_font_size(self):
    """POST /user/font: persist the user's font size ('S', 'M' or 'L')."""
    logger.info('Handling /user/font.')
    if self.method == 'POST':
        user = get_user_by_header_request(self)
        data = json.loads(self.body)
        font_size = data.get('font_size', None)
        if font_size is None:
            return HttpResponse('You should provide font_size parameter: S, M or L.', status=400)
        if font_size not in ('S', 'M', 'L'):
            return HttpResponse('Malformed syntax in font_size parameter.', status=400)
        user.font_size = font_size
        user.save()
        return HttpResponse(json.dumps({}), status=200)
    else:
        return HttpResponse('', status=405)
@decorator_from_middleware(MobileMiddleware)
def list_category_magazines(self):
    """GET: full catalogue snapshot for the mobile client.

    Returns the Solr repository version, every category (with per-language
    orderings and the magazine ids belonging to each), every magazine and
    a name-ordered magazine id list.
    """
    if self.method == 'GET':
        try:
            response = dict()
            response['solr_version'] = services.solr_repository_version()
            # Pre-computed category orderings, one per supported language.
            for lang in ('en', 'pt', 'es'):
                response['categories_order_' + lang] = list(
                    Category.objects.all()
                    .order_by('category_name_' + lang)
                    .values_list('id', flat=True))
            response['categories'] = {}
            for category in Category.objects.all():
                entry = category.to_dict()
                entry['magazines'] = list(
                    Magazine.objects.filter(categories=category)
                    .order_by('magazine_name').values_list('id', flat=True))
                response['categories'][category.id] = entry
            response['magazines'] = {}
            for magazine in Magazine.objects.all():
                response['magazines'][magazine.id] = magazine.to_dict()
            response['magazines_order'] = list(
                Magazine.objects.all().order_by('magazine_name').values_list('id', flat=True))
            return HttpResponse(json.dumps(response), status=200)
        except Exception:
            # Replaced the py2 stdout `print` with structured logging;
            # the error still propagates (re-raise) as before.
            logger.critical(traceback.format_exc())
            raise
    else:
        return HttpResponse('', status=405)
@decorator_from_middleware(MobileMiddleware)
def solr_version(self):
    """GET: return the current Solr repository version as JSON."""
    if self.method == 'GET':
        try:
            return HttpResponse(json.dumps(services.solr_repository_version()), status=200)
        except Exception:
            # Replaced the py2 stdout `print` with logging; still re-raises.
            logger.critical(traceback.format_exc())
            raise
    else:
        return HttpResponse('', status=405)
# endregion
# region BACKOFFICE
# region LOGIN/SIGNIN
def bo_administrator_login(self):
    """POST /backoffice/users/login: authenticate an administrator.

    Returns {'token': <JWT>} on success; 450 carries a domain error message
    (user not found / bad credentials); 500 for anything unexpected.
    """
    try:
        logger.info('Handling /backoffice/users/login.')
        if self.method == 'POST':
            data = JSONParser().parse(BytesIO(self.body))
            credentials = dict()
            credentials['email'] = data.get('email', None)
            credentials['password'] = data['password'].encode('utf-8')
            try:
                administrator = Administrator.objects.get(
                    email=credentials['email'], password__isnull=False, active=True)
            except Administrator.DoesNotExist:
                logger.warning('Login with administrator ({email}) failed. Administrator not found.'.format(email=credentials['email']))
                raise CustomException(CustomErrorMessages.USER_NOT_FOUND)
            # Classic bcrypt verification: re-hash with the stored hash as
            # salt and compare.  NOTE(review): bcrypt.checkpw is the modern,
            # constant-time spelling — confirm library version and switch.
            if not bcrypt.hashpw(credentials['password'], administrator.password.encode('utf-8')) == administrator.password.encode('utf-8'):
                logger.warning('Login with administrator ({email}) failed. Password doesn\'t match.'.format(email=credentials['email']))
                raise CustomException(CustomErrorMessages.INVALID_CREDENTIALS)
            response = dict()
            response['token'] = jwt_util.jwt_auth_generate_token(credentials)
            return HttpResponse(json.dumps(response), status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_administrator_me(self):
    """GET /backoffice/users/me: return the authenticated administrator."""
    try:
        logger.info('Handling /backoffice/users/me.')
        if self.method == 'GET':
            token = self.META.get('HTTP_AUTHORIZATION', None)
            user = jwt_util.jwt_auth_get_user(token)
            return HttpResponse(json.dumps(AdministratorSerializer(user).data), status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_administrator_change_password(self):
    """POST /backoffice/users/change-password: rotate the caller's password.

    Body: JSON with `current_password` and `new_password`.  401 when a
    field is missing, 450 when the current password does not match.
    """
    try:
        logger.info('Handling /backoffice/users/change-password.')
        if self.method == 'POST':
            data = JSONParser().parse(BytesIO(self.body))
            if 'current_password' not in data or 'new_password' not in data:
                return HttpResponse(status=401)
            current_password = data['current_password'].encode('utf-8')
            new_password = data['new_password'].encode('utf-8')
            token = self.META.get('HTTP_AUTHORIZATION', None)
            user = jwt_util.jwt_auth_get_user(token)
            if not bcrypt.hashpw(current_password, user.password.encode('utf-8')) == user.password.encode('utf-8'):
                # BUG FIX: this used to raise CustomErrorMessages(...) —
                # not an exception class — so a wrong current password
                # produced a TypeError/500 instead of the intended 450.
                raise CustomException(CustomErrorMessages.INVALID_CREDENTIALS)
            hashed_password = bcrypt.hashpw(new_password, bcrypt.gensalt())
            user.password = hashed_password
            user.save()
            return HttpResponse(status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
def bo_administrator_set_password(self):
    """POST /backoffice/users/set-password: set a password from a token.

    Body: JSON with `token` (recovery/welcome token) and `new_password`.
    """
    try:
        logger.info('Handling /backoffice/users/set-password.')
        if self.method == 'POST':
            data = JSONParser().parse(BytesIO(self.body))
            if 'token' not in data or 'new_password' not in data:
                return HttpResponse(status=401)
            token = data.get('token', None)
            new_password = data['new_password'].encode('utf-8')
            # NOTE(review): validate-recovery-token resolves tokens via
            # jwt_recovery_get_user; confirm that an auth-token lookup is
            # really intended here.
            user = jwt_util.jwt_auth_get_user(token)
            hashed_password = bcrypt.hashpw(new_password, bcrypt.gensalt())
            user.password = hashed_password
            user.save()
            return HttpResponse(status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
def bo_administrator_recover_password(self):
    """POST /backoffice/users/recover-password: e-mail a recovery token.

    Body: JSON with `email`.  450 when no administrator has that e-mail.
    """
    try:
        logger.info('Handling /backoffice/users/recover-password.')
        if self.method == 'POST':
            data = JSONParser().parse(BytesIO(self.body))
            if 'email' not in data:
                return HttpResponse(status=401)
            user_credentials = dict()
            user_credentials['email'] = data.get('email', None)
            if not Administrator.objects.filter(email=user_credentials['email']).exists():
                raise CustomException(CustomErrorMessages.USER_NOT_FOUND)
            token = jwt_util.jwt_recovery_generate_token(user_credentials)
            # Send the mail off-thread so the request returns immediately.
            thread.start_new_thread(email_sender.send_password_recovery_email, (user_credentials['email'], token))
            # NOTE(review): logging a live recovery token is a security
            # risk if logs are readable — consider removing this line.
            logger.info('Recovery token: ' + str(token))
            return HttpResponse(status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
def bo_administrator_validate_recovery_token(self):
    """POST /backoffice/users/validate-recovery-token: resolve a token.

    Returns the serialized administrator the recovery token belongs to.
    """
    try:
        logger.info('Handling /backoffice/users/validate-recovery-token.')
        if self.method == 'POST':
            data = JSONParser().parse(BytesIO(self.body))
            if 'token' not in data:
                return HttpResponse(status=401)
            user = jwt_util.jwt_recovery_get_user(data['token'])
            return HttpResponse(json.dumps(AdministratorSerializer(user).data), status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as e:
        return HttpResponse(e.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.critical(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
# endregion
# region ADMINISTRATOR CRUD
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_administrator_save(self):
    """POST /backoffice/users/save: create or update an administrator.

    Permission model (as implemented): profile 0 (superuser) may assign
    any magazine; other profiles may only save profile-2 users restricted
    to their own magazines.  New users get a welcome e-mail containing a
    recovery token so they can set a password.
    """
    try:
        logger.info('Handling /backoffice/users/save.')
        if self.method == 'POST':
            token = self.META.get('HTTP_AUTHORIZATION', None)
            user = jwt_util.jwt_auth_get_user(token)
            data = JSONParser().parse(BytesIO(self.body))
            # Magazines the caller is allowed to (re)assign.
            if user.profile == 0:
                removed_magazines = Magazine.objects.all().values_list('id', flat=True)
            else:
                removed_magazines = user.magazines.all().values_list('id', flat=True)
            new_user = False
            try:
                admin = Administrator.objects.get(email=data.get('email', None))
                admin.magazines = admin.magazines.exclude(id__in=removed_magazines)
            except Administrator.DoesNotExist:
                admin = None
                new_user = True
            serializer = AdministratorSerializer(admin, data=data)
            if not serializer.is_valid():
                return HttpResponse(status=401)
            if serializer.validated_data.get('id', None) is None and admin is not None:
                raise CustomException(CustomErrorMessages.USER_ALREADY_EXISTS)
            magazines = [val['id'] for val in serializer.validated_data.get('magazines', [])]
            if not (user.profile == 0 or (set(magazines).issubset(set(removed_magazines)) and serializer.validated_data.get('profile', None) == 2)):
                raise CustomException(CustomErrorMessages.NOT_ALLOWED_FOR_PROFILE)
            admin = serializer.save()
            if new_user:
                user_credentials = dict()
                user_credentials['email'] = admin.email
                token = jwt_util.jwt_recovery_generate_token(user_credentials)
                thread.start_new_thread(email_sender.send_welcome_email, (admin.email, token))
            admin_data = AdministratorSerializer(admin).data
            if not user.profile == 0:
                # Restrict the echoed magazine list to what the caller manages.
                visible = admin.magazines.filter(id__in=user.magazines.all().values_list('id', flat=True))
                admin_data['magazines'] = MagazineSerializer(visible, many=True).data
            # BUG FIX: the previous code re-serialized the already serialized
            # dict (AdministratorSerializer(dict).data) for non-superusers.
            return HttpResponse(json.dumps(admin_data), status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_administrator_list(self):
    """GET /backoffice/users/list: administrators visible to the caller.

    Profile 0 sees everyone; profile 1 sees only profile-2 users with the
    magazine list narrowed to the caller's own magazines; anyone else
    gets an empty list.
    """
    try:
        logger.info('Handling /backoffice/users/list.')
        if self.method == 'GET':
            token = self.META.get('HTTP_AUTHORIZATION', None)
            user = jwt_util.jwt_auth_get_user(token)
            if user.profile == 0:
                queryset = Administrator.objects.filter(active=True).order_by('-create_time')
                administrators = AdministratorSerializer(queryset, many=True).data
            elif user.profile == 1:
                queryset = Administrator.objects.filter(active=True, profile=2).order_by('-create_time')
                own_magazine_ids = user.magazines.all().values_list('id', flat=True)
                administrators = []
                # Removed leftover debug logging (type()/payload dumps).
                for result in queryset:
                    admin = AdministratorSerializer(result).data
                    admin['magazines'] = MagazineSerializer(
                        result.magazines.filter(id__in=own_magazine_ids), many=True).data
                    administrators.append(admin)
            else:
                administrators = []
            return HttpResponse(json.dumps(administrators), status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_administrator_delete(self, pk):
    """DELETE /backoffice/users/delete: deactivate an administrator.

    Soft delete — the row is kept and `active` is cleared.
    """
    try:
        logger.info('Handling /backoffice/users/delete')
        if self.method == 'DELETE':
            try:
                administrator = Administrator.objects.get(id=pk)
            except Administrator.DoesNotExist:
                raise CustomException(CustomErrorMessages.USER_NOT_FOUND)
            administrator.active = False
            administrator.save()
            return HttpResponse(status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
# endregion
# region CATEGORIES
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_category_list(self):
    """GET /backoffice/category.svc/list: list every category."""
    try:
        logger.info('Handling /backoffice/category.svc/list.')
        if self.method == 'GET':
            categories = Category.objects.all()
            return HttpResponse(json.dumps(CategorySerializer(categories, many=True).data), status=200)
        else:
            return HttpResponse(status=405)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
# endregion
# region MAGAZINES
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_magazine_list(self):
    """GET /backoffice/magazine.svc/list: magazines visible to the caller.

    Superusers (profile 0) see everything; others only the magazines they
    administer.
    """
    try:
        logger.info('Handling /backoffice/magazine.svc/list.')
        if self.method == 'GET':
            try:
                token = self.META.get('HTTP_AUTHORIZATION', None)
                user = jwt_util.jwt_auth_get_user(token)
                if user.profile == 0:
                    magazines = Magazine.objects.all()
                else:
                    magazines = Magazine.objects.filter(administrator=user)
                return HttpResponse(json.dumps(MagazineSerializer(magazines, many=True).data), status=200)
            except Administrator.DoesNotExist:
                raise CustomException(CustomErrorMessages.USER_NOT_FOUND)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
# endregion
@decorator_from_middleware(BackofficeAuthMiddleware)
@transaction.atomic
def bo_cover_save(self):
    """POST /backoffice/articles/upload-cover: attach a cover to an article.

    Multipart form with `article_id` and `file`; stores the image, links it
    to the article's magazine and uploader, and pushes the metadata to Solr.
    """
    try:
        with transaction.atomic():
            logger.info('Handling /backoffice/articles/upload-cover.')
            if self.method == 'POST':
                token = self.META.get('HTTP_AUTHORIZATION', None)
                article_id = self.POST.get('article_id', None)
                image = self.FILES['file']
                article = solr_service.get_article(article_id)
                user = jwt_util.jwt_auth_get_user(token)
                if not validator.user_can_perform_cover_management(user.id, article):
                    raise CustomException(CustomErrorMessages.NOT_ALLOWED_FOR_PROFILE)
                try:
                    # Replacing an existing cover: drop the old image file.
                    cover_article = CoverArticle.objects.get(article_id=article_id)
                    cover_article.image.delete()
                except CoverArticle.DoesNotExist:
                    cover_article = CoverArticle(article_id=article_id)
                cover_article.magazine = Magazine.objects.get(id=article['journal_id'])
                cover_article.administrator = user
                cover_article.save()
                # Random file name avoids collisions and stale caches.
                cover_article.image.save(str(uuid.uuid4()) + '.png', image)
                cover_article.save()
                article['image_upload_date'] = cover_article.upload_time
                # NOTE(review): this stores the FieldFile object itself in
                # the Solr document; confirm whether .url/.name was meant.
                article['image_upload_path'] = cover_article.image
                article['image_uploader'] = user.name
                solr_service.add_article(article)
                return HttpResponse(json.dumps(CoverArticleSerializer(cover_article).data), status=200)
            else:
                return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
@decorator_from_middleware(BackofficeAuthMiddleware)
def bo_cover_delete(self, article_id):
    """DELETE /backoffice/articles/delete-cover/<id>: remove a cover.

    Deletes the stored image (if any) and clears the cover fields in the
    article's Solr document.  Idempotent.
    """
    try:
        logger.info('Handling /backoffice/articles/delete-cover/' + str(article_id))
        if self.method == 'DELETE':
            token = self.META.get('HTTP_AUTHORIZATION', None)
            article = solr_service.get_article(article_id)
            user = jwt_util.jwt_auth_get_user(token)
            if not validator.user_can_perform_cover_management(user.id, article):
                raise CustomException(CustomErrorMessages.NOT_ALLOWED_FOR_PROFILE)
            try:
                cover_article = CoverArticle.objects.get(article_id=article_id)
                cover_article.image.delete()
                cover_article.delete()
            except CoverArticle.DoesNotExist:
                # Already gone — deletion stays idempotent.
                pass
            article['image_upload_date'] = None
            article['image_upload_path'] = None
            article['image_uploader'] = None
            solr_service.add_article(article)
            return HttpResponse(status=200)
        else:
            return HttpResponse(status=405)
    except CustomException as ce:
        return HttpResponse(ce.message, status=450)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
def bo_cover_get(self, article_id):
    """GET /backoffice/articles/get-cover: stream an article's cover image.

    Returns an empty 200 body when no cover exists — NOTE(review): a 404
    might be more appropriate; kept for client compatibility.
    """
    try:
        logger.info('Handling /backoffice/articles/get-cover')
        if self.method == 'GET':
            try:
                cover_article = CoverArticle.objects.get(article_id=article_id)
                return HttpResponse(cover_article.image, content_type='image/png', status=200)
            except CoverArticle.DoesNotExist:
                pass
            return HttpResponse(status=200)
        else:
            return HttpResponse(status=405)
    except Exception:
        # Narrowed from a bare `except:`.
        logger.error(traceback.format_exc())
        return HttpResponse(CustomErrorMessages.UNEXPECTED_ERROR, status=500)
# endregion
| {
"content_hash": "b09f8dba10662bfbd27a78a58e295bff",
"timestamp": "",
"source": "github",
"line_count": 976,
"max_line_length": 158,
"avg_line_length": 35.71823770491803,
"alnum_prop": 0.6048592983563295,
"repo_name": "Infobase/pulsemob_webservices",
"id": "82b5593214ad6d79099b6378d611a8febe37d0d3",
"size": "34877",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pulsemob_webservices/webservices/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Nginx",
"bytes": "892"
},
{
"name": "Python",
"bytes": "110237"
}
],
"symlink_target": ""
} |
"""
Greedy implementation of the vision problem.
Written by Ooj Amit Srivastava.
"""
from typing import List
def solve(satisfactions: List[int], walkers: List[int]) -> int:
    """Greedy solution: each walker heads to its best adjacent pair and
    bounces back and forth between the two telescopes for its remaining
    turns; the total is the sum of each walker's best achievable score.

    BUG FIX: the inner loop previously used `break` when a walker could
    not reach pair i, silently skipping all later walkers unless the
    walker list happened to be sorted in descending order; `continue`
    evaluates every walker independently of input order.
    """
    scores = {w: [] for w in walkers}
    # prefix[i] == sum(satisfactions[:i]); hoists the O(n) slice-sum that
    # used to run inside the loop (O(n^2) overall) to a single pass.
    prefix = [0]
    for s in satisfactions:
        prefix.append(prefix[-1] + s)
    # A walker with path length w can reach at most telescope index w.
    for i in range(min(len(satisfactions), max(walkers)) - 1):
        for w in walkers:
            if i >= w:
                continue  # this walker cannot reach pair (i, i+1)
            turns = w - i
            # Remaining turns alternate between telescopes i and i+1.
            value = ((turns + 1) // 2) * satisfactions[i] + (turns // 2) * satisfactions[i + 1]
            scores[w].append(value + prefix[i])
    # prefix cost is included in every score, so the per-walker max is the
    # best full path; duplicates in `walkers` are counted once each.
    return sum(max(scores[w]) for w in walkers)
if __name__ == "__main__":
    # First input line: telescope count then walker count.
    t, w = input().split()
    # Satisfaction for ith telescope
    # S = [1, 2, 3, -1, -1, 10]
    S = [int(input()) for _ in range(int(t))]
    # Path length of jth walker
    # U = (23, 24)
    U = [int(input()) for _ in range(int(w))]
    print(solve(S, U))
| {
"content_hash": "d33559fb8392f9e8ebbd2193204e9060",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 83,
"avg_line_length": 31.58823529411765,
"alnum_prop": 0.5704531346989448,
"repo_name": "matthewelse/british-informatics-olympiad",
"id": "c398faef07486748d67f9056f4ce86ade708de1b",
"size": "1611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2020/vision/vision_greedy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "603"
},
{
"name": "C++",
"bytes": "16770"
},
{
"name": "Python",
"bytes": "78910"
}
],
"symlink_target": ""
} |
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.hdr_test import HdrTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestHdrTest(unittest.TestCase):
    """HdrTest unit test stubs."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def make_instance(self, include_optional):
        """Construct a HdrTest instance.

        With ``include_optional`` False only required parameters are
        supplied; with True the optional parameters are populated too.
        """
        # model = telestream_cloud_qc.models.hdr_test.HdrTest()  # noqa: E501
        if not include_optional:
            return HdrTest()
        return HdrTest(
            hdr_standard='GenericHdr',
            max_fall_max_enabled=True,
            max_fall_max=56,
            max_fall_error_enabled=True,
            max_fall_error=56,
            max_cll_max_enabled=True,
            max_cll_max=56,
            max_cll_error_enabled=True,
            max_cll_error=56,
            always_calculate=True,
            always_report=True,
            reject_on_error=True,
            checked=True,
        )

    def testHdrTest(self):
        """Instantiate HdrTest with and without optional parameters."""
        inst_req_only = self.make_instance(include_optional=False)
        inst_req_and_optional = self.make_instance(include_optional=True)
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "53e8e64d79dcd509c952e818c911649b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 28.158730158730158,
"alnum_prop": 0.5676437429537767,
"repo_name": "Telestream/telestream-cloud-python-sdk",
"id": "026e27946dd58396e66f2b490b451a29c69cce86",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telestream_cloud_qc_sdk/test/test_hdr_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1339719"
},
{
"name": "Shell",
"bytes": "6712"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import re
import urllib.request
# Maven repository and artifact coordinates for the Play Services
# "instantapps" client library.
_REPO_URL = 'https://dl.google.com/dl/android/maven2'
_GROUP_NAME = 'com/google/android/gms'
_MODULE_NAME = 'play-services-instantapps'
_FILE_EXT = 'aar'
# Set to a version string to pin a version instead of querying
# maven-metadata.xml for the latest one.
_OVERRIDE_LATEST = None
# Chromium-local patch suffix appended to every reported version.
_PATCH_VERSION = 'cr1'
def do_latest():
    """Print the newest upstream version with our patch suffix appended."""
    if _OVERRIDE_LATEST is not None:
        print(f'{_OVERRIDE_LATEST}.{_PATCH_VERSION}')
        return
    metadata_url = f'{_REPO_URL}/{_GROUP_NAME}/{_MODULE_NAME}/maven-metadata.xml'
    metadata = urllib.request.urlopen(metadata_url).read().decode('utf-8')
    # Parse with regular expressions rather than an XML parser: the stdlib
    # XML parsers are susceptible to maliciously crafted input, and a regex
    # is enough for what we need to extract here.
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        latest = match.group(1)
    else:
        # No <latest> tag: assume the versions are listed in order and take
        # the last one (commonly the case).
        latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
    print(f'{latest}.{_PATCH_VERSION}')
def get_download_url(version):
    """Print a partial 3pp manifest (JSON) with the aar's download URL."""
    # The Chromium patch suffix (".crN") is not part of the upstream
    # version, so strip it before building the URL.
    version_without_patch, patch = version.rsplit('.', 1)
    if patch.startswith('cr'):
        version = version_without_patch
    file_name = '{}-{}.{}'.format(_MODULE_NAME, version, _FILE_EXT)
    file_url = '/'.join([_REPO_URL, _GROUP_NAME, _MODULE_NAME, version, file_name])
    print(json.dumps({
        'url': [file_url],
        'name': [file_name],
        'ext': '.' + _FILE_EXT,
    }))
def main():
    """Dispatch the 3pp fetch subcommands (``latest`` / ``get_url``)."""
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()

    latest_cmd = subparsers.add_parser('latest')
    latest_cmd.set_defaults(func=lambda _opts: do_latest())

    get_url_cmd = subparsers.add_parser('get_url')
    get_url_cmd.set_defaults(
        func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))

    options = parser.parse_args()
    options.func(options)
# Entry point used by the 3pp fetch protocol.
if __name__ == '__main__':
    main()
| {
"content_hash": "c2817d273fbd9e78420acfb4b038d90e",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.771428571428572,
"alnum_prop": 0.5962230215827338,
"repo_name": "chromium/chromium",
"id": "3acce0731de437786a4b690dfda2414bf50fb606",
"size": "2491",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/android_deps/libs/com_google_android_gms_play_services_instantapps/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import prefix_limit
from . import config
from . import state
class ipv6_unicast(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/ipv6-unicast. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: IPv6 unicast configuration options
"""
__slots__ = ("_path_helper", "_extmethods", "__prefix_limit", "__config", "__state")
_yang_name = "ipv6-unicast"
_pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        """Build the three child containers and optionally copy changed
        elements from a single source object passed positionally
        (pyangbind convention)."""
        self._path_helper = False
        self._extmethods = False
        # Child containers, each wrapped in YANGDynClass so pyangbind can
        # track changes and register their YANG paths.
        self.__prefix_limit = YANGDynClass(
            base=prefix_limit.prefix_limit,
            is_container="container",
            yang_name="prefix-limit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # The source object must expose every element this class defines.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that differ from their defaults.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"global",
"afi-safis",
"afi-safi",
"ipv6-unicast",
]
    def _get_prefix_limit(self):
        """
        Getter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit (container)

        YANG Description: Configure the maximum number of prefixes that will be
        accepted from a peer
        """
        # Name-mangled attribute populated in __init__.
        return self.__prefix_limit
    def _set_prefix_limit(self, v, load=False):
        """
        Setter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_prefix_limit is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_prefix_limit() directly.

        YANG Description: Configure the maximum number of prefixes that will be
        accepted from a peer
        """
        # Unwrap a previously-wrapped YANGDynClass value before re-wrapping.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Coerce the supplied value into the generated container type;
            # an incompatible value raises the structured ValueError below.
            t = YANGDynClass(
                v,
                base=prefix_limit.prefix_limit,
                is_container="container",
                yang_name="prefix-limit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """prefix_limit must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=prefix_limit.prefix_limit, is_container='container', yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )

        self.__prefix_limit = t
        # Give pyangbind's change-tracking hook a chance to run.
        if hasattr(self, "_set"):
            self._set()
def _unset_prefix_limit(self):
    # Reset prefix_limit to a fresh, empty container with default settings.
    self.__prefix_limit = YANGDynClass(
        base=prefix_limit.prefix_limit,
        is_container="container",
        yang_name="prefix-limit",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        extensions=None,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="container",
        is_config=True,
    )
def _get_config(self):
    """
    Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/config (container)

    YANG Description: Configuration parameters for common IPv4 and IPv6 unicast
    AFI-SAFI options
    """
    # Double-underscore attribute is name-mangled private storage initialised
    # by the enclosing generated class's __init__.
    return self.__config
def _set_config(self, v, load=False):
    """
    Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/config (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_config is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_config() directly.

    YANG Description: Configuration parameters for common IPv4 and IPv6 unicast
    AFI-SAFI options
    """
    # If the value exposes a pyangbind underlying-type hook, normalise it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Re-wrap the value so it is validated against the generated container
    # class; TypeError/ValueError mean the value is not container-compatible.
    try:
        t = YANGDynClass(
            v,
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """config must be of a type compatible with container""",
                "defined-type": "container",
                "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
            }
        )
    self.__config = t
    # Propagate the change upward if the object defines a _set() hook.
    if hasattr(self, "_set"):
        self._set()
def _unset_config(self):
    # Reset config to a fresh, empty container with default settings.
    self.__config = YANGDynClass(
        base=config.config,
        is_container="container",
        yang_name="config",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        extensions=None,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="container",
        is_config=True,
    )
def _get_state(self):
    """
    Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/state (container)

    YANG Description: State information for common IPv4 and IPv6 unicast
    parameters
    """
    # Double-underscore attribute is name-mangled private storage initialised
    # by the enclosing generated class's __init__.
    return self.__state
def _set_state(self, v, load=False):
    """
    Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/state (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_state is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_state() directly.

    YANG Description: State information for common IPv4 and IPv6 unicast
    parameters
    """
    # If the value exposes a pyangbind underlying-type hook, normalise it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Re-wrap the value so it is validated against the generated container
    # class; TypeError/ValueError mean the value is not container-compatible.
    try:
        t = YANGDynClass(
            v,
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    except (TypeError, ValueError):
        raise ValueError(
            {
                "error-string": """state must be of a type compatible with container""",
                "defined-type": "container",
                "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
            }
        )
    self.__state = t
    # Propagate the change upward if the object defines a _set() hook.
    if hasattr(self, "_set"):
        self._set()
def _unset_state(self):
    # Reset state to a fresh, empty container with default settings.
    self.__state = YANGDynClass(
        base=state.state,
        is_container="container",
        yang_name="state",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        extensions=None,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="container",
        is_config=True,
    )
# Public property views over the private containers: reads go through the
# _get_* accessors, writes through the validating _set_* accessors.
prefix_limit = __builtin__.property(_get_prefix_limit, _set_prefix_limit)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)

# Ordered registry of this container's child elements, used by pyangbind
# for iteration and serialisation.
_pyangbind_elements = OrderedDict(
    [("prefix_limit", prefix_limit), ("config", config), ("state", state)]
)
from . import prefix_limit
from . import config
from . import state
class ipv6_unicast(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/global/afi-safis/afi-safi/ipv6-unicast. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: IPv6 unicast configuration options
    """

    # Restrict instances to these attributes (the double-underscore names are
    # name-mangled per instance).
    __slots__ = ("_path_helper", "_extmethods", "__prefix_limit", "__config", "__state")

    _yang_name = "ipv6-unicast"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        # Path helper / extension methods are disabled until a parent wires
        # them in.
        self._path_helper = False
        self._extmethods = False
        # Instantiate each child container with its default (empty) value.
        self.__prefix_limit = YANGDynClass(
            base=prefix_limit.prefix_limit,
            is_container="container",
            yang_name="prefix-limit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            # Copy-construction: a single source object must expose every
            # child element; only elements marked changed are copied across.
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container; the static path is used when
        # the object is not attached to a parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "global",
                "afi-safis",
                "afi-safi",
                "ipv6-unicast",
            ]

    def _get_prefix_limit(self):
        """
        Getter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit (container)

        YANG Description: Configure the maximum number of prefixes that will be
        accepted from a peer
        """
        return self.__prefix_limit

    def _set_prefix_limit(self, v, load=False):
        """
        Setter method for prefix_limit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/prefix_limit (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_prefix_limit is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_prefix_limit() directly.

        YANG Description: Configure the maximum number of prefixes that will be
        accepted from a peer
        """
        # If the value exposes a pyangbind underlying-type hook, normalise it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value so it is validated against the generated container
        # class; TypeError/ValueError mean it is not container-compatible.
        try:
            t = YANGDynClass(
                v,
                base=prefix_limit.prefix_limit,
                is_container="container",
                yang_name="prefix-limit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """prefix_limit must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=prefix_limit.prefix_limit, is_container='container', yang_name="prefix-limit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__prefix_limit = t
        # Propagate the change upward if the object defines a _set() hook.
        if hasattr(self, "_set"):
            self._set()

    def _unset_prefix_limit(self):
        # Reset prefix_limit to a fresh, empty container with default settings.
        self.__prefix_limit = YANGDynClass(
            base=prefix_limit.prefix_limit,
            is_container="container",
            yang_name="prefix-limit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_config(self):
        """
        Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/config (container)

        YANG Description: Configuration parameters for common IPv4 and IPv6 unicast
        AFI-SAFI options
        """
        return self.__config

    def _set_config(self, v, load=False):
        """
        Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/config (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_config is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_config() directly.

        YANG Description: Configuration parameters for common IPv4 and IPv6 unicast
        AFI-SAFI options
        """
        # If the value exposes a pyangbind underlying-type hook, normalise it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value so it is validated against the generated container
        # class; TypeError/ValueError mean it is not container-compatible.
        try:
            t = YANGDynClass(
                v,
                base=config.config,
                is_container="container",
                yang_name="config",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """config must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__config = t
        # Propagate the change upward if the object defines a _set() hook.
        if hasattr(self, "_set"):
            self._set()

    def _unset_config(self):
        # Reset config to a fresh, empty container with default settings.
        self.__config = YANGDynClass(
            base=config.config,
            is_container="container",
            yang_name="config",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_state(self):
        """
        Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/state (container)

        YANG Description: State information for common IPv4 and IPv6 unicast
        parameters
        """
        return self.__state

    def _set_state(self, v, load=False):
        """
        Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/global/afi_safis/afi_safi/ipv6_unicast/state (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_state is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_state() directly.

        YANG Description: State information for common IPv4 and IPv6 unicast
        parameters
        """
        # If the value exposes a pyangbind underlying-type hook, normalise it.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        # Re-wrap the value so it is validated against the generated container
        # class; TypeError/ValueError mean it is not container-compatible.
        try:
            t = YANGDynClass(
                v,
                base=state.state,
                is_container="container",
                yang_name="state",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """state must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__state = t
        # Propagate the change upward if the object defines a _set() hook.
        if hasattr(self, "_set"):
            self._set()

    def _unset_state(self):
        # Reset state to a fresh, empty container with default settings.
        self.__state = YANGDynClass(
            base=state.state,
            is_container="container",
            yang_name="state",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    # Public property views over the private containers: reads go through the
    # _get_* accessors, writes through the validating _set_* accessors.
    prefix_limit = __builtin__.property(_get_prefix_limit, _set_prefix_limit)
    config = __builtin__.property(_get_config, _set_config)
    state = __builtin__.property(_get_state, _set_state)

    # Ordered registry of this container's child elements, used by pyangbind
    # for iteration and serialisation.
    _pyangbind_elements = OrderedDict(
        [("prefix_limit", prefix_limit), ("config", config), ("state", state)]
    )
| {
"content_hash": "e436d7c2f6adffce3c76938cd9630d58",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 395,
"avg_line_length": 39.71076923076923,
"alnum_prop": 0.5841469084146909,
"repo_name": "napalm-automation/napalm-yang",
"id": "e21a4ae4169a6c1cb7a759053fe3d6e6d46b419c",
"size": "25836",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/global_/afi_safis/afi_safi/ipv6_unicast/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
import argparse
import copy
import sys
from ros_buildfarm.argument import add_argument_arch
from ros_buildfarm.argument import add_argument_build_name
from ros_buildfarm.argument import add_argument_cache_dir
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.argument import \
add_argument_distribution_repository_key_files
from ros_buildfarm.argument import add_argument_distribution_repository_urls
from ros_buildfarm.argument import add_argument_dockerfile_dir
from ros_buildfarm.argument import add_argument_os_code_name
from ros_buildfarm.argument import add_argument_rosdistro_name
from ros_buildfarm.common import get_distribution_repository_keys
from ros_buildfarm.common import get_user_id
from ros_buildfarm.templates import create_dockerfile
def main(argv=sys.argv[1:]):
    """Generate the Dockerfile for the release 'check_sync_criteria' job.

    Parses the standard build-farm arguments, assembles the template data
    (repository URLs/keys and the invoking user's uid) and renders the
    Dockerfile into the requested directory.
    """
    parser = argparse.ArgumentParser(
        description="Run the 'check_sync_criteria' job")
    add_argument_config_url(parser)
    add_argument_rosdistro_name(parser)
    add_argument_build_name(parser, 'release')
    add_argument_os_code_name(parser)
    add_argument_arch(parser)
    add_argument_distribution_repository_urls(parser)
    add_argument_distribution_repository_key_files(parser)
    add_argument_cache_dir(parser)
    add_argument_dockerfile_dir(parser)
    args = parser.parse_args(argv)

    # Template data: start from a deep copy of the parsed arguments, then
    # layer on the derived values the Dockerfile template expects.
    data = copy.deepcopy(args.__dict__)
    data['distribution_repository_urls'] = args.distribution_repository_urls
    data['distribution_repository_keys'] = get_distribution_repository_keys(
        args.distribution_repository_urls,
        args.distribution_repository_key_files)
    data['uid'] = get_user_id()

    create_dockerfile(
        'release/release_check_sync_criteria_task.Dockerfile.em',
        data, args.dockerfile_dir)
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()
| {
"content_hash": "1e4638da6fc0ab806e91e88d8ccd364c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 37.46938775510204,
"alnum_prop": 0.7478213507625272,
"repo_name": "130s/ros_buildfarm",
"id": "72b8e2335d80666396e394293a1b66daa0cf1cbb",
"size": "1860",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/release/run_check_sync_criteria_job.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4148"
},
{
"name": "EmberScript",
"bytes": "175039"
},
{
"name": "JavaScript",
"bytes": "10890"
},
{
"name": "Python",
"bytes": "347184"
},
{
"name": "Shell",
"bytes": "5932"
}
],
"symlink_target": ""
} |
import json
import re
import flavorsync.test.config as config
from lxml import etree, objectify
from flavorsync.model import Infrastructure, Flavor, FlavorCollection
# MIME types exercised by the API tests; WRONG_MIMETYPE is a deliberately
# unsupported value for negative test cases.
XML_MIMETYPE = 'application/xml'
JSON_MIMETYPE = 'application/json'
WRONG_MIMETYPE = 'application/whatever'

# Directories holding the canned example payloads used by the test suite.
JSON_EXAMPLE_PAYLOADS_DIR = 'flavorsync/test/example_payloads/json/'
XML_EXAMPLE_PAYLOADS_DIR = 'flavorsync/test/example_payloads/xml/'
def load_xml_example_as_string(filename):
    """Return the raw contents of the XML example payload *filename*."""
    path = XML_EXAMPLE_PAYLOADS_DIR + filename
    return _load_example_file_as_string(path)
def load_json_example_as_string(filename):
    """Return the raw contents of the JSON example payload *filename*."""
    path = JSON_EXAMPLE_PAYLOADS_DIR + filename
    return _load_example_file_as_string(path)
def _load_example_file_as_string(file_path):
    """Read *file_path* and return its contents as a string.

    For infrastructure-request payloads the OpenStack credential placeholders
    are substituted with the values from the test configuration.
    """
    # Context manager guarantees the handle is closed even if read() raises;
    # the original open()/read()/close() sequence leaked the handle on error.
    with open(file_path, 'r') as example_file:
        payload = example_file.read()

    if 'infrastructure_request' in file_path:
        # Inject the OpenStack test credentials into the template payload.
        replacements = (
            ("{OPENSTACK_TEST_KEYSTONE_URL}", config.OPENSTACK_TEST_KEYSTONE_URL),
            ("{OPENSTACK_TEST_USERNAME}", config.OPENSTACK_TEST_USERNAME),
            ("{OPENSTACK_TEST_PASSWORD}", config.OPENSTACK_TEST_PASSWORD),
            ("{OPENSTACK_TEST_TENANT}", config.OPENSTACK_TEST_TENANT),
        )
        for placeholder, value in replacements:
            payload = payload.replace(placeholder, value)
    return payload
def load_xml_from_file(filename):
    """Parse the XML example *filename* and return its objectified root."""
    return objectify.fromstring(load_clean_xml_payload(filename))
def load_clean_xml_payload(filename):
    """Load an XML example and normalise it: first strip the XML declaration,
    then collapse the pretty-printing whitespace between tags."""
    raw = load_xml_example_as_string(filename)
    return remove_non_usable_characters(remove_xml_header(raw))
def load_json_from_file(filename):
    """Load the JSON example *filename* and return the parsed Python data."""
    return json.loads(load_json_example_as_string(filename))
def remove_xml_header(xml):
    """Strip XML declarations / processing instructions (``<?...?>``) from *xml*.

    The original pattern used a greedy ``.*``: with more than one ``<?...?>``
    in the document it removed everything between the first ``<?`` and the
    last ``?>``, including real content. The non-greedy form removes each
    instruction individually; the pattern is also a raw string so the escapes
    are unambiguous.
    """
    return re.sub(r"<\?.*?\?>", "", xml)
def remove_non_usable_characters(xml):
    """Flatten a pretty-printed XML string: drop newlines and the indentation
    whitespace immediately before and after tags."""
    cleaned = xml
    for pattern, replacement in (("\\n", ""), (" +<", "<"), ("> +", ">")):
        cleaned = re.sub(pattern, replacement, cleaned)
    return cleaned
def json_are_equal(payload1, payload2):
    """Return True when two JSON documents are structurally equal.

    Each payload may be either an already-parsed ``dict`` or a JSON string;
    key order and list order are ignored. An unsupported payload type now
    raises a clear ``TypeError`` (the original crashed with an
    ``UnboundLocalError`` because the coerced variable was never bound).
    """
    first = _coerce_json(payload1)
    second = _coerce_json(payload2)
    return _order_json_data(first) == _order_json_data(second)


def _coerce_json(payload):
    """Return *payload* as parsed Python data; dicts pass through unchanged."""
    if isinstance(payload, dict):
        return payload
    if isinstance(payload, str):
        return json.loads(payload)
    raise TypeError(
        'payload must be a dict or a JSON string, got %r' % type(payload))


def _order_json_data(obj):
    """Recursively sort dicts and lists so logically-equal JSON compares equal."""
    if isinstance(obj, dict):
        return sorted((k, _order_json_data(v)) for k, v in obj.items())
    if isinstance(obj, list):
        return sorted(_order_json_data(x) for x in obj)
    return obj
def create_example_infrastructure():
    """Return the canonical test Infrastructure ('Mordor'), wired to the
    OpenStack endpoint and credentials from the test configuration."""
    return Infrastructure('Mordor',
        config.OPENSTACK_TEST_KEYSTONE_URL,
        config.OPENSTACK_TEST_USERNAME,
        config.OPENSTACK_TEST_PASSWORD,
        config.OPENSTACK_TEST_TENANT)
def _create_secondary_example_infrastructure():
    """Return a second, fixed test Infrastructure ('SaoPaulo') with dummy
    credentials (never contacted by the tests)."""
    return Infrastructure('SaoPaulo', 'http://55.66.77.88:35357/',
        'myUsername', 'myPassword', 'myTenant')
def create_example_flavor(infrastructure=None):
    """Return the primary example Flavor ('insane'), registered on the given
    infrastructure (defaults to the canonical 'Mordor' one)."""
    infra = infrastructure or create_example_infrastructure()
    return Flavor('567b200e-0aca-49e0-8e9a-8c1f6ad3abe2', 'insane', 640,
                  1232896, 1262485504, 0, False, False, [infra])
def create_secondary_example_flavor(infrastructure=None):
    """Return the secondary example Flavor ('hpc'), registered on the given
    infrastructure (defaults to the secondary 'SaoPaulo' one)."""
    infra = infrastructure or _create_secondary_example_infrastructure()
    return Flavor('857dc211-e1f4-4cbe-b498-6847c14acb26', 'hpc', 16,
                  5120, 100, 0, False, True, [infra])
def create_example_flavor_collection(infrastructure=None):
    """Return a FlavorCollection holding both example flavors, optionally
    bound to a shared infrastructure."""
    return FlavorCollection([
        create_example_flavor(infrastructure),
        create_secondary_example_flavor(infrastructure),
    ])
"content_hash": "3815760149afa685fb4bec836b71e5a6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 102,
"avg_line_length": 34.88235294117647,
"alnum_prop": 0.6535774512165743,
"repo_name": "Fiware/ops.Flavor-sync",
"id": "b539bf2a8b54675a800b7361763db5d943e467ea",
"size": "4151",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flavorsync/test/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "36933"
},
{
"name": "Python",
"bytes": "135827"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
# Package version as a tuple of ints, rendered to the dotted release string.
VERSION = (1, 0, 0)
__version__ = '.'.join((str(x) for x in VERSION))

# Packaging metadata for the 'appetizer' distribution.
setup(
    name='appetizer',
    version=__version__,
    description='Appetizer tools for test recording, replaying and functional testing',
    author='Mingyuan Xia',
    author_email='mxia@mxia.me',
    url='https://github.com/appetizerio/appetizer-toolkit-py',
    license='Apache v2',
    packages=['appetizer'],
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
| {
"content_hash": "7945a1b5587e3f61ea69cb01f5366c48",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 32.73076923076923,
"alnum_prop": 0.6263219741480611,
"repo_name": "appetizerio/replaykit.py",
"id": "41a47666874b36f176b99f08493997d41958ce35",
"size": "851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12535"
}
],
"symlink_target": ""
} |
"""Usage information for the main IPython applications.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
from IPython.core import release
# Command-line usage text shown by `ipython --help` (string content is
# user-facing output and must not be altered).
cl_usage = """\
=========
IPython
=========
Tools for Interactive Computing in Python
=========================================
A Python shell with automatic history (input and output), dynamic object
introspection, easier configuration, command completion, access to the
system shell and more. IPython can also be embedded in running programs.
Usage
ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
If invoked with no options, it executes the file and exits, passing the
remaining arguments to the script, just as if you had specified the same
command with python. You may need to specify `--` before args to be passed
to the script, to prevent IPython from attempting to parse them. If you
specify the option `-i` before the filename, it will enter an interactive
IPython session after running the script, rather than exiting. Files ending
in .py will be treated as normal Python, but files ending in .ipy can
contain special IPython syntax (magic commands, shell expansions, etc.).
Almost all configuration in IPython is available via the command-line. Do
`ipython --help-all` to see all available options. For persistent
configuration, look into your `ipython_config.py` configuration file for
details.
This file is typically installed in the `IPYTHONDIR` directory, and there
is a separate configuration directory for each profile. The default profile
directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
defaults to to `$HOME/.ipython`. For Windows users, $HOME resolves to
C:\\Users\\YourUserName in most instances.
To initialize a profile with the default configuration file, do::
$> ipython profile create
and start editing `IPYTHONDIR/profile_default/ipython_config.py`
In IPython's documentation, we will refer to this directory as
`IPYTHONDIR`, you can change its default location by creating an
environment variable with this name and setting it to the desired path.
For more information, see the manual available in HTML and PDF in your
installation, or online at https://ipython.org/documentation.html.
"""
interactive_usage = """
IPython -- An enhanced Interactive Python
=========================================
IPython offers a fully compatible replacement for the standard Python
interpreter, with convenient shell features, special commands, command
history mechanism and output results caching.
At your system command line, type 'ipython -h' to see the command line
options available. This document only describes interactive features.
GETTING HELP
------------
Within IPython you have various way to access help:
? -> Introduction and overview of IPython's features (this screen).
object? -> Details about 'object'.
object?? -> More detailed, verbose information about 'object'.
%quickref -> Quick reference of all IPython specific syntax and magics.
help -> Access Python's own help system.
If you are in terminal IPython you can quit this screen by pressing `q`.
MAIN FEATURES
-------------
* Access to the standard Python help with object docstrings and the Python
manuals. Simply type 'help' (no quotes) to invoke it.
* Magic commands: type %magic for information on the magic subsystem.
* System command aliases, via the %alias command or the configuration file(s).
* Dynamic object information:
Typing ?word or word? prints detailed information about an object. Certain
long strings (code, etc.) get snipped in the center for brevity.
Typing ??word or word?? gives access to the full information without
snipping long strings. Strings that are longer than the screen are printed
through the less pager.
The ?/?? system gives access to the full source code for any object (if
available), shows function prototypes and other useful information.
If you just want to see an object's docstring, type '%pdoc object' (without
quotes, and without % if you have automagic on).
* Tab completion in the local namespace:
At any time, hitting tab will complete any available python commands or
variable names, and show you a list of the possible completions if there's
no unambiguous one. It will also complete filenames in the current directory.
* Search previous command history in multiple ways:
- Start typing, and then use arrow keys up/down or (Ctrl-p/Ctrl-n) to search
through the history items that match what you've typed so far.
- Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
your history for lines that match what you've typed so far, completing as
much as it can.
- %hist: search history by index.
* Persistent command history across sessions.
* Logging of input with the ability to save and restore a working session.
* System shell with !. Typing !ls will run 'ls' in the current directory.
* The reload command does a 'deep' reload of a module: changes made to the
module since you imported will actually be available without having to exit.
* Verbose and colored exception traceback printouts. See the magic xmode and
xcolor functions for details (just type %magic).
* Input caching system:
IPython offers numbered prompts (In/Out) with input and output caching. All
input is saved and can be retrieved as variables (besides the usual arrow
key recall).
The following GLOBAL variables always exist (so don't overwrite them!):
_i: stores previous input.
_ii: next previous.
_iii: next-next previous.
_ih : a list of all input _ih[n] is the input from line n.
Additionally, global variables named _i<n> are dynamically created (<n>
being the prompt counter), such that _i<n> == _ih[<n>]
For example, what you typed at prompt 14 is available as _i14 and _ih[14].
You can create macros which contain multiple input lines from this history,
for later re-execution, with the %macro function.
The history function %hist allows you to see any part of your input history
by printing a range of the _i variables. Note that inputs which contain
magic functions (%) appear in the history with a prepended comment. This is
because they aren't really valid Python code, so you can't exec them.
* Output caching system:
For output that is returned from actions, a system similar to the input
cache exists but using _ instead of _i. Only actions that produce a result
(NOT assignments, for example) are cached. If you are familiar with
Mathematica, IPython's _ variables behave exactly like Mathematica's %
variables.
The following GLOBAL variables always exist (so don't overwrite them!):
_ (one underscore): previous output.
__ (two underscores): next previous.
___ (three underscores): next-next previous.
Global variables named _<n> are dynamically created (<n> being the prompt
counter), such that the result of output <n> is always available as _<n>.
Finally, a global dictionary named _oh exists with entries for all lines
which generated output.
* Directory history:
Your history of visited directories is kept in the global list _dh, and the
magic %cd command can be used to go to any entry in that list.
* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
1. Auto-parentheses
Callable objects (i.e. functions, methods, etc) can be invoked like
this (notice the commas between the arguments)::
In [1]: callable_ob arg1, arg2, arg3
and the input will be translated to this::
callable_ob(arg1, arg2, arg3)
This feature is off by default (in rare cases it can produce
undesirable side-effects), but you can activate it at the command-line
by starting IPython with `--autocall 1`, set it permanently in your
configuration file, or turn on at runtime with `%autocall 1`.
You can force auto-parentheses by using '/' as the first character
of a line. For example::
In [1]: /globals # becomes 'globals()'
Note that the '/' MUST be the first character on the line! This
won't work::
In [2]: print /globals # syntax error
In most cases the automatic algorithm should work, so you should
rarely need to explicitly invoke /. One notable exception is if you
are trying to call a function with a list of tuples as arguments (the
parenthesis will confuse IPython)::
In [1]: zip (1,2,3),(4,5,6) # won't work
but this will work::
In [2]: /zip (1,2,3),(4,5,6)
------> zip ((1,2,3),(4,5,6))
Out[2]= [(1, 4), (2, 5), (3, 6)]
IPython tells you that it has altered your command line by
displaying the new command line preceded by -->. e.g.::
In [18]: callable list
-------> callable (list)
2. Auto-Quoting
You can force auto-quoting of a function's arguments by using ',' as
the first character of a line. For example::
In [1]: ,my_function /home/me # becomes my_function("/home/me")
If you use ';' instead, the whole argument is quoted as a single
string (while ',' splits on whitespace)::
In [2]: ,my_function a b c # becomes my_function("a","b","c")
In [3]: ;my_function a b c # becomes my_function("a b c")
Note that the ',' MUST be the first character on the line! This
won't work::
In [4]: x = ,my_function /home/me # syntax error
"""
interactive_usage_min = """\
An enhanced console for Python.
Some of its features are:
- Tab completion in the local namespace.
- Logging of input, see command-line options.
- System shell escape via ! , eg !ls.
- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
- Keeps track of locally defined variables via %who, %whos.
- Show object information with a ? eg ?x or x? (use ?? for more info).
"""
quick_reference = r"""
IPython -- An enhanced Interactive Python - Quick Reference Card
================================================================
obj?, obj?? : Get help, or more help for object (also works as
?obj, ??obj).
?foo.*abc* : List names in 'foo' containing 'abc' in them.
%magic : Information about IPython's 'magic' % functions.
Magic functions are prefixed by % or %%, and typically take their arguments
without parentheses, quotes or even commas for convenience. Line magics take a
single % and cell magics are prefixed with two %%.
Example magic function calls:
%alias d ls -F : 'd' is now an alias for 'ls -F'
alias d ls -F : Works if 'alias' not a python name
alist = %alias : Get list of aliases to 'alist'
cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
%cd?? : See help AND source for magic %cd
%timeit x=10 : time the 'x=10' statement with high precision.
%%timeit x=2**100
x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
counted. This is an example of a cell magic.
System commands:
!cp a.txt b/ : System command escape, calls os.system()
cp a.txt b/ : after %rehashx, most system commands work without !
cp ${f}.txt $bar : Variable expansion in magics and system commands
files = !ls /usr : Capture system command output
files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
History:
_i, _ii, _iii : Previous, next previous, next next previous input
_i4, _ih[2:5] : Input history line 4, lines 2-4
exec(_i81) : Execute input history line #81 again
%rep 81 : Edit input history line #81
_, __, ___ : previous, next previous, next next previous output
_dh : Directory history
_oh : Output history
%hist : Command history of current session.
%hist -g foo : Search command history of (almost) all sessions for 'foo'.
%hist -g : Command history of (almost) all sessions.
%hist 1/2-8 : Command history containing lines 2-8 of session 1.
%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
line 5 of 6 sessions ago.
%edit 0/ : Open editor to execute code with history of current session.
Autocall:
f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
/f 1,2 : f(1,2) (forced autoparen)
,f 1 2 : f("1","2")
;f 1 2 : f("1 2")
Remember: TAB completion works in many contexts, not just file names
or python names.
The following magic functions are currently available:
"""
default_banner_parts = ["Python %s\n"%sys.version.split("\n")[0],
"Type 'copyright', 'credits' or 'license' for more information\n" ,
"IPython {version} -- An enhanced Interactive Python. Type '?' for help.\n".format(version=release.version),
]
default_banner = ''.join(default_banner_parts)
| {
"content_hash": "78ebb8ba5b1da748b2d95cd4ccad1845",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 112,
"avg_line_length": 39.758823529411764,
"alnum_prop": 0.6731765054002071,
"repo_name": "ipython/ipython",
"id": "53219bceb2562805537fe37632e19be6fb56760e",
"size": "13542",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "IPython/core/usage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "425"
},
{
"name": "Jupyter Notebook",
"bytes": "958133"
},
{
"name": "Makefile",
"bytes": "4675"
},
{
"name": "Python",
"bytes": "2318171"
},
{
"name": "Shell",
"bytes": "12155"
}
],
"symlink_target": ""
} |
import re
import os
import sys
import time
import unittest
import ConfigParser
from setuptools import setup, Command
class SQLiteTest(Command):
    """Run the test suite against an in-memory SQLite database."""

    description = "Run tests on SQLite"
    user_options = []

    def initialize_options(self):
        # No command-line options for this command.
        pass

    def finalize_options(self):
        # No command-line options to validate.
        pass

    def run(self):
        # Fetch test-only dependencies before the suite is imported.
        dist = self.distribution
        if dist.tests_require:
            dist.fetch_build_eggs(dist.tests_require)
        # Point trytond at a throwaway in-memory SQLite database.
        os.environ['TRYTOND_DATABASE_URI'] = 'sqlite://'
        os.environ['DB_NAME'] = ':memory:'
        from tests import suite
        outcome = unittest.TextTestRunner(verbosity=3).run(suite())
        # Exit status mirrors the test result: 0 on success, -1 otherwise.
        sys.exit(0 if outcome.wasSuccessful() else -1)
class PostgresTest(Command):
    """
    Run the tests on Postgres.
    """
    # Shown by `setup.py --help-commands`.
    description = "Run tests on Postgresql"
    user_options = []

    def initialize_options(self):
        # No command-line options to initialize.
        pass

    def finalize_options(self):
        # No command-line options to validate.
        pass

    def run(self):
        """Run the suite against PostgreSQL and exit with 0 on success, -1 on failure."""
        if self.distribution.tests_require:
            # Fetch test-only dependencies before the suite is imported.
            self.distribution.fetch_build_eggs(self.distribution.tests_require)
        os.environ['TRYTOND_DATABASE_URI'] = 'postgresql://'
        # Unique database name per run, derived from the current timestamp.
        os.environ['DB_NAME'] = 'test_' + str(int(time.time()))
        from tests import suite
        test_result = unittest.TextTestRunner(verbosity=3).run(suite())
        if test_result.wasSuccessful():
            sys.exit(0)
        sys.exit(-1)
class RunAudit(Command):
"""Audits source code using PyFlakes for following issues:
- Names which are used but not defined or used before they are defined.
- Names which are redefined without having been used.
"""
description = "Audit source code with PyFlakes"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import sys
try:
import pyflakes.scripts.pyflakes as flakes
except ImportError:
print "Audit requires PyFlakes installed in your system."
sys.exit(-1)
warns = 0
# Define top-level directories
dirs = ('.')
for dir in dirs:
for root, _, files in os.walk(dir):
if root.startswith(('./build')):
continue
for file in files:
if file != '__init__.py' and file.endswith('.py'):
warns += flakes.checkPath(os.path.join(root, file))
if warns > 0:
print "Audit finished with total %d warnings." % warns
else:
print "No problems found in sourcecode."
# Read Tryton module metadata (version, depends, xml, ...) from tryton.cfg.
config = ConfigParser.ConfigParser()
config.readfp(open('trytond_nereid/tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
    if key in info:
        # These entries are newline-separated lists in the cfg file.
        info[key] = info[key].strip().splitlines()
# The major.minor series is used below to pin trytond dependency ranges.
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
# Hard runtime dependencies of the nereid web framework.
install_requires = [
    'pytz',
    'flask>=0.10,<1.0',
    'flask-wtf==0.12',
    'blinker',
    'speaklater',
    'Flask-Babel>=0.9',
    'Flask-Login==0.3.2',
]

# PyPI distribution prefix per Tryton module; anything not listed here is
# assumed to be published under the default 'trytond' prefix.
MODULE2PREFIX = {
    'email_queue': 'fio',
}

for dep in info.get('depends', []):
    # ir/res/webdav are built into trytond itself, so they are not pinned.
    if not re.match(r'(ir|res|webdav)(\W|$)', dep):
        # Pin each module dependency to the same major.minor series.
        install_requires.append(
            '%s_%s >= %s.%s, < %s.%s' % (
                MODULE2PREFIX.get(dep, 'trytond'), dep, major_version,
                minor_version, major_version, minor_version + 1
            )
        )
install_requires.append(
    'trytond >= %s.%s, < %s.%s' %
    (major_version, minor_version, major_version, minor_version + 1)
)

# Testing dependencies
tests_require = [
    'mock',
    'pycountry>=16.11.08',
]
setup(
name='trytond_nereid',
version=info.get('version'),
url='http://www.fulfil.io',
license='BSD',
author='Fulfil.IO',
author_email='support@fulfil.io',
description='Tryton - Web Framework',
long_description=open('README.rst').read(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=install_requires,
packages=[
'nereid',
'nereid.contrib',
'nereid.tests',
'trytond.modules.nereid',
'trytond.modules.nereid.tests',
'trytond.modules.nereid_test',
],
package_dir={
'nereid': 'nereid',
'nereid.contrib': 'nereid/contrib',
'nereid.tests': 'nereid/tests',
'trytond.modules.nereid': 'trytond_nereid',
'trytond.modules.nereid.tests': 'trytond_nereid/tests',
'trytond.modules.nereid_test': 'nereid_test_module',
},
package_data={
'trytond.modules.nereid': info.get('xml', []) +
['tryton.cfg', 'view/*.xml', 'locale/*.po', 'tests/*.rst'] +
['i18n/*.pot', 'i18n/pt_BR/LC_MESSAGES/*'] +
['templates/*.*', 'templates/tests/*.*'],
'trytond.modules.nereid_test': ['*.xml'] +
['tryton.cfg', 'locale/*.po', 'tests/*.rst'] +
['templates/*.*', 'templates/tests/*.*'],
},
zip_safe=False,
platforms='any',
entry_points="""
[trytond.modules]
nereid = trytond.modules.nereid
nereid_test = trytond.modules.nereid_test
""",
test_suite='tests.suite',
test_loader='trytond.test_loader:Loader',
tests_require=tests_require,
cmdclass={
'audit': RunAudit,
'test': SQLiteTest,
'test_on_postgres': PostgresTest,
},
)
| {
"content_hash": "6f7f4da3f90c13aef450c0f9ccbacb13",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 79,
"avg_line_length": 28.3,
"alnum_prop": 0.5779909136799596,
"repo_name": "fulfilio/nereid",
"id": "9456797dc442b8be621063c316daea590185f9f2",
"size": "6095",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1246"
},
{
"name": "Makefile",
"bytes": "644"
},
{
"name": "Python",
"bytes": "389690"
}
],
"symlink_target": ""
} |
"""Tests of fiona.env"""
import os
import sys
try:
from unittest import mock
except ImportError:
import mock
import fiona
from fiona import _env
from fiona.env import getenv, hasenv, ensure_env, ensure_env_with_credentials
from fiona.session import AWSSession, GSSession
def test_nested_credentials(monkeypatch):
    """Check that an ``ensure_env_with_credentials``-decorated opener doesn't
    wipe out the surrounding fiona Env's AWS credentials.
    """
    @ensure_env_with_credentials
    def fake_opener(path):
        # Return the GDAL environment in effect inside the decorated call.
        return fiona.env.getenv()

    with fiona.env.Env(session=AWSSession(aws_access_key_id='foo', aws_secret_access_key='bar')):
        assert fiona.env.getenv()['AWS_ACCESS_KEY_ID'] == 'foo'
        assert fiona.env.getenv()['AWS_SECRET_ACCESS_KEY'] == 'bar'

        # Ambient credentials that must NOT leak into the nested env.
        monkeypatch.setenv('AWS_ACCESS_KEY_ID', 'lol')
        monkeypatch.setenv('AWS_SECRET_ACCESS_KEY', 'wut')

        gdalenv = fake_opener('s3://foo/bar')
        assert gdalenv['AWS_ACCESS_KEY_ID'] == 'foo'
        assert gdalenv['AWS_SECRET_ACCESS_KEY'] == 'bar'
def test_ensure_env_decorator(gdalenv):
    """A decorated callable runs with FIONA_ENV set in the GDAL env."""
    @ensure_env
    def probe():
        return getenv()['FIONA_ENV']

    assert probe() is True
def test_ensure_env_decorator_sets_gdal_data(gdalenv, monkeypatch):
    """fiona.env.ensure_env finds GDAL from environment"""
    @ensure_env
    def f():
        return getenv()['GDAL_DATA']

    # An explicit GDAL_DATA env var must be propagated into the GDAL env.
    monkeypatch.setenv('GDAL_DATA', '/lol/wut')
    assert f() == '/lol/wut'
@mock.patch("fiona._env.GDALDataFinder.find_file")
def test_ensure_env_decorator_sets_gdal_data_prefix(find_file, gdalenv, monkeypatch, tmpdir):
"""fiona.env.ensure_env finds GDAL data under a prefix"""
@ensure_env
def f():
return getenv()['GDAL_DATA']
find_file.return_value = None
tmpdir.ensure("share/gdal/header.dxf")
monkeypatch.delenv('GDAL_DATA', raising=False)
monkeypatch.setattr(_env, '__file__', str(tmpdir.join("fake.py")))
monkeypatch.setattr(sys, 'prefix', str(tmpdir))
assert f() == str(tmpdir.join("share").join("gdal"))
@mock.patch("fiona._env.GDALDataFinder.find_file")
def test_ensure_env_decorator_sets_gdal_data_wheel(find_file, gdalenv, monkeypatch, tmpdir):
"""fiona.env.ensure_env finds GDAL data in a wheel"""
@ensure_env
def f():
return getenv()['GDAL_DATA']
find_file.return_value = None
tmpdir.ensure("gdal_data/header.dxf")
monkeypatch.delenv('GDAL_DATA', raising=False)
monkeypatch.setattr(_env, '__file__', str(tmpdir.join(os.path.basename(_env.__file__))))
assert f() == str(tmpdir.join("gdal_data"))
@mock.patch("fiona._env.GDALDataFinder.find_file")
def test_ensure_env_with_decorator_sets_gdal_data_wheel(find_file, gdalenv, monkeypatch, tmpdir):
"""fiona.env.ensure_env finds GDAL data in a wheel"""
@ensure_env_with_credentials
def f(*args):
return getenv()['GDAL_DATA']
find_file.return_value = None
tmpdir.ensure("gdal_data/header.dxf")
monkeypatch.delenv('GDAL_DATA', raising=False)
monkeypatch.setattr(_env, '__file__', str(tmpdir.join(os.path.basename(_env.__file__))))
assert f("foo") == str(tmpdir.join("gdal_data"))
def test_ensure_env_crs(path_coutwildrnp_shp):
    """Decoration of .crs works"""
    collection = fiona.open(path_coutwildrnp_shp)
    assert collection.crs
def test_env_default_env(path_coutwildrnp_shp):
    """Opening a collection activates a default environment."""
    with fiona.open(path_coutwildrnp_shp):
        # While the collection is open, an env must be active.
        assert hasenv()
def test_nested_gs_credentials(monkeypatch):
    """Check that an ``ensure_env_with_credentials``-decorated opener doesn't
    wipe out the surrounding fiona Env's Google credentials.
    """
    @ensure_env_with_credentials
    def fake_opener(path):
        # Return the GDAL environment in effect inside the decorated call.
        return fiona.env.getenv()

    with fiona.env.Env(session=GSSession(google_application_credentials='foo')):
        assert fiona.env.getenv()['GOOGLE_APPLICATION_CREDENTIALS'] == 'foo'

        gdalenv = fake_opener('gs://foo/bar')
        assert gdalenv['GOOGLE_APPLICATION_CREDENTIALS'] == 'foo'
| {
"content_hash": "0bea0885beb6a619f68a9befdafbe900",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 97,
"avg_line_length": 32.82905982905983,
"alnum_prop": 0.670919031502213,
"repo_name": "rbuffat/Fiona",
"id": "ff85c3733a88b39bd2fdb8425569f947ab18c605",
"size": "3841",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_env.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "536189"
},
{
"name": "Shell",
"bytes": "4951"
}
],
"symlink_target": ""
} |
"""Treadmill host-ring service.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import glob
import logging
import os
import sys
import tempfile
import click
from treadmill import cli
from treadmill import context
from treadmill.websocket import client as ws_client
_LOGGER = logging.getLogger(__name__)
def init():
    """Return top level command handler."""
    # Shared option state, filled in by the group callback and read by
    # the subcommands.
    ctx = {}

    @click.group(name='host-ring')
    @click.option('--cell', required=True,
                  envvar='TREADMILL_CELL',
                  callback=cli.handle_context_opt,
                  expose_value=False)
    @click.option('--api', required=False, help='REST API url to use.',
                  metavar='URL',
                  envvar='TREADMILL_RESTAPI')
    @click.option('--wsapi', required=False, help='WebSocket API url to use.',
                  metavar='URL',
                  envvar='TREADMILL_WSAPI')
    @click.option('--aliases-dir', required=True,
                  help='Host aliases dir.',
                  default='/etc/host-aliases')
    def host_ring(api, wsapi, aliases_dir):
        """Manage /etc/hosts file inside the container."""
        ctx['api'] = api
        ctx['wsapi'] = wsapi
        ctx['aliases_dir'] = aliases_dir

    @host_ring.command(name='identity-group')
    @click.option('--pattern', required=False,
                  default='{identity_group}.{identity}')
    @click.argument('identity-group')
    def identity_group_cmd(pattern, identity_group):
        """Manage /etc/hosts file inside the container.
        """
        alias_dir = ctx['aliases_dir']
        cell = context.GLOBAL.cell

        def on_message(result):
            """Callback to process trace message."""
            host = result.get('host')
            app = result.get('app')
            identity_group = result['identity-group']
            identity = result['identity']
            _LOGGER.info('group: %s, identity: %s, host: %s, app: %s',
                         identity_group, identity, host, app)
            alias_name = pattern.format(identity_group=identity_group,
                                        identity=identity,
                                        cell=cell)
            link_name = os.path.join(alias_dir, alias_name)
            if host:
                # Create the symlink under a temp name, then rename it so
                # the alias is replaced atomically.
                temp_name = tempfile.mktemp(dir=alias_dir, prefix='^')
                _LOGGER.info('Creating tempname: %s - %s', temp_name, host)
                os.symlink(host, temp_name)
                _LOGGER.info('Renaming: %s', link_name)
                os.rename(temp_name, link_name)
            else:
                # No host: the group member went away, drop the alias.
                os.unlink(link_name)
            return True

        def on_error(result):
            """Callback to process errors."""
            click.echo('Error: %s' % result['_error'], err=True)

        # Start from a clean slate: remove any stale aliases matching the
        # pattern for this identity group.
        glob_pattern = os.path.join(
            alias_dir,
            pattern.format(identity_group=identity_group,
                           identity='*',
                           cell=cell)
        )
        for path in glob.glob(glob_pattern):
            os.unlink(path)

        try:
            # Blocks, processing identity-group updates until disconnected.
            return ws_client.ws_loop(
                ctx['wsapi'],
                {'topic': '/identity-groups',
                 'identity-group': identity_group},
                False,
                on_message,
                on_error
            )
        except ws_client.WSConnectionError:
            click.echo('Could not connect to any Websocket APIs', err=True)
            sys.exit(-1)

    # The command is registered on the group; the local name is unused.
    del identity_group_cmd
    return host_ring
| {
"content_hash": "2b5906ccefef5ba6a495b17d8a5918bf",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 78,
"avg_line_length": 32.230088495575224,
"alnum_prop": 0.5351455244371225,
"repo_name": "captiosus/treadmill",
"id": "97c965c742527c6a176625c9a4f4cf1cff823940",
"size": "3642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "treadmill/sproc/host_ring.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "570"
},
{
"name": "Python",
"bytes": "2598791"
},
{
"name": "Ruby",
"bytes": "3712"
},
{
"name": "Shell",
"bytes": "58099"
}
],
"symlink_target": ""
} |
"""Common tradfri test fixtures."""
from unittest.mock import Mock, patch
import pytest
from . import MOCK_GATEWAY_ID
from tests.components.light.conftest import mock_light_profiles # noqa: F401
# pylint: disable=protected-access
@pytest.fixture
def mock_gateway_info():
    """Yield a patched config_flow.get_gateway_info."""
    target = "homeassistant.components.tradfri.config_flow.get_gateway_info"
    with patch(target) as gateway_info:
        yield gateway_info
@pytest.fixture
def mock_entry_setup():
    """Yield a patched async_setup_entry that reports success."""
    target = "homeassistant.components.tradfri.async_setup_entry"
    with patch(target) as mock_setup:
        mock_setup.return_value = True
        yield mock_setup
@pytest.fixture(name="gateway_id")
def mock_gateway_id_fixture():
"""Return mock gateway_id."""
return MOCK_GATEWAY_ID
@pytest.fixture(name="mock_gateway")
def mock_gateway_fixture(gateway_id):
"""Mock a Tradfri gateway."""
def get_devices():
"""Return mock devices."""
return gateway.mock_devices
def get_groups():
"""Return mock groups."""
return gateway.mock_groups
gateway_info = Mock(id=gateway_id, firmware_version="1.2.1234")
def get_gateway_info():
"""Return mock gateway info."""
return gateway_info
gateway = Mock(
get_devices=get_devices,
get_groups=get_groups,
get_gateway_info=get_gateway_info,
mock_devices=[],
mock_groups=[],
mock_responses=[],
)
with patch("homeassistant.components.tradfri.Gateway", return_value=gateway), patch(
"homeassistant.components.tradfri.config_flow.Gateway", return_value=gateway
):
yield gateway
@pytest.fixture(name="mock_api")
def mock_api_fixture(mock_gateway):
"""Mock api."""
async def api(command):
"""Mock api function."""
# Store the data for "real" command objects.
if hasattr(command, "_data") and not isinstance(command, Mock):
mock_gateway.mock_responses.append(command._data)
return command
return api
@pytest.fixture(name="api_factory")
def mock_api_factory_fixture(mock_api):
"""Mock pytradfri api factory."""
with patch("homeassistant.components.tradfri.APIFactory", autospec=True) as factory:
factory.init.return_value = factory.return_value
factory.return_value.request = mock_api
yield factory.return_value
| {
"content_hash": "91bff7d8842e2cfd70d970211942e93a",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 88,
"avg_line_length": 27.238636363636363,
"alnum_prop": 0.6587400917813934,
"repo_name": "adrienbrault/home-assistant",
"id": "54a8625f23c1398a79bd22e9ff3be878a771674a",
"size": "2397",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "tests/components/tradfri/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "32021043"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from binaryninja import *
# TODO: rule out impossible nestings to speed up traversing
def traverse_llil_basic_block(f_type, f, block):
    """Descend into every LLIL instruction of the basic block."""
    for instruction in block:
        traverse_typed(f_type, f, instruction)
def traverse_mlil_basic_block(f_type, f, block):
    """Descend into every MLIL instruction of the basic block."""
    for instruction in block:
        traverse_typed(f_type, f, instruction)
def traverse_function(f_type, f, funk):
    # Descend into MLIL, then LLIL, then native basic blocks of the function.
    for bb in funk.medium_level_il.basic_blocks:
        traverse_typed(f_type, f, bb)
    for bb in funk.low_level_il.basic_blocks:
        traverse_typed(f_type, f, bb)
    for bb in funk.basic_blocks:
        traverse_typed(f_type, f, bb)
def traverse_binary_view(f_type, f, bv):
    """Descend into every function of the binary view."""
    for function in bv.functions:
        traverse_typed(f_type, f, function)
def traverse_dummy(f_type, f, node):
    """Leaf traverser: nothing to descend into."""
    return None
# Dispatch table: how to descend one level for each container node type.
# BasicBlock maps to the dummy because native blocks are not traversed
# further here.
traversers = {
    BasicBlock: traverse_dummy,
    BinaryView: traverse_binary_view,
    Function: traverse_function,
    LowLevelILBasicBlock: traverse_llil_basic_block,
    MediumLevelILBasicBlock: traverse_mlil_basic_block
}
def traverse_typed(f_type, f, node):
    # Apply f when the node is exactly the target type; otherwise descend
    # one level using the traversers dispatch table.
    node_type = type(node)
    if node_type == f_type:
        return f(node)
    else:
        if node_type in traversers:
            traversers[node_type](f_type, f, node)
        else:
            # No traverser registered for this node type: stop descending.
            #print('fyb> missing traverser for "{}"'.format(node_type))
            pass
def typify(f_type):
    """Decorator factory: run the wrapped callback on every *f_type* node
    reachable from the decorated call's argument.
    """
    def decorator(callback):
        def wrapper(node):
            return traverse_typed(f_type, callback, node)
        return wrapper
    return decorator
import gen_syscall_table
import json
syscall_table = json.load(open(gen_syscall_table.syscall_path, 'r'))
@typify(Function)
def syscall_traverse_functions(f):
    # Need this wrapper since I can't seem to find a way to get a reference to Function from a LLIL instruction
    syscalls = []

    @typify(LowLevelILInstruction)
    def syscall_comment(node):
        # Only annotate LLIL syscall instructions.
        if not node.operation == LowLevelILOperation.LLIL_SYSCALL:
            return
        # The syscall number is read from rax; only resolvable when the
        # dataflow analysis pinned it to a constant.
        rax = node.get_reg_value('rax')
        if rax.type == RegisterValueType.ConstantValue:
            rax = rax.value
            key = str(rax)
            name = syscall_table[key] if key in syscall_table else 'unk({:02X})'.format(rax)
            f.set_comment(node.address, name)
            print('fyb> found syscall @ 0x{:08X}: {}'.format(node.address, name))
            syscalls.append(name)
        else:
            print('fyb> unknown syscall @ 0x{:08X} => {}'.format(node.address, rax))

    syscall_comment(f)
    # Rename single-syscall functions that still carry a default sub_ name.
    if len(syscalls) == 1 and f.name.startswith('sub_'):
        f.name = '_calls_{}'.format(syscalls[0])
@typify(Function)
def print_calls(funk):
    # Print the function name, then every LLIL call inside it.
    print('fyb> {}'.format(funk.name))

    @typify(LowLevelILInstruction)
    def print_call(instr):
        if not instr.operation == LowLevelILOperation.LLIL_CALL:
            return
        print('\t{}'.format(instr))

    print_call(funk)
@typify(Function)
def print_mlil_calls(funk):
    # Print the function name, then every MLIL call inside it.
    print('fyb> {}'.format(funk.name))

    @typify(MediumLevelILInstruction)
    def print_call(instr):
        if not instr.operation == MediumLevelILOperation.MLIL_CALL:
            return
        print('\t{}'.format(instr))

    print_call(funk)
@typify(MediumLevelILBasicBlock)
def print_mlil(block):
    # Pretty-print each MLIL instruction of the block as an indented
    # operation tree.
    def trav(il, indent=0):
        if isinstance(il, MediumLevelILInstruction):
            print('\t' * indent + il.operation.name)
            for operand in il.operands:
                trav(operand, indent + 1)
        else:
            # Leaf operand (register, constant, int, ...): print verbatim.
            print('\t' * indent + str(il))
    for instr in block:
        trav(instr)
| {
"content_hash": "1349e5a231db53cb750b0ab29f8d4319",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 111,
"avg_line_length": 26.08823529411765,
"alnum_prop": 0.6226042841037204,
"repo_name": "kenoph/binja-fyb",
"id": "c400a5e81a1f4ca7228841d20189c1b66974f58b",
"size": "3548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4448"
}
],
"symlink_target": ""
} |
"""
Copyright 2017 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import requests
import logging
import jsonschema
import base64
import tempfile
import subprocess
import time
import random
import json
from cryptography.fernet import Fernet
import syndicate.util.provisioning as provisioning
import syndicate.util.objects as object_stub
import syndicate.util.paths as paths
import syndicate.util.crypto as crypto
import syndicate.util.client as rpcclient
import syndicate.util.config as conf
import syndicate.util.storage as storage
import syndicate.syndicate as libsyndicate
import syndicate.protobufs.ms_pb2 as ms_pb2
DEBUG = True
# taken from registrar/syndicate_signup.py
SIGNUP_URL = "https://syndicate-demo-signup.appspot.com"
SIGNUP_AUTH_SECRET = "ac5c015e354bf68a81df8177858064a296b3377d7da7828b71a393c7eee01a60ec840c2013485608c1732abe65927d87adfa159f36ec604638c147ccff777c80"
if os.environ.get("SIGNUP_URL", None) is not None:
SIGNUP_URL = os.environ['SIGNUP_URL']
def get_logger(name=None):
    """
    Make a singleton logger

    Returns a logger with exactly one console handler.  Calling it again
    with the same name reconfigures rather than duplicates the handler.
    """
    # Logging is silenced unless the module-level DEBUG flag is set.
    level = logging.CRITICAL
    if DEBUG:
        logging.disable(logging.NOTSET)
        level = logging.DEBUG

    if name is None:
        name = "<unknown>"

    log = logging.getLogger(name=name)
    log.setLevel(level)

    console = logging.StreamHandler()
    console.setLevel(level)

    log_format = ('[%(asctime)s] [%(levelname)s] [%(module)s:%(lineno)d] (' + str(os.getpid()) + '.%(thread)d) %(message)s' if DEBUG else '%(message)s')
    formatter = logging.Formatter(log_format)
    console.setFormatter(formatter)
    log.propagate = False

    # Drop handlers from any previous call so messages aren't duplicated.
    # (Replaces a Python-2-only ``xrange`` pop loop; slice delete works on
    # both Python 2 and 3.)
    del log.handlers[:]

    log.addHandler(console)
    return log
log = get_logger("demo_client")
def get_demo_payload(username, password):
    """
    Get the demo payload for this user.
    Return the payload on success, with the private keys decrypted and the
    certificate fields parsed into protobuf objects.
    Return None on error
    """
    try:
        req = requests.get(SIGNUP_URL + '/provision/{}'.format(username), headers={'authorization': 'bearer {}'.format(SIGNUP_AUTH_SECRET)})
        payload = req.json()
    except Exception as e:
        log.exception(e)
        return None

    # Expected shape of the signup service's response.
    payload_schema = {
        'type': 'object',
        'properties': {
            'user_pkey': {
                'type': 'string',
            },
            'gateway_pkey': {
                'type': 'string',
            },
            'user_cert': {
                'type': 'string',
            },
            'ug_cert': {
                'type': 'string',
            },
            'rg_cert': {
                'type': 'string',
            },
        },
        'required': [
            'user_pkey',
            'gateway_pkey',
            'user_cert',
            'ug_cert',
            'rg_cert'
        ]
    }

    try:
        jsonschema.validate(payload, payload_schema)
    except jsonschema.ValidationError:
        # BUG FIX: this previously referenced an undefined name ``keys``,
        # raising NameError instead of logging the malformed payload.
        log.error("Invalid key data: {}".format(payload))
        return None

    # decrypt encrypted fields; the Fernet key is derived from the
    # caller-supplied password (loop-invariant, so built once).
    password = base64.urlsafe_b64encode(base64.b64decode(password))
    fernet = Fernet(password)
    for encrypted_field in ['user_pkey', 'gateway_pkey']:
        payload[encrypted_field] = fernet.decrypt(str(payload[encrypted_field]))

    # parse certificates (base64-encoded protobuf blobs)
    user_cert = ms_pb2.ms_user_cert()
    ug_cert = ms_pb2.ms_gateway_cert()
    rg_cert = ms_pb2.ms_gateway_cert()

    try:
        user_cert.ParseFromString(base64.b64decode(payload['user_cert']))
        ug_cert.ParseFromString(base64.b64decode(payload['ug_cert']))
        rg_cert.ParseFromString(base64.b64decode(payload['rg_cert']))
        payload['user_cert'] = user_cert
        payload['ug_cert'] = ug_cert
        payload['rg_cert'] = rg_cert
    except Exception as e:
        log.exception(e)
        return None

    return payload
def sanitize_name(name):
    """Escape characters that may not appear in a gateway or volume name.

    '@' becomes '-0x40-' and '+' becomes '-0x2B-'.
    """
    sanitized = name
    for char, escape in (('@', '-0x40-'), ('+', '-0x2B-')):
        sanitized = sanitized.replace(char, escape)
    return sanitized
def make_volume_info(username, demo_payload):
    """
    Make volume information to be passed into the syndicate automount client

    Maps one volume name ('demo.volume-<user>') to its gateway certificates,
    user credentials, and automount hints.  Certificates are serialized
    protobufs, base64-encoded.
    """
    volume_name = sanitize_name('demo.volume-{}'.format(username))
    volume_info = {
        volume_name: {
            'gateways': {
                # Private key shared by this volume's gateways.
                '__pkey__': demo_payload['gateway_pkey'],
                demo_payload['ug_cert'].name: base64.b64encode(demo_payload['ug_cert'].SerializeToString()),
                demo_payload['rg_cert'].name: base64.b64encode(demo_payload['rg_cert'].SerializeToString()),
            },
            'users': {
                username: {
                    'cert': base64.b64encode(demo_payload['user_cert'].SerializeToString()),
                    'pkey': demo_payload['user_pkey'],
                },
            },
            'hints': {
                'gateways': {
                    # Mount the user gateway as a filesystem.
                    demo_payload['ug_cert'].name: {
                        'mode': 'user-filesystem'
                    },
                },
            },
        },
    }
    return volume_info
def run_provision(volume_info_path):
    """
    Run the automount client to provision the volume
    Return True on success
    Return False on error
    """
    syndicate_config = conf.get_config_from_argv(sys.argv)
    config_path = syndicate_config['config_path']
    # Shell out to syndicate-amd using this process's config file.
    p = subprocess.Popen(["syndicate-amd", "--debug", "-c", config_path, "provision", volume_info_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    out, err = p.communicate()
    p.wait()
    if p.returncode != 0:
        # Log the child's output, indented, to aid debugging.
        log.error("Failed to run `syndicate-amd provision {}`: exit code {}".format(volume_info_path, p.returncode))
        log.error("Stdout:\n{}".format( '\n'.join( [' {}'.format(l) for l in out.split('\n')] )))
        log.error("Stderr:\n{}".format( '\n'.join( [' {}'.format(l) for l in err.split('\n')] )))
        return False
    return True
def provision_volume(volume_info):
    """
    Given volume info from make_volume_info(), instantiate it.

    Return True on success, False if provisioning failed (caller should
    retry), or None if an unexpected exception occurred.
    """
    volume_info_data = json.dumps(volume_info)
    rc = None
    path = None
    try:
        fd, path = tempfile.mkstemp()
        os.close(fd)
        with open(path, 'w') as f:
            f.write(volume_info_data)
        rc = run_provision(path)
    except Exception as e:
        log.exception(e)
    finally:
        # Single cleanup point. The original also unlinked in the try body
        # and unconditionally in the except handler; if mkstemp() itself
        # raised, `path` was still None there and os.unlink(None) blew up
        # inside the handler. The guard below covers every case exactly once.
        if path is not None and os.path.exists(path):
            os.unlink(path)
    return rc
def main():
    """
    Entry point: read (email, password) from the command line, fetch the
    demo credentials, build the volume description, and retry provisioning
    with exponential backoff until it succeeds.

    Returns a process exit code: 0 on success, 1 on usage error or when the
    demo service is unreachable.
    """
    syndicate_config = conf.get_config_from_argv(sys.argv)
    args = syndicate_config['params']
    if len(args) != 2:
        print >> sys.stderr, "Usage: {} email password".format(sys.argv[0])
        return 1
    username = args[0]
    password = args[1]
    # obtain certs and keys for this user from the demo service
    demo_payload = get_demo_payload(username, password)
    if demo_payload is None:
        print >> sys.stderr, "Failed to access demo service. Please try again later."
        return 1
    volume_info = make_volume_info(username, demo_payload)
    timeout = 1.0
    print "Starting up Syndicate gateways..."
    # retry forever; provision_volume returns a falsy value on failure
    while True:
        rc = provision_volume(volume_info)
        if rc:
            break
        # failure
        print >> sys.stderr, "Failed to provision Syndicate volume; retrying in {} seconds".format(timeout)
        time.sleep(timeout)
        # exponential backoff with jitter: roughly doubles each round, plus
        # up to `timeout` extra seconds of randomness
        timeout = timeout * 2 + random.random() * timeout
    return 0
if __name__ == "__main__":
    # Script entry point: propagate main()'s return value as the exit code.
    rc = main()
    sys.exit(rc)
| {
"content_hash": "af0d0c5af3c5036f10085e1e0de5224d",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 175,
"avg_line_length": 28.600694444444443,
"alnum_prop": 0.6020395775160859,
"repo_name": "syndicate-storage/syndicate-core",
"id": "65e62be8c234435b504efeba6911473dfef7bcd9",
"size": "8257",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demo/demo-client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "144926"
},
{
"name": "C++",
"bytes": "1667422"
},
{
"name": "Makefile",
"bytes": "20511"
},
{
"name": "Python",
"bytes": "2258035"
},
{
"name": "Shell",
"bytes": "42197"
}
],
"symlink_target": ""
} |
from sys import stdin
from itertools import combinations

# Input format: first line is the time budget, second line the number of
# exercises, then one "duration points" pair per line per exercise.
remainingTime = int(stdin.readline())
remainingExercises = int(stdin.readline())
# Grab exercises
data = []
for i in range(remainingExercises):
    data.append([int(x) for x in stdin.readline().split()])

hasSolution = True
# NOTE(review): combinations of length 1 are never tried, so a single
# exercise on its own can never be the answer -- confirm against the
# problem statement.
combinationLength = 2
maxPoints = 0
while hasSolution:
    attemptsInRange = []
    # Get every combination having a length "combinationLength"
    for attempt in combinations(data, combinationLength):
        timer = 0
        points = 0
        # Transform tuples to a list
        attempt = list(attempt)
        for exercise in attempt:
            timer += exercise[0]   # duration
            points += exercise[1]  # score
        # Remember if the attempt was in our allowed time range
        inTimeAllowed = (timer <= remainingTime)
        attemptsInRange.append(inTimeAllowed)
        # We have found a solution! Remember it
        if (inTimeAllowed and points > maxPoints):
            maxPoints = points
    # If we had only failed attempts, we can't do better (every longer
    # combination is a superset and only takes more time)
    if (attemptsInRange.count(False) == len(attemptsInRange)):
        hasSolution = False
    # Otherwise, try to do more exercises
    else:
        combinationLength += 1
print maxPoints | {
"content_hash": "1f63e416005b557143bc02cfa517ddb2",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 60,
"avg_line_length": 24.622222222222224,
"alnum_prop": 0.7364620938628159,
"repo_name": "AntoineAugusti/katas",
"id": "85a6370839588e871589f5d89e2d30e7f9588def",
"size": "1171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prologin/2014/12_deadline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "2728"
},
{
"name": "Java",
"bytes": "5700"
},
{
"name": "Python",
"bytes": "78940"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, clear_url_caches
from django.http import HttpResponsePermanentRedirect
from django.middleware.locale import LocaleMiddleware
from django.test import TestCase
from django.test.utils import override_settings
from django.template import Template, Context
from django.utils._os import upath
from django.utils import translation
class PermanentRedirectLocaleMiddleWare(LocaleMiddleware):
    # LocaleMiddleware variant that issues 301 (permanent) redirects instead
    # of the default temporary redirects; installed via override_settings in
    # the custom-redirect-class test below.
    response_redirect_class = HttpResponsePermanentRedirect
@override_settings(
    USE_I18N=True,
    LOCALE_PATHS=(
        os.path.join(os.path.dirname(upath(__file__)), 'locale'),
    ),
    TEMPLATE_DIRS=(
        os.path.join(os.path.dirname(upath(__file__)), 'templates'),
    ),
    LANGUAGE_CODE='en-us',
    LANGUAGES=(
        ('nl', 'Dutch'),
        ('en', 'English'),
        ('pt-br', 'Brazilian Portuguese'),
    ),
    MIDDLEWARE_CLASSES=(
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ),
)
class URLTestCaseBase(TestCase):
    """
    TestCase base-class for the URL tests.

    The decorator pins a known i18n configuration (three languages, locale
    middleware enabled, test-local locale/template dirs) so that subclasses'
    expectations about prefixes and translations hold.
    """
    # URLconf under test; subclasses override as needed.
    urls = 'i18n.patterns.urls.default'

    def setUp(self):
        # Make sure the cache is empty before we are doing our tests.
        clear_url_caches()

    def tearDown(self):
        # Make sure we will leave an empty cache for other testcases.
        clear_url_caches()
class URLPrefixTests(URLTestCaseBase):
    """
    Tests if the `i18n_patterns` is adding the prefix correctly.
    """
    def test_not_prefixed(self):
        # URLs outside i18n_patterns reverse identically for any language.
        for lang in ('en', 'nl'):
            with translation.override(lang):
                self.assertEqual(reverse('not-prefixed'), '/not-prefixed/')
                self.assertEqual(reverse('not-prefixed-included-url'), '/not-prefixed-include/foo/')

    def test_prefixed(self):
        # Prefixed URLs carry the active language code.
        for lang in ('en', 'nl'):
            with translation.override(lang):
                self.assertEqual(reverse('prefixed'), '/{}/prefixed/'.format(lang))

    @override_settings(ROOT_URLCONF='i18n.patterns.urls.wrong')
    def test_invalid_prefix_use(self):
        # Misusing i18n_patterns in the URLconf must raise at reverse() time.
        self.assertRaises(ImproperlyConfigured, lambda: reverse('account:register'))
class URLDisabledTests(URLTestCaseBase):
    """With USE_I18N off, prefixed URL patterns fall back to plain paths."""
    urls = 'i18n.patterns.urls.disabled'

    @override_settings(USE_I18N=False)
    def test_prefixed_i18n_disabled(self):
        # No language prefix is added regardless of the active language.
        for lang in ('en', 'nl'):
            with translation.override(lang):
                self.assertEqual(reverse('prefixed'), '/prefixed/')
class PathUnusedTests(URLTestCaseBase):
    """
    Check that if no i18n_patterns is used in root urlconfs, then no
    language activation happens based on url prefix.
    """
    urls = 'i18n.patterns.urls.path_unused'

    def test_no_lang_activate(self):
        resp = self.client.get('/nl/foo/')
        self.assertEqual(resp.status_code, 200)
        # Despite the '/nl/' path prefix, the default language stays active.
        self.assertEqual(resp['content-language'], 'en')
        self.assertEqual(resp.context['LANGUAGE_CODE'], 'en')
class URLTranslationTests(URLTestCaseBase):
    """
    Tests if the pattern-strings are translated correctly (within the
    `i18n_patterns` and the normal `patterns` function).
    """
    def test_no_prefix_translated(self):
        # Unprefixed but translated patterns reverse to the localized path.
        for lang, base in (('en', '/translated/'),
                           ('nl', '/vertaald/'),
                           ('pt-br', '/traduzidos/')):
            with translation.override(lang):
                self.assertEqual(reverse('no-prefix-translated'), base)
                self.assertEqual(reverse('no-prefix-translated-slug', kwargs={'slug': 'yeah'}), base + 'yeah/')

    def test_users_url(self):
        # Prefixed and translated: both the language prefix and the path
        # segment change with the active language.
        with translation.override('en'):
            self.assertEqual(reverse('users'), '/en/users/')
        with translation.override('nl'):
            self.assertEqual(reverse('users'), '/nl/gebruikers/')
            self.assertEqual(reverse('prefixed_xml'), '/nl/prefixed.xml')
        with translation.override('pt-br'):
            self.assertEqual(reverse('users'), '/pt-br/usuarios/')
class URLNamespaceTests(URLTestCaseBase):
    """
    Tests if the translations are still working within namespaces.
    """
    def test_account_register(self):
        expected = {
            'en': '/en/account/register/',
            'nl': '/nl/profiel/registeren/',
        }
        for lang, url in expected.items():
            with translation.override(lang):
                self.assertEqual(reverse('account:register'), url)
class URLRedirectTests(URLTestCaseBase):
    """
    Tests if the user gets redirected to the right URL when there is no
    language-prefix in the request URL.
    """
    def test_no_prefix_response(self):
        # URLs outside i18n_patterns are served directly, no redirect.
        response = self.client.get('/not-prefixed/')
        self.assertEqual(response.status_code, 200)

    def test_en_redirect(self):
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/')
        # following the redirect reaches the actual page
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    def test_en_redirect_wrong_url(self):
        # the Dutch path is not valid under the English language
        response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(response.status_code, 404)

    def test_nl_redirect(self):
        response = self.client.get('/profiel/registeren/', HTTP_ACCEPT_LANGUAGE='nl')
        self.assertRedirects(response, '/nl/profiel/registeren/')
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    def test_nl_redirect_wrong_url(self):
        # the English path is not valid under the Dutch language
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='nl')
        self.assertEqual(response.status_code, 404)

    def test_pt_br_redirect(self):
        response = self.client.get('/conta/registre-se/', HTTP_ACCEPT_LANGUAGE='pt-br')
        self.assertRedirects(response, '/pt-br/conta/registre-se/')
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    def test_pl_pl_redirect(self):
        # language from outside of the supported LANGUAGES list
        # falls back to the default language prefix ('en')
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='pl-pl')
        self.assertRedirects(response, '/en/account/register/')
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)

    @override_settings(
        MIDDLEWARE_CLASSES=(
            'i18n.patterns.tests.PermanentRedirectLocaleMiddleWare',
            'django.middleware.common.CommonMiddleware',
        ),
    )
    def test_custom_redirect_class(self):
        # a LocaleMiddleware subclass may swap in a 301 redirect class
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/', 301)
class URLVaryAcceptLanguageTests(URLTestCaseBase):
    """
    Tests that 'Accept-Language' is not added to the Vary header when using
    prefixed URLs.
    """
    def test_no_prefix_response(self):
        # Unprefixed URLs do depend on Accept-Language, so Vary declares it.
        response = self.client.get('/not-prefixed/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.get('Vary'), 'Accept-Language')

    def test_en_redirect(self):
        # When the language is encoded in the path, neither the redirect
        # nor the final response should vary on Accept-Language.
        response = self.client.get('/account/register/', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register/')
        self.assertFalse(response.get('Vary'))
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
        self.assertFalse(response.get('Vary'))
class URLRedirectWithoutTrailingSlashTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=True`).
    """
    def test_not_prefixed_redirect(self):
        # CommonMiddleware appends the slash with a permanent redirect
        response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/not-prefixed/', 301)

    def test_en_redirect(self):
        response = self.client.get('/account/register', HTTP_ACCEPT_LANGUAGE='en', follow=True)
        # target status code of 301 because of CommonMiddleware redirecting
        self.assertIn(('http://testserver/en/account/register/', 301), response.redirect_chain)
        self.assertRedirects(response, '/en/account/register/', 302)

        # a URL that legitimately has no trailing slash only needs the
        # language-prefix redirect (302), no slash is appended
        response = self.client.get('/prefixed.xml', HTTP_ACCEPT_LANGUAGE='en', follow=True)
        self.assertRedirects(response, '/en/prefixed.xml', 302)
class URLRedirectWithoutTrailingSlashSettingTests(URLTestCaseBase):
    """
    Tests the redirect when the requested URL doesn't end with a slash
    (`settings.APPEND_SLASH=False`).
    """
    @override_settings(APPEND_SLASH=False)
    def test_not_prefixed_redirect(self):
        # without APPEND_SLASH there is nothing to redirect to: plain 404
        response = self.client.get('/not-prefixed', HTTP_ACCEPT_LANGUAGE='en')
        self.assertEqual(response.status_code, 404)

    @override_settings(APPEND_SLASH=False)
    def test_en_redirect(self):
        # the language-prefix redirect still happens, slash untouched
        response = self.client.get('/account/register-without-slash', HTTP_ACCEPT_LANGUAGE='en')
        self.assertRedirects(response, '/en/account/register-without-slash', 302)
        response = self.client.get(response['location'])
        self.assertEqual(response.status_code, 200)
class URLResponseTests(URLTestCaseBase):
    """
    Tests if the response has the right language-code.
    """
    def _assert_ok_with_language(self, path, lang):
        # Shared assertion: the page is served (200) and tagged with `lang`
        # in both the Content-Language header and the template context.
        response = self.client.get(path)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response['content-language'], lang)
        self.assertEqual(response.context['LANGUAGE_CODE'], lang)

    def test_not_prefixed_with_prefix(self):
        # a non-i18n URL must not resolve under a language prefix
        response = self.client.get('/en/not-prefixed/')
        self.assertEqual(response.status_code, 404)

    def test_en_url(self):
        self._assert_ok_with_language('/en/account/register/', 'en')

    def test_nl_url(self):
        self._assert_ok_with_language('/nl/profiel/registeren/', 'nl')

    def test_wrong_en_prefix(self):
        # translated path under the wrong language prefix is unknown
        response = self.client.get('/en/profiel/registeren/')
        self.assertEqual(response.status_code, 404)

    def test_wrong_nl_prefix(self):
        response = self.client.get('/nl/account/register/')
        self.assertEqual(response.status_code, 404)

    def test_pt_br_url(self):
        self._assert_ok_with_language('/pt-br/conta/registre-se/', 'pt-br')
class URLTagTests(URLTestCaseBase):
    """
    Test if the language tag works.

    Each test renders {% url %} inside {% language %} blocks and checks the
    reversed paths come out in the requested languages.
    """
    def test_strings_only(self):
        # literal language codes
        t = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(t.render(Context({})).strip().split(),
                         ['/vertaald/', '/traduzidos/'])

    def test_context(self):
        # language codes supplied through template context variables
        ctx = Context({'lang1': 'nl', 'lang2': 'pt-br'})
        tpl = Template("""{% load i18n %}
            {% language lang1 %}{% url 'no-prefix-translated' %}{% endlanguage %}
            {% language lang2 %}{% url 'no-prefix-translated' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(ctx).strip().split(),
                         ['/vertaald/', '/traduzidos/'])

    def test_args(self):
        # positional args to {% url %} inside a language block
        tpl = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated-slug' 'apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         ['/vertaald/apo/', '/traduzidos/apo/'])

    def test_kwargs(self):
        # keyword args to {% url %} inside a language block
        tpl = Template("""{% load i18n %}
            {% language 'nl' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}
            {% language 'pt-br' %}{% url 'no-prefix-translated-slug' slug='apo' %}{% endlanguage %}""")
        self.assertEqual(tpl.render(Context({})).strip().split(),
                         ['/vertaald/apo/', '/traduzidos/apo/'])
| {
"content_hash": "fe568e213eced58fd50fd978ca61fa93",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 112,
"avg_line_length": 39.74153846153846,
"alnum_prop": 0.6491173737999381,
"repo_name": "yceruto/django",
"id": "78f5a46aae72b6094674732cd6ed3700802ccbf1",
"size": "12916",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/i18n/patterns/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51177"
},
{
"name": "JavaScript",
"bytes": "102290"
},
{
"name": "Python",
"bytes": "9171078"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
} |
import os
import re
import unittest
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
# from linkcheck.utils import find as find_links
from pidman.pid.ark_utils import normalize_ark, valid_qualifier, \
invalid_qualifier_characters
from pidman.pid.models import Pid, Domain, Policy, Proxy, parse_resolvable_url
from pidman.pid.noid import encode_noid, decode_noid
class PidTestCase(TestCase):
    """Exercise the Pid model: minting, targets, validity, urls, policy."""
    fixtures = ['pids.json']

    def setUp(self):
        # dependent objects to use for creating test pids
        self.domain = Domain(name="test domain")
        self.domain.save()
        self.user = User(username="piduser")
        self.user.set_password("pidpass")
        self.user.save()
        # test pids: one ark & one purl
        self.ark = Pid(name="testark", domain=self.domain, creator=self.user,
            editor=self.user, type="Ark")
        self.ark.save()
        self.purl = Pid(name="testpurl", domain=self.domain, creator=self.user,
            editor=self.user, type="Purl")
        self.purl.save()

    def tearDown(self):
        self.domain.delete()
        self.user.delete()
        self.ark.delete()
        self.purl.delete()

    def test_mint_noid(self):
        noid = Pid.mint_noid()
        self.assertNotEqual(None, noid, "value returned by mint_noid should not be None")
        # minted noids must be lowercase alphanumerics only
        self.assert_(re.compile("^[a-z0-9]+$").match(noid),
            "generated noid '" + noid + "' matches expected pattern")

    def test_primary_target(self):
        # no targets at all -> no primary target
        self.assertEqual(None, self.ark.primary_target())
        self.assertEqual(None, self.purl.primary_target())
        purl_target = self.purl.target_set.create(uri="some.uri")
        self.assertEqual(purl_target, self.purl.primary_target())
        # for arks, the unqualified target is primary, not the qualified one
        ark_target = self.ark.target_set.create(uri="some.other.uri")
        ark_qual_target = self.ark.target_set.create(uri="some.other.uri/foo", qualify='q')
        self.assertEqual(ark_target, self.ark.primary_target())

    def test_is_valid__purl(self):
        self.assert_(self.purl.is_valid(), "purl with no targets is valid")
        self.purl.target_set.create(uri="some.uri")
        self.assert_(self.purl.is_valid(), "purl with single unqualified target is valid")
        # NOTE(review): the string below is passed as a *positional argument*
        # to is_valid() by assertRaises, not used as an assertion message;
        # if is_valid() takes no arguments this passes via TypeError rather
        # than the intended validation error -- confirm. Same pattern recurs
        # below and in test_is_valid__ark.
        self.purl.primary_target().qualify = "qual"
        self.assertRaises(Exception, self.purl.is_valid, "purl with single qualified target is invalid")
        self.purl.target_set.get().qualify = ""
        self.purl.target_set.create(qualify='q', uri="no.uri")
        self.assertRaises(Exception, self.purl.is_valid, "purl with multiple targets is invalid")

    def test_is_valid__ark(self):
        self.assert_(self.ark.is_valid(), "ark with no targets is valid")
        self.ark.target_set.create(uri="http://some.uri")
        self.assert_(self.ark.is_valid(), "ark with one unqualified target is valid")
        self.ark.target_set.create(qualify="q", uri="http://other.uri")
        self.assert_(self.ark.is_valid(), "ark with two targets is valid")
        self.ark.target_set.create(qualify="qual", uri="http://some.url", proxy=None)
        self.assert_(self.ark.is_valid(), "ark with two targets is valid")
        # NOTE(review): qualify is set on in-memory objects without save();
        # verify is_valid() reads these instances rather than re-querying.
        for t in self.ark.target_set.all():
            t.qualify = "q"
        self.assertRaises(Exception, self.ark.is_valid, "ark with duplicate qualifiers is invalid")

    def test_purl_url(self):
        # url when there is no target
        self.assertEqual('', self.purl.url(),
            "url for purl with no target should be '', got " + self.purl.url())
        # now add a target
        self.purl.target_set.create(uri="some.uri")
        self.assertEqual(settings.PID_RESOLVER_URL + "/" + self.purl.pid, self.purl.url(),
            "url for purl with target should be " + settings.PID_RESOLVER_URL + "/" +
            self.purl.pid + ", got " + self.purl.url())

    def test_ark_url(self):
        # url when there is no target
        self.assertEqual('', self.ark.url(),
            "url for ark with no target should be '', got " + self.ark.url())
        # add a qualified target (no unqualified/primary target)
        self.ark.target_set.create(qualify="q", uri="http://ti.ny")
        self.assertEqual(settings.PID_RESOLVER_URL + "/ark:/" + settings.PID_ARK_NAAN + "/" +
            self.ark.pid + "/q", self.ark.url(), "url for ark with no primary target should be " +
            settings.PID_RESOLVER_URL + "/ark:/" + settings.PID_ARK_NAAN + "/" +
            self.ark.pid + "/q , got " + self.ark.url())
        # add an unqualified target
        self.ark.target_set.create(uri="http://wh.ee")
        self.assertEqual(settings.PID_RESOLVER_URL + "/ark:/" + settings.PID_ARK_NAAN + "/" +
            self.ark.pid, self.ark.url(), "url for ark with primary target should be " +
            settings.PID_RESOLVER_URL + "/ark:/" + settings.PID_ARK_NAAN + "/" +
            self.ark.pid + ", got " + self.ark.url())

    def test_get_policy(self):
        # pid with explicit policy set
        pid = Pid.objects.get(pk=1)
        p = pid.get_policy()
        self.assert_(isinstance(p, Policy), "Pid get_policy returns Policy object")
        self.assertEqual(p, pid.policy, "get_policy response is Pid policy")
        self.assertNotEqual(p, pid.domain.policy, "get_policy response is different than domain policy")
        # pid with no explicit policy - inherits from domain
        pid = Pid.objects.get(pk=2)
        self.assertEqual(pid.policy, None, "test pid has no explicit policy")
        p = pid.get_policy()
        self.assert_(isinstance(p, Policy), "pid get_policy returns Policy object")
        self.assertEqual(p, pid.domain.policy, "pid get_policy returns domain policy")
        # inactive pid returns inactive policy
        pid = Pid.objects.get(pk=2)
        for t in pid.target_set.all():
            t.active = False
            t.save()
        self.assertEquals(pid.is_active(), False)
        p = Policy.objects.get(title__exact='Inactive Policy')
        self.assertEquals(pid.get_policy(), p)

    def test_url_link(self):
        self.purl.target_set.create(uri="some.uri")
        url = settings.PID_RESOLVER_URL + "/" + self.purl.pid
        # url_link() should render an <a> whose href and text are the url
        self.assert_(re.compile('^<a [^>]*href=[\'"]' + url + '[\'"]>' + url + '</a>$').match(self.purl.url_link()),
            "url link for purl with target should match pattern for link with "
            + url + ", got " + self.purl.url_link())

    def test_is_active(self):
        # setup creates ark/purl with no targets, so they are inactive
        self.assertFalse(self.ark.is_active())
        self.assertFalse(self.purl.is_active())
        purl_target = self.purl.target_set.create(uri="some.uri")
        ark_target = self.ark.target_set.create(uri="some.other.uri")
        self.assertTrue(self.ark.is_active())
        self.assertTrue(self.purl.is_active())
        # deactivating every target deactivates the pid
        purl_target.active = False
        purl_target.save()
        ark_target.active = False
        ark_target.save()
        self.assertFalse(self.ark.is_active())
        self.assertFalse(self.purl.is_active())

    # NOTE(review): bare @unittest.skip (no reason string) only works as a
    # decorator on Python 3.11+; on earlier versions the test silently
    # "passes" instead of reporting as skipped -- confirm target Python.
    @unittest.skip # linkcheck disabled in 0.10
    def test_target_linkcheck_status(self):
        # no links checked, status -> none
        self.assertEqual(None, self.ark.target_linkcheck_status(),
            'ARK with no targets should have linkcheck status of None (unknown)')
        self.assertEqual(None, self.purl.target_linkcheck_status(),
            'PURL with no targets should have linkcheck status of None (unknown)')
        # add targets; automatically get checked and will fail
        purl_target = self.purl.target_set.create(uri="some.uri")
        ark_target = self.ark.target_set.create(uri="some.other.uri")
        ark_qual_target = self.ark.target_set.create(uri="some.other.uri/foo", qualify='q')
        self.assertFalse(self.ark.target_linkcheck_status(),
            'ARK with invalid target uris should have linkcheck status of False (error)')
        self.assertFalse(self.purl.target_linkcheck_status(),
            'PURL with invalid target uri should have linkcheck status of False (error)')
        # manually update status to check valid / mixed
        purl_linkcheck = purl_target.linkcheck.first()
        purl_linkcheck.url.status = True
        purl_linkcheck.url.save()
        self.assertTrue(self.purl.target_linkcheck_status(),
            'purl with valid target uri should have link status True (ok)')
        # one valid ark target and one invalid is still invalid
        ark_linkcheck = ark_target.linkcheck.first()
        ark_linkcheck.url.status = True
        ark_linkcheck.url.save()
        self.assertFalse(self.ark.target_linkcheck_status(),
            'ark with one valid and one invalid target uri should have ' + \
            'link status False (error)')
        ark_qual_linkcheck = ark_qual_target.linkcheck.first()
        ark_qual_linkcheck.url.status = True
        ark_qual_linkcheck.url.save()
        self.assertTrue(self.ark.target_linkcheck_status(),
            'ark with all valid target uris should have link status True (ok)')
class TargetTestCase(TestCase):
    """Exercise Target resolution urls, token replacement, and validation."""
    fixtures = ['pids.json']

    def setUp(self):
        # dependent objects to use for creating test pids
        self.domain = Domain(name="test domain")
        self.domain.save()
        self.user = User(username="piduser")
        self.user.set_password("pidpass")
        self.user.save()
        self.ark = Pid(name="testark", domain=self.domain, creator=self.user,
            editor=self.user, type="Ark")
        self.ark.save()
        self.purl = Pid(name="testpurl", domain=self.domain, creator=self.user,
            editor=self.user, type="Purl")
        self.purl.save()
        self.proxy = Proxy(name="testproxy", transform="proxy.com?url=")
        self.proxy.save()

    def tearDown(self):
        self.domain.delete()
        self.user.delete()
        self.ark.delete()
        self.purl.delete()
        self.proxy.delete()

    def test_get_resolvable_url(self):
        t = self.ark.target_set.create(uri="some.uri")
        # test against expected ark url from settings in config file
        base_ark = settings.PID_RESOLVER_URL + "/ark:/" + settings.PID_ARK_NAAN
        self.assertEqual(base_ark + "/" + self.ark.pid, t.get_resolvable_url())
        # empty qualifier resolves the same as no qualifier
        t.qualify = ""
        self.assertEqual(base_ark + "/" + self.ark.pid, t.get_resolvable_url())
        # qualifiers are appended after a slash, however unusual
        t.qualify = "?"
        self.assertEqual(base_ark + "/" + self.ark.pid + "/?", t.get_resolvable_url())
        t.qualify = "some/long/qualifier.txt"
        self.assertEqual(base_ark + "/" + self.ark.pid + "/some/long/qualifier.txt", t.get_resolvable_url())
        # purls resolve directly under the resolver root
        t = self.purl.target_set.create(uri="some.uri")
        self.assertEqual(settings.PID_RESOLVER_URL + "/" + self.purl.pid, t.get_resolvable_url())

    def test_token_replacement(self):
        # the replacement token in a target uri is swapped for the pid
        self.ark.target_set.create(uri="http://some.url/with/" + settings.PID_REPLACEMENT_TOKEN)
        self.assertEqual("http://some.url/with/" + self.ark.pid, self.ark.primary_target().uri)

    def test_invalid_qualifier(self):
        # NOTE(review): the string here is passed to target_set.create as a
        # positional argument, not an assertion message -- confirm the
        # create call still fails for the intended reason (bad qualifier).
        self.assertRaises(Exception, self.ark.target_set.create,
            "attempting to save a target with invalid qualifiers raises an exception",
            qualify='q^', uri="no.uri",)

    # NOTE(review): this method exercises Domain.get_policy, not Target --
    # possibly belongs in a Domain-focused TestCase; confirm placement.
    def test_get_policy(self):
        # top-level domain
        domain = Domain.objects.get(pk=1)
        p = domain.get_policy()
        self.assert_(isinstance(p, Policy), "domain get_policy returns Policy object")
        self.assertEqual(p, domain.policy)
        # with no explicit policy
        collection = Domain.objects.get(pk=4)
        self.assertEqual(collection.policy, None, "collection has no policy")
        p = collection.get_policy()
        self.assert_(isinstance(p, Policy), "collection get_policy returns Policy object")
        self.assertEqual(p, collection.parent.policy, "collection get_policy returns parent domain's policy")
        # collection with explicit policy different from parent domain
        collection = Domain.objects.get(pk=2)
        self.assert_(isinstance(collection.policy, Policy), "collection has its own policy")
        p = collection.get_policy()
        self.assert_(isinstance(p, Policy), "collection get_policy returns Policy object")
        self.assertEqual(p, collection.policy, "collection get_policy returns collection's policy")
        self.assertNotEqual(p, collection.parent.policy, "collection get_policy returns collection's policy")
class ParseResolvableUrlTestCase(TestCase):
    """Check parse_resolvable_url against purl and ark url shapes."""

    def test_parse_resolvable_url(self):
        # simple purl
        parsed = parse_resolvable_url("http://pid.emory.edu/123")
        self.assertEqual("http", parsed['scheme'])
        self.assertEqual("pid.emory.edu", parsed['hostname'])
        self.assertEqual("Purl", parsed['type'])
        self.assertEqual("123", parsed['noid'])
        # unqualified ark: every component, including an empty qualifier
        parsed = parse_resolvable_url("https://pidtest.com/ark:/909/23a")
        for key, expected in (('scheme', 'https'), ('hostname', 'pidtest.com'),
                              ('type', 'Ark'), ('naan', '909'),
                              ('noid', '23a'), ('qualifier', '')):
            self.assertEqual(expected, parsed[key])
        # qualified arks
        parsed = parse_resolvable_url("http://pidtest.com/ark:/909/23a/PDF")
        self.assertEqual("PDF", parsed['qualifier'])
        parsed = parse_resolvable_url("http://pidtest.com/ark:/5439/d00d/some/long/qualifier.txt")
        self.assertEqual("Ark", parsed['type'])
        self.assertEqual("5439", parsed['naan'])
        self.assertEqual("d00d", parsed['noid'])
        self.assertEqual("some/long/qualifier.txt", parsed['qualifier'])
        # special case qualifiers - not yet handled, but would like to be able to recognize
        parsed = parse_resolvable_url("http://pidtest.com/ark:/909/23a?")
        self.assertEqual("?", parsed['qualifier'])
        parsed = parse_resolvable_url("http://pidtest.com/ark:/909/23a??")
        self.assertEqual("??", parsed['qualifier'])
class ArkUtilsTestCase(TestCase):
    """Check ark name normalization and qualifier validation helpers."""

    def test_normalize_ark(self):
        # examples here are from the character repertoires section of the
        # ARK spec: hyphens are insignificant, and a trailing '.' or '/'
        # is removed
        cases = (
            ("65-4-xz-321", "654xz321"),
            ("654--xz32-1", "654xz321"),
            ("654xz321", "654xz321"),
            ("654.", "654"),
            ("654/", "654"),
            ("6-5-4.", "654"),
        )
        for raw, expected in cases:
            n = normalize_ark(raw)
            self.assertEqual(expected, n,
                raw + " should be normalized to " + expected + ", got '" + n + "'")

    def test_valid_qualifier(self):
        self.assertTrue(valid_qualifier("45ae%"), "'45ae%' is a valid qualifier")
        self.assertFalse(valid_qualifier("45ae^"), "'45ae^' is not a valid qualifier")

    def test_invalid_qualifier_characters(self):
        # runs of adjacent bad characters are reported as grouped strings
        for qualifier, bad_chars in (
            ('45ae^', ['^']),
            ('45ae^0u~f', ['^', '~']),
            ('ab^~cde:;f', ['^~', ':;']),
        ):
            self.assertEqual(bad_chars, invalid_qualifier_characters(qualifier))
class NoidTestCase(TestCase):
    # Exercises the noid (nice opaque identifier) integer codec.

    def test_round_trip_to_int(self):
        # encode/decode must be exact inverses over a contiguous range
        # (xrange: this module targets Python 2)
        for i in xrange(10000):
            pid = encode_noid(i)
            decoded = decode_noid(pid)
            self.assertEqual(i, decoded)

    def test_encode_known_pids(self):
        # check codec logic against a sample of real production noids
        noids = ['2dbx', '5z8x', '13kpr', '17gvd', '17ktk']
        for noid in noids:
            i = decode_noid(noid)
            encoded = encode_noid(i)
            self.assertEqual(noid, encoded)
| {
"content_hash": "7f31a84f4bcbe26605fa87824b1909b0",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 116,
"avg_line_length": 46.594202898550726,
"alnum_prop": 0.6235769828926905,
"repo_name": "emory-libraries/pidman",
"id": "5b291f56b007b4349b153e029129337ae1ecbfd6",
"size": "16075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pidman/pid/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7736"
},
{
"name": "Python",
"bytes": "195026"
}
],
"symlink_target": ""
} |
import unittest
import os
class TestCluster(unittest.TestCase):
    """
    Test cluster functionality.

    NOTE(review): these "tests" shell out via os.system to submit MISO jobs
    to a cluster and make no assertions on the results -- they verify only
    that the commands can be assembled and launched; confirm this is the
    intended scope.
    """
    def setUp(self):
        # Find out the current directory
        self.miso_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
        self.tests_data_dir = os.path.join(self.miso_path, "test-data")
        self.events_analysis_cmd = "python %s " %(os.path.join(self.miso_path,
                                                               "run_events_analysis.py"))
        self.tests_output_dir = os.path.join(self.miso_path, "test-output")
        self.test_sam_filename = os.path.join(self.tests_data_dir,
                                              "sam-data",
                                              "c2c12.Atp2b1.sam")
        self.gff_events_dir = os.path.join(self.miso_path, "gff-events")
        self.sam_to_bam_script = os.path.join(self.miso_path, "sam_to_bam.py")
        self.index_gff_script = os.path.join(self.miso_path, "index_gff.py")

    def test_cluster_single_end_run(self):
        """
        Test MISO on cluster.
        """
        print "Testing single-end SE event interface..."
        ##
        ## Try running MISO on cluster using default settings.
        ##
        sample_name = "se-sample"
        counts_filename = os.path.join(self.tests_data_dir,
                                       "se-counts",
                                       "se_test.counts")
        output_dir = os.path.join(self.tests_output_dir, "SE-output")
        # single-end skipped-exon (SE) parameters
        read_len = 35
        overhang_len = 4
        event_type = "SE"
        miso_cmd = "%s --compute-events-psi %s %s --output-dir %s --read-len %d --overhang-len %d " \
                   " --event-type %s --use-cluster " %(self.events_analysis_cmd,
                                                       sample_name,
                                                       counts_filename,
                                                       output_dir,
                                                       read_len,
                                                       overhang_len,
                                                       event_type)
        print "Executing: %s" %(miso_cmd)
        os.system(miso_cmd)

    def test_cluster_gene_psi(self):
        """
        Test gene-level Psi inferences using SAM/BAM reads.
        """
        print "Testing gene-level Psi..."
        # expects the sorted BAM produced by a prior sam_to_bam run
        sam_dir = os.path.join(self.tests_output_dir, "sam-output")
        bam_filename = os.path.join(sam_dir, "c2c12.Atp2b1.sorted.bam")
        # paired-end insert-length distribution parameters
        read_len = 36
        insert_mean = 250
        insert_sd = 30
        # First index the GFF of interest
        gff_filename = os.path.join(self.gff_events_dir, "mm9", "genes", "Atp2b1.mm9.gff")
        gff_index_dir = os.path.join(self.gff_events_dir, "mm9", "indexed")
        index_cmd = "python %s --index %s %s" %(self.index_gff_script,
                                                gff_filename,
                                                gff_index_dir)
        print "Executing: %s" %(index_cmd)
        os.system(index_cmd)
        output_dir = os.path.join(self.tests_output_dir, "gene-psi-output")
        miso_cmd = "%s --compute-genes-psi %s %s --output-dir %s --read-len %d " \
                   " --paired-end %d %d --use-cluster" \
                   %(self.events_analysis_cmd,
                     gff_index_dir,
                     bam_filename,
                     output_dir,
                     read_len,
                     insert_mean,
                     insert_sd)
        print "Executing: %s" %(miso_cmd)
        os.system(miso_cmd)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "ea957bd2db384c418961aec5599be121",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 101,
"avg_line_length": 40.824175824175825,
"alnum_prop": 0.4635262449528937,
"repo_name": "wuxue/altanalyze",
"id": "1df31ae30aca102424f45502bdf1eb4ae376704e",
"size": "3778",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "misopy/test_cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10351946"
}
],
"symlink_target": ""
} |
import time
import BaseHTTPServer
import re
# Empty host binds to all interfaces; port 80 usually requires elevated
# privileges to bind.
HOST_NAME = ''
PORT_NUMBER = 80
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
s.send_response(200)
path = re.split('/', s.path)
actiontagsStr = path[1]
# I'm like 90% sure there has to be a library to this automatically, but
# i have no idea what these things are called so I've nothing to google to find it.
# You know, the thingys on the end of a url. like...
# google.com/searchy?tagthingy=hello&nsabackdoor=true
# those things. lets call them tags.
# Regular expressions to break up the url into parts with terribly named variables
strUserAction = re.split('\?', actiontagsStr)[0]
strAllTags = re.split('\?', actiontagsStr)[1]
listAllTags = re.split('\&', strAllTags)
# For each... tag... group... which is a list... add it to another list of all the tags.
allTags = []
for tag in listAllTags:
listTag = re.split('=', tag)
allTags.append(listTag)
# Prase the information we need for the output
# This is like the worst code i've ever written
logType = ""
logLat = ""
logLng = ""
logAcc = ""
logAlt = ""
for tag in allTags:
if tag[0] == "type":
logType = tag[1]
if tag[0] == "lat":
logLat = tag[1]
if tag[0] == "lng":
logLng = tag[1]
if tag[0] == "acc":
logAcc = tag[1]
if tag[0] == "alt":
logAlt = tag[1]
# Write out the information to a file
outFile = open("locations.db", "a")
outStr = logType + "," + logLat + "," + logLng + "," + logAcc + "," + logAlt + "\n"
outFile.write(outStr)
outFile.close()
print allTags
if __name__ == '__main__':
    # Serve until interrupted; Ctrl-C shuts the socket down cleanly.
    server_class = BaseHTTPServer.HTTPServer
    httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
| {
"content_hash": "e12084cb480ced4443346be72759f091",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 90,
"avg_line_length": 26.027027027027028,
"alnum_prop": 0.6427829698857737,
"repo_name": "Purdue-ACM-SIGAPP/Purdue-App-Old",
"id": "7b53b4622a408e95b983711b3b96d0d0c633e7cd",
"size": "2002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "135511"
},
{
"name": "Python",
"bytes": "2002"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
    # Standard Django manage.py stub: the settings module must be configured
    # before the management machinery is imported, hence the inline import.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "briefbe.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| {
"content_hash": "5071b236432c077e5e7aae02e19abfe3",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 71,
"avg_line_length": 25.333333333333332,
"alnum_prop": 0.7105263157894737,
"repo_name": "bjarnoldus/briefbe",
"id": "896f945c196ddae0edaacf8f6fccda563a93b03e",
"size": "250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/briefbe/manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3446"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
} |
import random
import types
from typing import Type, TypeVar, Mapping, Any, Iterable, Generator

import pytest

from datapipelines import DataSource, CompositeDataSource, PipelineContext, NotFoundError
#########################################
# Create simple DataSources for testing #
#########################################
T = TypeVar("T")

# Keys understood by the test data sources' query mappings.
VALUE_KEY = "value"
COUNT_KEY = "count"

# Number of random samples each test draws, and their magnitude bound.
VALUES_COUNT = 100
VALUES_MAX = 100000000

# The generator type does live in the std lib: types.GeneratorType.
GENERATOR_CLASS = types.GeneratorType
class SimpleWildcardDataSource(DataSource):
    """A wildcard source: casts the queried value to whatever type is asked for."""

    def get(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> T:
        """Cast the query's value to ``type``; NotFoundError if the cast fails."""
        raw = query.get(VALUE_KEY)
        try:
            # noinspection PyCallingNonCallable
            return type(raw)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"{type}\"".format(type=type))

    def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Generator[T, None, None]:
        """Yield the cast value COUNT_KEY times; NotFoundError if the cast fails."""
        raw = query.get(VALUE_KEY)
        repeat = query.get(COUNT_KEY)
        try:
            # noinspection PyCallingNonCallable
            raw = type(raw)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"{type}\"".format(type=type))
        return (raw for _ in range(repeat))
class IntFloatDataSource(DataSource):
    """A source providing int and float via datapipelines' dispatch mechanism."""
    # The two dispatcher stubs below carry no logic themselves; routing to the
    # type-specific implementations registered via @get.register/@get_many.register
    # is handled by DataSource.dispatch.
    @DataSource.dispatch
    def get(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> T:
        pass
    @DataSource.dispatch
    def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        pass
    @get.register(int)
    def get_int(self, query: Mapping[str, Any], context: PipelineContext = None) -> int:
        # Cast the queried value to int; a failed cast is reported as "not found".
        value = query.get(VALUE_KEY)
        try:
            return int(value)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"int\"")
    @get_many.register(int)
    def get_many_int(self, query: Mapping[str, Any], context: PipelineContext = None) -> Generator[int, None, None]:
        # Yield the cast value COUNT_KEY times (lazily, as a generator).
        value = query.get(VALUE_KEY)
        count = query.get(COUNT_KEY)
        try:
            value = int(value)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"int\"")
        return (value for _ in range(count))
    @get.register(float)
    def get_float(self, query: Mapping[str, Any], context: PipelineContext = None) -> float:
        # Same contract as get_int, for float.
        value = query.get(VALUE_KEY)
        try:
            return float(value)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"float\"")
    @get_many.register(float)
    def get_many_float(self, query: Mapping[str, Any], context: PipelineContext = None) -> Generator[float, None, None]:
        # Same contract as get_many_int, for float.
        value = query.get(VALUE_KEY)
        count = query.get(COUNT_KEY)
        try:
            value = float(value)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"float\"")
        return (value for _ in range(count))
class StringDataSource(DataSource):
    """A source providing str, mirroring IntFloatDataSource's dispatch layout."""
    # Dispatcher stubs: routing to the registered implementations below is
    # handled by DataSource.dispatch.
    @DataSource.dispatch
    def get(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> T:
        pass
    @DataSource.dispatch
    def get_many(self, type: Type[T], query: Mapping[str, Any], context: PipelineContext = None) -> Iterable[T]:
        pass
    @get.register(str)
    def get_str(self, query: Mapping[str, Any], context: PipelineContext = None) -> str:
        # Cast the queried value to str; a failed cast is reported as "not found".
        value = query.get(VALUE_KEY)
        try:
            return str(value)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"str\"")
    @get_many.register(str)
    def get_many_str(self, query: Mapping[str, Any], context: PipelineContext = None) -> Generator[str, None, None]:
        # Yield the cast value COUNT_KEY times (lazily, as a generator).
        value = query.get(VALUE_KEY)
        count = query.get(COUNT_KEY)
        try:
            value = str(value)
        except ValueError:
            raise NotFoundError("Couldn't cast the query value to \"str\"")
        return (value for _ in range(count))
########################
# Unsupported Function #
########################
def test_unsupported():
    """DataSource.unsupported wraps any requested type in an UnsupportedError."""
    from datapipelines import UnsupportedError
    for requested in (int, float, str):
        error = DataSource.unsupported(requested)
        assert type(error) is UnsupportedError
#####################
# Provides Function #
#####################
def test_provides():
    """A dispatching source advertises exactly its registered types."""
    assert IntFloatDataSource().provides == {int, float}


def test_wildcard_provides():
    """A source with plain (non-dispatch) methods advertises the wildcard."""
    from datapipelines import TYPE_WILDCARD
    assert SimpleWildcardDataSource().provides is TYPE_WILDCARD
################
# Get Function #
################
def _assert_get_roundtrip(source, typ, sample):
    # Fetch one value and check both its type and its equality to the input.
    fetched = source.get(typ, {VALUE_KEY: sample})
    assert type(fetched) is typ
    assert fetched == sample


def test_get():
    """get() round-trips random ints and floats through the dispatch source."""
    source = IntFloatDataSource()
    for sample in [random.randint(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_roundtrip(source, int, sample)
    for sample in [random.uniform(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_roundtrip(source, float, sample)


def test_get_unsupported():
    """Asking the dispatch source for an unregistered type raises."""
    from datapipelines import UnsupportedError
    with pytest.raises(UnsupportedError):
        IntFloatDataSource().get(str, {VALUE_KEY: "test"})


def test_wildcard_get():
    """get() round-trips random ints and floats through the wildcard source."""
    source = SimpleWildcardDataSource()
    for sample in [random.randint(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_roundtrip(source, int, sample)
    for sample in [random.uniform(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_roundtrip(source, float, sample)
#####################
# Get Many Function #
#####################
def _assert_get_many_roundtrip(source, typ, sample):
    # get_many must return a true generator yielding VALUES_COUNT copies.
    stream = source.get_many(typ, {VALUE_KEY: sample, COUNT_KEY: VALUES_COUNT})
    assert type(stream) is GENERATOR_CLASS
    for item in stream:
        assert type(item) is typ
        assert item == sample


def test_get_many():
    """get_many() streams random ints and floats from the dispatch source."""
    source = IntFloatDataSource()
    for sample in [random.randint(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_many_roundtrip(source, int, sample)
    for sample in [random.uniform(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_many_roundtrip(source, float, sample)


def test_get_many_unsupported():
    """get_many() for an unregistered type raises."""
    from datapipelines import UnsupportedError
    query = {VALUE_KEY: "test", COUNT_KEY: VALUES_COUNT}
    with pytest.raises(UnsupportedError):
        IntFloatDataSource().get_many(str, query)


def test_wildcard_get_many():
    """get_many() streams random ints and floats from the wildcard source."""
    source = SimpleWildcardDataSource()
    for sample in [random.randint(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_many_roundtrip(source, int, sample)
    for sample in [random.uniform(-VALUES_MAX, VALUES_MAX) for _ in range(VALUES_COUNT)]:
        _assert_get_many_roundtrip(source, float, sample)
#######################
# CompositeDataSource #
#######################
def _composite_source():
    # Fresh composite over both concrete sources for each test.
    return CompositeDataSource({IntFloatDataSource(), StringDataSource()})


# (type, random-sample factory) pairs exercised against the composite.
_COMPOSITE_CASES = (
    (int, lambda: random.randint(-VALUES_MAX, VALUES_MAX)),
    (float, lambda: random.uniform(-VALUES_MAX, VALUES_MAX)),
    (str, lambda: str(random.uniform(-VALUES_MAX, VALUES_MAX))),
)


def test_composite_provides():
    """The composite advertises the union of its members' types."""
    assert _composite_source().provides == {int, float, str}


def test_composite_get():
    """get() round-trips ints, floats and strs through the composite."""
    source = _composite_source()
    for caster, sampler in _COMPOSITE_CASES:
        for _ in range(VALUES_COUNT):
            sample = sampler()
            fetched = source.get(caster, {VALUE_KEY: sample})
            assert type(fetched) is caster
            assert fetched == sample


def test_composite_get_unsupported():
    """get() for a type no member provides raises."""
    from datapipelines import UnsupportedError
    with pytest.raises(UnsupportedError):
        _composite_source().get(bytes, {VALUE_KEY: bytes()})


def test_composite_get_many():
    """get_many() streams ints, floats and strs through the composite."""
    source = _composite_source()
    for caster, sampler in _COMPOSITE_CASES:
        for _ in range(VALUES_COUNT):
            sample = sampler()
            stream = source.get_many(caster, {VALUE_KEY: sample, COUNT_KEY: VALUES_COUNT})
            assert type(stream) is GENERATOR_CLASS
            for item in stream:
                assert type(item) is caster
                assert item == sample


def test_composite_get_many_unsupported():
    """get_many() for a type no member provides raises."""
    from datapipelines import UnsupportedError
    query = {VALUE_KEY: bytes(), COUNT_KEY: VALUES_COUNT}
    with pytest.raises(UnsupportedError):
        _composite_source().get_many(bytes, query)
| {
"content_hash": "3f77dcc77ac7474e94c03bf641fffb0b",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 125,
"avg_line_length": 29.157068062827225,
"alnum_prop": 0.6219249416412282,
"repo_name": "sserrot/champion_relationships",
"id": "f6c45260ecf6debde8798c62ccc42c732cda8178",
"size": "11138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/tests/test_sources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
def extractAtenTranslations(item):
    """
    Map a parsed feed item from Aten Translations to a release message.

    Returns None for previews or titles without volume/chapter info, a
    release message for recognized series, and False otherwise.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    if 'Skill Taker' in item['tags'] or 'Skill Taker Ch' in title:
        return buildReleaseMessageWithType(
            item,
            'Skill Taker’s World Domination ~ Building a Slave Harem from Scratch',
            vol, chp, frag=frag, postfix=postfix)
    return False
| {
"content_hash": "316b0e14be506d103304af55001191ce",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 152,
"avg_line_length": 43.4,
"alnum_prop": 0.7258064516129032,
"repo_name": "fake-name/ReadableWebProxy",
"id": "c7f6ef2b51beb217b973570c3ad3df5ad28bd89b",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractAtenTranslations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
""" Main app entry / wsgi entry point """
import logging
import sys
import config
from lazyblacksmith.app import create_app
APP = create_app(config)
def set_loggers():
    """ Define logger format and level for the whole app.

    Attaches a single shared console (stderr) handler to each application
    logger; only the per-logger threshold differs.
    """
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    console.setFormatter(formatter)

    # One entry per logger instead of three copy-pasted setup stanzas.
    logger_levels = [
        ('lb.utils', logging.DEBUG),
        ('lb.ajax', logging.DEBUG),
        ('sqlalchemy.engine', logging.ERROR),  # DEBUG for queries + results
    ]
    for name, level in logger_levels:
        logger = logging.getLogger(name)
        logger.addHandler(console)
        logger.setLevel(level)
if __name__ == '__main__':
    # Direct invocation: configure logging, then run the Flask dev server.
    set_loggers()
    if config.DEBUG:
        # The debug toolbar is optional; abort with a hint if it is missing.
        try:
            from flask_debugtoolbar import DebugToolbarExtension
            DebugToolbarExtension(APP)
        except ImportError:
            print("Library 'flask-debugtoolbar' is missing. Please install it using 'pip'")
            sys.exit()
    APP.run(port=config.PORT, host=config.HOST)
| {
"content_hash": "a8dfde18ccc80d2511e5ad10eb071215",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 91,
"avg_line_length": 27.863636363636363,
"alnum_prop": 0.6492659053833605,
"repo_name": "Kyria/LazyBlacksmith",
"id": "9e0cd8bfe40253823da6fefea4761c082db94627",
"size": "1253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2005"
},
{
"name": "HTML",
"bytes": "219613"
},
{
"name": "JavaScript",
"bytes": "402713"
},
{
"name": "Mako",
"bytes": "557"
},
{
"name": "Python",
"bytes": "192854"
},
{
"name": "SCSS",
"bytes": "226990"
},
{
"name": "Shell",
"bytes": "1707"
}
],
"symlink_target": ""
} |
"""Add event log.
Revision ID: d17d4d4fd1ee
Revises: 95779b509fe4
Create Date: 2016-07-22 12:21:14.296489
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'd17d4d4fd1ee'
down_revision = '95779b509fe4'
def upgrade():
    # Create the event_log audit table (who did what, from where, with which
    # request payload) plus an index on its action column.
    op.create_table('event_log',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.Column('action', sa.Unicode(length=255), nullable=True),
    sa.Column('source_ip', sa.Unicode(length=255), nullable=True),
    sa.Column('path', sa.Unicode(), nullable=True),
    sa.Column('query', postgresql.JSONB(), nullable=True),
    sa.Column('data', postgresql.JSONB(), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['role.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_event_log_action'), 'event_log', ['action'], unique=False)
def downgrade():
    # Reverse of upgrade(): drop the index first, then the table.
    op.drop_index(op.f('ix_event_log_action'), table_name='event_log')
    op.drop_table('event_log')
| {
"content_hash": "bef5732c5e0d551aa64ff7e93f420930",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 87,
"avg_line_length": 34.02777777777778,
"alnum_prop": 0.6579591836734694,
"repo_name": "gazeti/aleph",
"id": "92c64834ac0b04e95c7e2d2bc2bb879d1dd3f193",
"size": "1225",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "aleph/migrate/versions/d17d4d4fd1ee_add_event_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15949"
},
{
"name": "HTML",
"bytes": "126390"
},
{
"name": "JavaScript",
"bytes": "112602"
},
{
"name": "Makefile",
"bytes": "1473"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "446197"
},
{
"name": "Shell",
"bytes": "821"
}
],
"symlink_target": ""
} |
from glumpy import library
from glumpy.transforms.transform import Transform
import numpy as np
class Translate(Transform):
    """
    Translation transform

    :param 3-tuple translate:
       Translation vector. Default is (0,0,0).

    The transform is connected to the following events:

      * ``on_attach``: Transform initialization

    **Usage example**:

      .. code:: python

         vertex = '''
         attribute vec2 position;
         void main()
         {
             gl_Position = <transform>;
         } '''

         ...
         window = app.Window(width=800, height=800)
         program = gloo.Program(vertex, fragment, count=4)
         ...
         program['transform'] = Translate("position", translate=(0,0,0))
         window.attach(program['transform'])
         ...
    """

    # Map the user-facing "translate" name onto the transform's namespaced
    # GLSL uniform ("translate_translate" inside the shader snippet).
    aliases = { "translate" : "translate_translate" }

    def __init__(self, *args, **kwargs):
        # Load the GLSL snippet implementing the translation and let the base
        # Transform wire it up; fall back to the identity translation when no
        # "translate" keyword was supplied.
        code = library.get("transforms/translate.glsl")
        Transform.__init__(self, code, *args, **kwargs)
        self.translate = Transform._get_kwarg("translate", kwargs) or (0,0,0)

    @property
    def translate(self):
        """ Translate vector """
        return self._translate

    @translate.setter
    def translate(self, value):
        """ Translate vector """
        # Stored as float32 to match the GLSL uniform; the uniform is only
        # written once the transform is attached to a program.
        self._translate = np.asarray(value,dtype=np.float32)
        if self.is_attached:
            self["translate"] = self._translate

    def on_attach(self, program):
        # Push the pending translation into the newly attached program.
        self["translate"] = self._translate
| {
"content_hash": "6c688977d0877092b31baf4b6b2ca49f",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 77,
"avg_line_length": 24.047619047619047,
"alnum_prop": 0.5808580858085809,
"repo_name": "glumpy/glumpy",
"id": "64a180be677a006e80d7b20aa9fa69cdd94edb98",
"size": "1785",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "glumpy/transforms/translate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "Cython",
"bytes": "660"
},
{
"name": "GLSL",
"bytes": "177965"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1320773"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.core.exceptions import FieldError
from django.db import models, DEFAULT_DB_ALIAS
from django.db.models import signals
from django.test import TestCase
from .models import (MyPerson, Person, StatusPerson, LowerStatusPerson,
MyPersonProxy, Abstract, OtherPerson, User, UserProxy, UserProxyProxy,
Country, State, StateProxy, TrackerUser, BaseUser, Bug, ProxyTrackerUser,
Improvement, ProxyProxyBug, ProxyBug, ProxyImprovement)
class ProxyModelTests(TestCase):
    """
    Proxy-model behavior: shared storage with the concrete model, custom
    managers, proxy-of-proxy chains, signals, content types, select_related
    and fixture loading.  Model definitions live in .models.
    """
    def test_same_manager_queries(self):
        """
        The MyPerson model should be generating the same database queries as
        the Person model (when the same manager is used in each case).
        """
        my_person_sql = MyPerson.other.all().query.get_compiler(
                DEFAULT_DB_ALIAS).as_sql()
        person_sql = Person.objects.order_by("name").query.get_compiler(
                DEFAULT_DB_ALIAS).as_sql()
        self.assertEqual(my_person_sql, person_sql)

    def test_inheretance_new_table(self):
        """
        The StatusPerson models should have its own table (it's using ORM-level
        inheritance).
        """
        # (sic: method name typo "inheretance" kept -- renaming would change
        # the test identifier.)
        sp_sql = StatusPerson.objects.all().query.get_compiler(
                DEFAULT_DB_ALIAS).as_sql()
        p_sql = Person.objects.all().query.get_compiler(
                DEFAULT_DB_ALIAS).as_sql()
        self.assertNotEqual(sp_sql, p_sql)

    def test_basic_proxy(self):
        """
        Creating a Person makes them accessible through the MyPerson proxy.
        """
        person = Person.objects.create(name="Foo McBar")
        self.assertEqual(len(Person.objects.all()), 1)
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(MyPerson.objects.get(name="Foo McBar").id, person.id)
        self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())

    def test_no_proxy(self):
        """
        Person is not proxied by StatusPerson subclass.
        """
        Person.objects.create(name="Foo McBar")
        self.assertEqual(list(StatusPerson.objects.all()), [])

    def test_basic_proxy_reverse(self):
        """
        A new MyPerson also shows up as a standard Person.
        """
        MyPerson.objects.create(name="Bazza del Frob")
        self.assertEqual(len(MyPerson.objects.all()), 1)
        self.assertEqual(len(Person.objects.all()), 1)
        LowerStatusPerson.objects.create(status="low", name="homer")
        lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
        self.assertEqual(lsps, ["homer"])

    def test_correct_type_proxy_of_proxy(self):
        """
        Correct type when querying a proxy of proxy
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        pp = sorted([mpp.name for mpp in MyPersonProxy.objects.all()])
        self.assertEqual(pp, ['Bazza del Frob', 'Foo McBar', 'homer'])

    def test_proxy_included_in_ancestors(self):
        """
        Proxy models are included in the ancestors for a model's DoesNotExist
        and MultipleObjectsReturned
        """
        Person.objects.create(name="Foo McBar")
        MyPerson.objects.create(name="Bazza del Frob")
        LowerStatusPerson.objects.create(status="low", name="homer")
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        # Misses through a proxy raise the *concrete* model's exceptions.
        self.assertRaises(Person.DoesNotExist,
            MyPersonProxy.objects.get,
            name='Zathras'
        )
        self.assertRaises(Person.MultipleObjectsReturned,
            MyPersonProxy.objects.get,
            id__lt=max_id+1
        )
        self.assertRaises(Person.DoesNotExist,
            StatusPerson.objects.get,
            name='Zathras'
        )
        sp1 = StatusPerson.objects.create(name='Bazza Jr.')
        sp2 = StatusPerson.objects.create(name='Foo Jr.')
        max_id = Person.objects.aggregate(max_id=models.Max('id'))['max_id']
        self.assertRaises(Person.MultipleObjectsReturned,
            StatusPerson.objects.get,
            id__lt=max_id+1
        )

    def test_abc(self):
        """
        All base classes must be non-abstract
        """
        def build_abc():
            class NoAbstract(Abstract):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_abc)

    def test_no_cbc(self):
        """
        The proxy must actually have one concrete base class
        """
        def build_no_cbc():
            class TooManyBases(Person, Abstract):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_no_cbc)

    def test_no_base_classes(self):
        # A proxy with no model base class at all is rejected at class
        # creation time.
        def build_no_base_classes():
            class NoBaseClasses(models.Model):
                class Meta:
                    proxy = True
        self.assertRaises(TypeError, build_no_base_classes)

    def test_new_fields(self):
        # Proxy models may not declare new fields.
        def build_new_fields():
            class NoNewFields(Person):
                newfield = models.BooleanField()
                class Meta:
                    proxy = True
        self.assertRaises(FieldError, build_new_fields)

    def test_myperson_manager(self):
        # MyPerson's default manager filters/orders the shared rows ("wilma"
        # is excluded) -- behavior defined by the custom manager in .models.
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in MyPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in MyPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'fred'])

    def test_otherperson_manager(self):
        # OtherPerson exposes two managers with different filters; the first
        # declared one ("objects") is the default manager.
        Person.objects.create(name="fred")
        Person.objects.create(name="wilma")
        Person.objects.create(name="barney")
        resp = [p.name for p in OtherPerson.objects.all()]
        self.assertEqual(resp, ['barney', 'wilma'])
        resp = [p.name for p in OtherPerson.excluder.all()]
        self.assertEqual(resp, ['barney', 'fred'])
        resp = [p.name for p in OtherPerson._default_manager.all()]
        self.assertEqual(resp, ['barney', 'wilma'])

    def test_proxy_model_signals(self):
        """
        Test save signals for proxy models
        """
        output = []
        def make_handler(model, event):
            def _handler(*args, **kwargs):
                output.append('%s %s save' % (model, event))
            return _handler
        h1 = make_handler('MyPerson', 'pre')
        h2 = make_handler('MyPerson', 'post')
        h3 = make_handler('Person', 'pre')
        h4 = make_handler('Person', 'post')
        signals.pre_save.connect(h1, sender=MyPerson)
        signals.post_save.connect(h2, sender=MyPerson)
        signals.pre_save.connect(h3, sender=Person)
        signals.post_save.connect(h4, sender=Person)
        # Saving through the proxy fires the proxy's signals, not the
        # concrete model's.
        dino = MyPerson.objects.create(name=u"dino")
        self.assertEqual(output, [
                'MyPerson pre save',
                'MyPerson post save'
            ])
        output = []
        h5 = make_handler('MyPersonProxy', 'pre')
        h6 = make_handler('MyPersonProxy', 'post')
        signals.pre_save.connect(h5, sender=MyPersonProxy)
        signals.post_save.connect(h6, sender=MyPersonProxy)
        dino = MyPersonProxy.objects.create(name=u"pebbles")
        self.assertEqual(output, [
                'MyPersonProxy pre save',
                'MyPersonProxy post save'
            ])
        # Disconnect everything so later tests see no stray handlers.
        signals.pre_save.disconnect(h1, sender=MyPerson)
        signals.post_save.disconnect(h2, sender=MyPerson)
        signals.pre_save.disconnect(h3, sender=Person)
        signals.post_save.disconnect(h4, sender=Person)
        signals.pre_save.disconnect(h5, sender=MyPersonProxy)
        signals.post_save.disconnect(h6, sender=MyPersonProxy)

    def test_content_type(self):
        # A proxy shares the ContentType of its concrete model.
        ctype = ContentType.objects.get_for_model
        self.assertTrue(ctype(Person) is ctype(OtherPerson))

    def test_user_userproxy_userproxyproxy(self):
        # Rows are visible through the proxy and the proxy-of-proxy alike.
        User.objects.create(name='Bruce')
        resp = [u.name for u in User.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])
        resp = [u.name for u in UserProxyProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])

    def test_proxy_delete(self):
        """
        Proxy objects can be deleted
        """
        User.objects.create(name='Bruce')
        u2 = UserProxy.objects.create(name='George')
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce', 'George'])
        u2.delete()
        resp = [u.name for u in UserProxy.objects.all()]
        self.assertEqual(resp, ['Bruce'])

    def test_select_related(self):
        """
        We can still use `select_related()` to include related models in our
        querysets.
        """
        country = Country.objects.create(name='Australia')
        state = State.objects.create(name='New South Wales', country=country)
        resp = [s.name for s in State.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        resp = [s.name for s in StateProxy.objects.select_related()]
        self.assertEqual(resp, ['New South Wales'])
        self.assertEqual(StateProxy.objects.get(name='New South Wales').name,
            'New South Wales')
        resp = StateProxy.objects.select_related().get(name='New South Wales')
        self.assertEqual(resp.name, 'New South Wales')

    def test_proxy_bug(self):
        # Filters on related fields (and select_related) must work when the
        # related model, or the model being queried, is a proxy or a proxy of
        # a proxy.
        contributor = TrackerUser.objects.create(name='Contributor',
            status='contrib')
        someone = BaseUser.objects.create(name='Someone')
        Bug.objects.create(summary='fix this', version='1.1beta',
            assignee=contributor, reporter=someone)
        pcontributor = ProxyTrackerUser.objects.create(name='OtherContributor',
            status='proxy')
        Improvement.objects.create(summary='improve that', version='1.1beta',
            assignee=contributor, reporter=pcontributor,
            associated_bug=ProxyProxyBug.objects.all()[0])
        # Related field filter on proxy
        resp = ProxyBug.objects.get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Select related + filter on proxy
        resp = ProxyBug.objects.select_related().get(version__icontains='beta')
        self.assertEqual(repr(resp), '<ProxyBug: ProxyBug:fix this>')
        # Proxy of proxy, select_related + filter
        resp = ProxyProxyBug.objects.select_related().get(
            version__icontains='beta'
        )
        self.assertEqual(repr(resp), '<ProxyProxyBug: ProxyProxyBug:fix this>')
        # Select related + filter on a related proxy field
        resp = ProxyImprovement.objects.select_related().get(
            reporter__name__icontains='butor'
        )
        self.assertEqual(repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )
        # Select related + filter on a related proxy of proxy field
        resp = ProxyImprovement.objects.select_related().get(
            associated_bug__summary__icontains='fix'
        )
        self.assertEqual(repr(resp),
            '<ProxyImprovement: ProxyImprovement:improve that>'
        )

    def test_proxy_load_from_fixture(self):
        # Fixture rows load into the concrete table and are reachable through
        # the proxy manager.
        management.call_command('loaddata', 'mypeople.json', verbosity=0, commit=False)
        p = MyPerson.objects.get(pk=100)
        self.assertEqual(p.name, 'Elvis Presley')
| {
"content_hash": "b14a4c8b0824b07442fd093f1e72f2b9",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 87,
"avg_line_length": 37.00317460317461,
"alnum_prop": 0.6177934111187371,
"repo_name": "mixman/djangodev",
"id": "3ec84656893f9631a9acb9417b76305022906607",
"size": "11656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/modeltests/proxy_models/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "88362"
},
{
"name": "Python",
"bytes": "7834206"
},
{
"name": "Shell",
"bytes": "9076"
}
],
"symlink_target": ""
} |
import math
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from ehata import ehata
from itm import pytm
from geo import tropoClim
from geo import refractivity
from geo import ned_indexer
from geo import nlcd_indexer
from geo import land_use
from geo import vincenty
# f in MHz; d and h1/h2 all in meters
def FreeSpacePathLoss(f, d, h1, h2):
  """Free-space path loss in dB.

  Args:
    f: frequency in MHz.
    d: ground (horizontal) distance in meters.
    h1, h2: antenna heights in meters.

  Returns:
    20*log10(r) + 20*log10(f) - 27.56, where r is the 3D slant range
    between the two antennas.
  """
  # math.hypot computes sqrt(d*d + dh*dh) without the intermediate
  # squaring, so it cannot overflow/underflow for extreme magnitudes
  # the way the explicit d*d + (h1-h2)*(h1-h2) form can.
  r = math.hypot(d, h1 - h2)
  return 20*math.log10(r) + 20*math.log10(f) - 27.56
class PropagationLossModel:
  """Hybrid ITM / extended-Hata propagation-loss model.

  Wraps the Longley-Rice ITM point-to-point model (via pytm) and the
  extended Hata model (via ehata), choosing between or blending them in
  PropagationLoss(). Terrain, climate, refractivity and land-cover data
  are read through the indexers constructed in __init__.
  """
  def __init__(self, itu_dir, ned_dir, nlcd_dir):
    # itu_dir: ITU climate and surface-refractivity data files.
    # ned_dir: NED terrain elevation tiles.
    # nlcd_dir: NLCD land-cover tiles.
    self.climIndx = tropoClim.ClimateIndexer(itu_dir)
    self.refractivityIndx = refractivity.RefractivityIndexer(itu_dir)
    self.nedIndx = ned_indexer.NedIndexer(ned_dir)
    self.nlcdIndx = nlcd_indexer.NlcdIndexer(nlcd_dir)
  # Calculate the ITM adjusted propagation loss given the
  # assumptions on the ITM model.
  def ITM_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, reliability):
    """Returns the ITM (Longley-Rice) point-to-point loss in dB.

    lat1, lng1, h1: endpoint 1 latitude/longitude (degrees) and height (m).
    lat2, lng2, h2: endpoint 2 latitude/longitude (degrees) and height (m).
    f: frequency in MHz.
    reliability: ITM reliability value, passed through to pytm.
    """
    # Fixed model assumptions: "good ground" electrical constants and a
    # hard-coded polarization/confidence (presumably 1 = vertical per the
    # ITM convention — confirm against the pytm binding).
    dielectric_constant = 25.0 # good ground
    soil_conductivity = 0.02 # good ground
    polarization = 1
    confidence = 0.5
    # get surface refractivity and radio climate from path midpoint
    dist, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
    lat_c, lng_c, alpha2 = vincenty.to_dist_bear_vincenty(lat1, lng1, dist/2.0, bearing)
    print 'Midpoint = %f, %f' % (lat_c, lng_c)
    radio_climate = self.climIndx.TropoClim(lat_c, lng_c)
    # NOTE(review): this local shadows the imported 'refractivity' module
    # for the rest of the method. Harmless today, but fragile if the
    # module is referenced below later.
    refractivity = self.refractivityIndx.Refractivity(lat_c, lng_c)
    print 'Using climate %d' % radio_climate
    print 'Using refractivity %f' % refractivity
    print 'Using freq %f' % f
    # Terrain elevation profile between the endpoints; per the print
    # below, profile[0]*profile[1] is the total path distance.
    profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
    print profile[0], profile[1]
    #print profile
    print 'total distance is ', profile[0]*profile[1]
    loss = pytm.point_to_point(profile, h1, h2,
                               dielectric_constant,
                               soil_conductivity,
                               refractivity,
                               f,
                               radio_climate,
                               polarization,
                               confidence,
                               reliability)
    print 'ITM P2P is ', loss
    return loss
  # Adjusted propagation loss according to the adjustments in R2-SGN-04
  # distance d, heights h1, h2 all in meters
  # frequency f in MHz
  def ExtendedHata_AdjustedPropagationLoss(self, lat1, lng1, h1, lat2, lng2, h2, f, land_cat):
    """Extended-Hata loss in dB with distance-based adjustments:

    d <= 100 m        : free-space path loss only.
    100 m < d <= 1 km : interpolation between FSPL at 100 m and the eHata
                        median basic loss (evaluated at 1.0 — presumably
                        km; confirm against the ehata API).
    1 km < d < 80 km  : plain eHata with the terrain profile.
    d >= 80 km        : ITM over the full path plus a non-negative offset
                        J that matches eHata to ITM at the 80 km point.
    """
    d, bearing, rev_bearing = vincenty.dist_bear_vincenty(lat1, lng1, lat2, lng2)
    # vincenty distance appears to be km (80 km path is passed as 80.0
    # below); convert to meters for the threshold tests.
    d = d*1000.0
    print 'EHata distance=', d
    if d <= 100.0:
      # return FSPL
      print 'FSPL'
      return FreeSpacePathLoss(f, d, h1, h2)
    if d > 100.0 and d <= 1000.0:
      print 'interp FSPL and ehata'
      # interpolate FSPL and ehata
      fspl_loss = FreeSpacePathLoss(f, 100.0, h1, h2)
      print ' fspl_loss=', fspl_loss
      ehata_loss, abm = ehata.ExtendedHata_MedianBasicPropLoss(f, 1.0, h1, h2, land_cat)
      print ' ehata_loss=', ehata_loss
      print ' ( abm=', abm
      # (1 + log10(d/1000)) ramps from 0 at d=100m to 1 at d=1km, so the
      # result moves linearly (in log-distance) from FSPL to eHata.
      return fspl_loss + (1.0 + math.log10(d/1000.0))*(ehata_loss - fspl_loss)
    if d > 1000.0 and d < 80000.0:
      # return eHata value without adjustment.
      print 'EHata only for d=%f' % d
      profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
      return ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile)
    if d >= 80000.0:
      print 'EHata for distance %f > 80km' % d
      # Derive profile_80km
      lat_80, lng_80, heading = vincenty.to_dist_bear_vincenty(lat1, lng1, 80.0, bearing)
      print '80km point is %f %f' % (lat_80, lng_80)
      profile_80km = self.nedIndx.Profile(lat1, lng1, lat_80, lng_80)
      # Find J adjustment...
      ehata_loss = ehata.ExtendedHata_PropagationLoss(f, h1, h2, land_cat, profile_80km)
      itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat_80, lng_80, h2, f, 0.5)
      J = ehata_loss - itm_loss
      print 'Got ehata=%f itm=%f J=%f' % (ehata_loss, itm_loss, J)
      # Clamp J at zero so the adjustment never reduces the ITM loss.
      if J < 0.0:
        J = 0.0
      return self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5) + J
  def LandClassification(self, lat, lng):
    """Returns the NLCD land category for the point (lat, lng)."""
    code = self.nlcdIndx.NlcdCode(lat, lng)
    # NOTE(review): this uses nlcdIndx.NlcdLandCategory while
    # PropagationLoss() below uses land_use.NlcdLandCategory for the same
    # mapping — verify the two agree.
    return self.nlcdIndx.NlcdLandCategory(code)
  # This is the oracle for propagation loss from point 1 to point 2 at frequency f (Mhz).
  def PropagationLoss(self, f, lat1, lng1, h1, lat2, lng2, h2, land_cat=''):
    """Median propagation loss in dB from point 1 to point 2 at f MHz.

    land_cat: optional land category; when empty it is derived from the
    NLCD code at the receiver, falling back to the transmitter's code
    when the receiver code is 11 (open water in the NLCD legend).
    """
    if land_cat == '':
      code = self.nlcdIndx.NlcdCode(lat2, lng2)
      if code == 11:
        code = self.nlcdIndx.NlcdCode(lat1, lng1)
      land_cat = land_use.NlcdLandCategory(code)
      print 'Using land_cat =', land_cat
    # Calculate effective heights of tx and rx:
    profile = self.nedIndx.Profile(lat1, lng1, lat2, lng2)
    # NOTE(review): EffectiveHeights is neither defined in this module nor
    # imported — this line raises NameError at runtime. h2eff is also
    # never used below.
    h1eff, h2eff = EffectiveHeights(h1, h2, profile)
    if land_cat == 'RURAL' or h1eff >= 200: # Only h1eff (CBSD effective height) counts
      # Rural paths, or a high effective transmitter, use ITM alone.
      itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
      print 'Returning itm_loss for rural > 200: ', itm_loss
      return itm_loss
    else:
      # Otherwise return the larger (more conservative) of ITM and eHata.
      itm_loss = self.ITM_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, 0.5)
      ehata_loss = self.ExtendedHata_AdjustedPropagationLoss(lat1, lng1, h1, lat2, lng2, h2, f, land_cat)
      if ehata_loss > itm_loss:
        return ehata_loss
      return itm_loss
# Run directly, takes args of "f, lat1, lng1, h1, lat2, lng2, h2" and prints the
# (median) propagation loss in dB. (f in MHz comes FIRST, matching the
# PropagationLoss() signature; coordinates in degrees, heights in meters.)
if __name__ == '__main__':
  # Data directories live at <repo root>/data/{itu,ned,nlcd}, two levels
  # above this file.
  dir = os.path.dirname(os.path.realpath(__file__))  # NOTE(review): shadows builtin dir()
  rootDir = os.path.dirname(os.path.dirname(dir))
  ituDir = os.path.join(os.path.join(rootDir, 'data'), 'itu')
  nedDir = os.path.join(os.path.join(rootDir, 'data'), 'ned')
  nlcdDir = os.path.join(os.path.join(rootDir, 'data'), 'nlcd')
  prop = PropagationLossModel(ituDir, nedDir, nlcdDir)
  # argv order: f, lat1, lng1, h1, lat2, lng2, h2
  loss = prop.PropagationLoss(float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]),
                              float(sys.argv[4]), float(sys.argv[5]), float(sys.argv[6]),
                              float(sys.argv[7]))
  print 'Propagation Loss = ', loss, ' dB'
| {
"content_hash": "9dcf42b73c0584fa0ccd352b716f5659",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 105,
"avg_line_length": 41.54362416107382,
"alnum_prop": 0.6337641357027464,
"repo_name": "gregbillock/Spectrum-Access-System",
"id": "7f1ed02fbcc208a5099d64e386b1ba2a78d3a6aa",
"size": "6985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/prop/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5922"
},
{
"name": "C++",
"bytes": "76858"
},
{
"name": "PowerShell",
"bytes": "11931"
},
{
"name": "Python",
"bytes": "239414"
},
{
"name": "Shell",
"bytes": "9043"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.