| column | dtype | lengths / classes |
|---|---|---|
| commit | string | 40–40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |
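The `*_tagged` columns wrap each example in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers, as the rows below show; `prompt_tagged` stops at `<commit_after>`, while `text_tagged` also appends the new file contents. A minimal sketch of how a `text_tagged` string could be reassembled from the raw columns; the helper name and the sample `row` dict are illustrative, not part of the dataset:

```python
def build_text_tagged(row):
    """Rebuild the `text_tagged` field from the raw columns of one row.

    Mirrors the template visible in the rows below:
    <commit_before>{old_contents}<commit_msg>{message}<commit_after>{new_contents}
    """
    return (
        "<commit_before>" + row["old_contents"]
        + "<commit_msg>" + row["message"]
        + "<commit_after>" + row["new_contents"]
    )


# For commits that add a new file, `old_contents` is empty, so the string
# begins with back-to-back <commit_before><commit_msg> markers.
sample = {
    "old_contents": "",
    "message": "Include test file for token_update API",
    "new_contents": "from django.core.urlresolvers import reverse\n# ...",
}
print(build_text_tagged(sample))
```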
ce3af1f615b0979f8fcaf0fcec2efb1d73c13a36
|
api/tests/v2/test_token_update.py
|
api/tests/v2/test_token_update.py
|
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.tests.factories import UserFactory, AnonymousUserFactory, ProviderFactory
from api.v2.views import TokenUpdateViewSet, IdentityViewSet, CredentialViewSet
class TokenUpdateTests(APITestCase):
def setUp(self):
self.anonymous_user = AnonymousUserFactory()
self.user = UserFactory.create()
self.provider = ProviderFactory.create(location="mock location", type__name="mock")
self.view = TokenUpdateViewSet.as_view({'post': 'create'})
self.identity_view = IdentityViewSet.as_view({'get': 'retrieve'})
self.credentials_view = CredentialViewSet.as_view({'get': 'list'})
self.token_uuid = "test-token-1234-debug"
def test_invalid_provider_token_update(self):
factory = APIRequestFactory()
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': "nopenopenope",
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertTrue(response.status_code == 400)
self.assertTrue('provider' in response.data)
self.assertTrue("not a valid UUID" in response.data['provider'])
def test_valid_data_token_update(self):
factory = APIRequestFactory()
provider_uuid = str(self.provider.uuid)
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': provider_uuid,
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEquals(response.status_code, 201)
data = response.data
self.assertTrue('identity_uuid' in data)
identity_uuid = data['identity_uuid']
cred_url = reverse('api:v2:credential-list')
cred_request = factory.get(cred_url)
force_authenticate(cred_request, user=self.user)
cred_response = self.credentials_view(cred_request)
self.assertTrue('results' in cred_response.data)
for cred in cred_response.data['results']:
self.assertTrue(cred['identity']['uuid'] == identity_uuid)
if cred['key'] == 'key':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_project_name':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_force_auth_token':
self.assertTrue(cred['value'] == self.token_uuid)
|
Include test file for token_update API
|
Include test file for token_update API
|
Python
|
apache-2.0
|
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
|
Include test file for token_update API
|
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.tests.factories import UserFactory, AnonymousUserFactory, ProviderFactory
from api.v2.views import TokenUpdateViewSet, IdentityViewSet, CredentialViewSet
class TokenUpdateTests(APITestCase):
def setUp(self):
self.anonymous_user = AnonymousUserFactory()
self.user = UserFactory.create()
self.provider = ProviderFactory.create(location="mock location", type__name="mock")
self.view = TokenUpdateViewSet.as_view({'post': 'create'})
self.identity_view = IdentityViewSet.as_view({'get': 'retrieve'})
self.credentials_view = CredentialViewSet.as_view({'get': 'list'})
self.token_uuid = "test-token-1234-debug"
def test_invalid_provider_token_update(self):
factory = APIRequestFactory()
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': "nopenopenope",
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertTrue(response.status_code == 400)
self.assertTrue('provider' in response.data)
self.assertTrue("not a valid UUID" in response.data['provider'])
def test_valid_data_token_update(self):
factory = APIRequestFactory()
provider_uuid = str(self.provider.uuid)
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': provider_uuid,
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEquals(response.status_code, 201)
data = response.data
self.assertTrue('identity_uuid' in data)
identity_uuid = data['identity_uuid']
cred_url = reverse('api:v2:credential-list')
cred_request = factory.get(cred_url)
force_authenticate(cred_request, user=self.user)
cred_response = self.credentials_view(cred_request)
self.assertTrue('results' in cred_response.data)
for cred in cred_response.data['results']:
self.assertTrue(cred['identity']['uuid'] == identity_uuid)
if cred['key'] == 'key':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_project_name':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_force_auth_token':
self.assertTrue(cred['value'] == self.token_uuid)
|
<commit_before><commit_msg>Include test file for token_update API<commit_after>
|
from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.tests.factories import UserFactory, AnonymousUserFactory, ProviderFactory
from api.v2.views import TokenUpdateViewSet, IdentityViewSet, CredentialViewSet
class TokenUpdateTests(APITestCase):
def setUp(self):
self.anonymous_user = AnonymousUserFactory()
self.user = UserFactory.create()
self.provider = ProviderFactory.create(location="mock location", type__name="mock")
self.view = TokenUpdateViewSet.as_view({'post': 'create'})
self.identity_view = IdentityViewSet.as_view({'get': 'retrieve'})
self.credentials_view = CredentialViewSet.as_view({'get': 'list'})
self.token_uuid = "test-token-1234-debug"
def test_invalid_provider_token_update(self):
factory = APIRequestFactory()
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': "nopenopenope",
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertTrue(response.status_code == 400)
self.assertTrue('provider' in response.data)
self.assertTrue("not a valid UUID" in response.data['provider'])
def test_valid_data_token_update(self):
factory = APIRequestFactory()
provider_uuid = str(self.provider.uuid)
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': provider_uuid,
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEquals(response.status_code, 201)
data = response.data
self.assertTrue('identity_uuid' in data)
identity_uuid = data['identity_uuid']
cred_url = reverse('api:v2:credential-list')
cred_request = factory.get(cred_url)
force_authenticate(cred_request, user=self.user)
cred_response = self.credentials_view(cred_request)
self.assertTrue('results' in cred_response.data)
for cred in cred_response.data['results']:
self.assertTrue(cred['identity']['uuid'] == identity_uuid)
if cred['key'] == 'key':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_project_name':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_force_auth_token':
self.assertTrue(cred['value'] == self.token_uuid)
|
Include test file for token_update APIfrom django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.tests.factories import UserFactory, AnonymousUserFactory, ProviderFactory
from api.v2.views import TokenUpdateViewSet, IdentityViewSet, CredentialViewSet
class TokenUpdateTests(APITestCase):
def setUp(self):
self.anonymous_user = AnonymousUserFactory()
self.user = UserFactory.create()
self.provider = ProviderFactory.create(location="mock location", type__name="mock")
self.view = TokenUpdateViewSet.as_view({'post': 'create'})
self.identity_view = IdentityViewSet.as_view({'get': 'retrieve'})
self.credentials_view = CredentialViewSet.as_view({'get': 'list'})
self.token_uuid = "test-token-1234-debug"
def test_invalid_provider_token_update(self):
factory = APIRequestFactory()
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': "nopenopenope",
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertTrue(response.status_code == 400)
self.assertTrue('provider' in response.data)
self.assertTrue("not a valid UUID" in response.data['provider'])
def test_valid_data_token_update(self):
factory = APIRequestFactory()
provider_uuid = str(self.provider.uuid)
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': provider_uuid,
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEquals(response.status_code, 201)
data = response.data
self.assertTrue('identity_uuid' in data)
identity_uuid = data['identity_uuid']
cred_url = reverse('api:v2:credential-list')
cred_request = factory.get(cred_url)
force_authenticate(cred_request, user=self.user)
cred_response = self.credentials_view(cred_request)
self.assertTrue('results' in cred_response.data)
for cred in cred_response.data['results']:
self.assertTrue(cred['identity']['uuid'] == identity_uuid)
if cred['key'] == 'key':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_project_name':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_force_auth_token':
self.assertTrue(cred['value'] == self.token_uuid)
|
<commit_before><commit_msg>Include test file for token_update API<commit_after>from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase, APIRequestFactory, force_authenticate
from api.tests.factories import UserFactory, AnonymousUserFactory, ProviderFactory
from api.v2.views import TokenUpdateViewSet, IdentityViewSet, CredentialViewSet
class TokenUpdateTests(APITestCase):
def setUp(self):
self.anonymous_user = AnonymousUserFactory()
self.user = UserFactory.create()
self.provider = ProviderFactory.create(location="mock location", type__name="mock")
self.view = TokenUpdateViewSet.as_view({'post': 'create'})
self.identity_view = IdentityViewSet.as_view({'get': 'retrieve'})
self.credentials_view = CredentialViewSet.as_view({'get': 'list'})
self.token_uuid = "test-token-1234-debug"
def test_invalid_provider_token_update(self):
factory = APIRequestFactory()
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': "nopenopenope",
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertTrue(response.status_code == 400)
self.assertTrue('provider' in response.data)
self.assertTrue("not a valid UUID" in response.data['provider'])
def test_valid_data_token_update(self):
factory = APIRequestFactory()
provider_uuid = str(self.provider.uuid)
url = reverse('api:v2:token_update-list')
data = {
'username': self.user.username,
'project_name': self.user.username,
'provider': provider_uuid,
'token': self.token_uuid
}
request = factory.post(url, data)
force_authenticate(request, user=self.user)
response = self.view(request)
self.assertEquals(response.status_code, 201)
data = response.data
self.assertTrue('identity_uuid' in data)
identity_uuid = data['identity_uuid']
cred_url = reverse('api:v2:credential-list')
cred_request = factory.get(cred_url)
force_authenticate(cred_request, user=self.user)
cred_response = self.credentials_view(cred_request)
self.assertTrue('results' in cred_response.data)
for cred in cred_response.data['results']:
self.assertTrue(cred['identity']['uuid'] == identity_uuid)
if cred['key'] == 'key':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_project_name':
self.assertTrue(cred['value'] == self.user.username)
elif cred['key'] == 'ex_force_auth_token':
self.assertTrue(cred['value'] == self.token_uuid)
|
|
3c5d21b41c78b87e1567453b4a6f15ca3d97c966
|
pyblogit/blog_model.py
|
pyblogit/blog_model.py
|
"""
pyblogit.blog_model
~~~~~~~~~~~~~~~~~~~
This module contains the data model to represent a blog and methods to
manipulate it.
"""
class blog(object):
"""The blog data model"""
def __init__(self, blog_id, name, url, desc, posts, pages):
self._blog_id = blog_id
self._name = name
self._url = url
self._desc = desc
self._posts = posts
self._pages = pages
@property
def blog_id(self):
return self._blog_id
@property
def name(self):
return self._name
@property
def url(self):
return self._url
@property
def desc(self):
return self._desc
@property
def posts(self):
return self._posts
@property
def pages(self):
return self._pages
|
Add class to represent a blog
|
Add class to represent a blog
|
Python
|
mit
|
jamalmoir/pyblogit
|
Add class to represent a blog
|
"""
pyblogit.blog_model
~~~~~~~~~~~~~~~~~~~
This module contains the data model to represent a blog and methods to
manipulate it.
"""
class blog(object):
"""The blog data model"""
def __init__(self, blog_id, name, url, desc, posts, pages):
self._blog_id = blog_id
self._name = name
self._url = url
self._desc = desc
self._posts = posts
self._pages = pages
@property
def blog_id(self):
return self._blog_id
@property
def name(self):
return self._name
@property
def url(self):
return self._url
@property
def desc(self):
return self._desc
@property
def posts(self):
return self._posts
@property
def pages(self):
return self._pages
|
<commit_before><commit_msg>Add class to represent a blog<commit_after>
|
"""
pyblogit.blog_model
~~~~~~~~~~~~~~~~~~~
This module contains the data model to represent a blog and methods to
manipulate it.
"""
class blog(object):
"""The blog data model"""
def __init__(self, blog_id, name, url, desc, posts, pages):
self._blog_id = blog_id
self._name = name
self._url = url
self._desc = desc
self._posts = posts
self._pages = pages
@property
def blog_id(self):
return self._blog_id
@property
def name(self):
return self._name
@property
def url(self):
return self._url
@property
def desc(self):
return self._desc
@property
def posts(self):
return self._posts
@property
def pages(self):
return self._pages
|
Add class to represent a blog"""
pyblogit.blog_model
~~~~~~~~~~~~~~~~~~~
This module contains the data model to represent a blog and methods to
manipulate it.
"""
class blog(object):
"""The blog data model"""
def __init__(self, blog_id, name, url, desc, posts, pages):
self._blog_id = blog_id
self._name = name
self._url = url
self._desc = desc
self._posts = posts
self._pages = pages
@property
def blog_id(self):
return self._blog_id
@property
def name(self):
return self._name
@property
def url(self):
return self._url
@property
def desc(self):
return self._desc
@property
def posts(self):
return self._posts
@property
def pages(self):
return self._pages
|
<commit_before><commit_msg>Add class to represent a blog<commit_after>"""
pyblogit.blog_model
~~~~~~~~~~~~~~~~~~~
This module contains the data model to represent a blog and methods to
manipulate it.
"""
class blog(object):
"""The blog data model"""
def __init__(self, blog_id, name, url, desc, posts, pages):
self._blog_id = blog_id
self._name = name
self._url = url
self._desc = desc
self._posts = posts
self._pages = pages
@property
def blog_id(self):
return self._blog_id
@property
def name(self):
return self._name
@property
def url(self):
return self._url
@property
def desc(self):
return self._desc
@property
def posts(self):
return self._posts
@property
def pages(self):
return self._pages
|
|
9a80f30acc2b57c37726d765977ff471b0033e1a
|
ideascube/conf/idb_jor_zaatari.py
|
ideascube/conf/idb_jor_zaatari.py
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Zaatari"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['en','ar']
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'wikibooks',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['en', 'ar']
},
{
'id': 'wikiquote',
'languages': ['en', 'ar']
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
('ted-ed', 'Education'),
('tedmed.en', 'Medical'),
]
},
{
'id': 'gutenberg',
'languages': ['en', 'fr']
},
{
'id': 'crashcourse',
'languages': ['en']
},
{
'id': 'vikidia',
'languages': ['en']
},
]
|
Add configuration for Zaatari, Jordan.
|
Add configuration for Zaatari, Jordan.
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add configuration for Zaatari, Jordan.
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Zaatari"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['en','ar']
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'wikibooks',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['en', 'ar']
},
{
'id': 'wikiquote',
'languages': ['en', 'ar']
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
('ted-ed', 'Education'),
('tedmed.en', 'Medical'),
]
},
{
'id': 'gutenberg',
'languages': ['en', 'fr']
},
{
'id': 'crashcourse',
'languages': ['en']
},
{
'id': 'vikidia',
'languages': ['en']
},
]
|
<commit_before><commit_msg>Add configuration for Zaatari, Jordan.<commit_after>
|
# -*- coding: utf-8 -*-
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Zaatari"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['en','ar']
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'wikibooks',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['en', 'ar']
},
{
'id': 'wikiquote',
'languages': ['en', 'ar']
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
('ted-ed', 'Education'),
('tedmed.en', 'Medical'),
]
},
{
'id': 'gutenberg',
'languages': ['en', 'fr']
},
{
'id': 'crashcourse',
'languages': ['en']
},
{
'id': 'vikidia',
'languages': ['en']
},
]
|
Add configuration for Zaatari, Jordan.# -*- coding: utf-8 -*-
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Zaatari"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['en','ar']
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'wikibooks',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['en', 'ar']
},
{
'id': 'wikiquote',
'languages': ['en', 'ar']
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
('ted-ed', 'Education'),
('tedmed.en', 'Medical'),
]
},
{
'id': 'gutenberg',
'languages': ['en', 'fr']
},
{
'id': 'crashcourse',
'languages': ['en']
},
{
'id': 'vikidia',
'languages': ['en']
},
]
|
<commit_before><commit_msg>Add configuration for Zaatari, Jordan.<commit_after># -*- coding: utf-8 -*-
"""Ideaxbox for Zaatari, Jordan"""
from .idb import * # noqa
from django.utils.translation import ugettext_lazy as _
IDEASCUBE_NAME = u"Zaatari"
IDEASCUBE_PLACE_NAME = _("city")
COUNTRIES_FIRST = ['SY', 'JO']
TIME_ZONE = 'Asia/Amman'
LANGUAGE_CODE = 'ar'
LOAN_DURATION = 14
MONITORING_ENTRY_EXPORT_FIELDS = ['serial', 'user_id', 'birth_year', 'gender']
USER_FORM_FIELDS = (
('Ideasbox', ['serial', 'box_awareness']),
(_('Personal informations'), ['short_name', 'full_name', 'birth_year', 'gender', 'id_card_number']), # noqa
(_('Family'), ['marital_status', 'family_status', 'children_under_12', 'children_under_18', 'children_above_18']), # noqa
(_('In the town'), ['current_occupation', 'school_level']),
(_('Language skills'), ['en_level']),
)
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'library',
},
{
'id': 'mediacenter',
},
{
'id': 'khanacademy',
},
{
'id': 'wikipedia',
'languages': ['en','ar']
},
{
'id': 'wiktionary',
'languages': ['en', 'ar']
},
{
'id': 'wikiversity',
'languages': ['en', 'ar']
},
{
'id': 'wikibooks',
'languages': ['en', 'ar']
},
{
'id': 'wikisource',
'languages': ['en', 'ar']
},
{
'id': 'wikiquote',
'languages': ['en', 'ar']
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
('ted-ed', 'Education'),
('tedmed.en', 'Medical'),
]
},
{
'id': 'gutenberg',
'languages': ['en', 'fr']
},
{
'id': 'crashcourse',
'languages': ['en']
},
{
'id': 'vikidia',
'languages': ['en']
},
]
|
|
b0e6556a908c1c520b060ea868fb18ea00bea4d9
|
moderation/migrations/0007_auto_20141207_1025.py
|
moderation/migrations/0007_auto_20141207_1025.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('moderation', '0006_auto_20141104_1434'),
]
operations = [
migrations.AlterModelOptions(
name='moderationlogmsg',
options={'verbose_name': 'log entry', 'verbose_name_plural': 'log entries'},
),
migrations.AlterField(
model_name='moderationlogmsg',
name='comment',
field=models.TextField(verbose_name='log comment'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='logged_by',
field=models.ForeignKey(related_name='log_messages_by', to=settings.AUTH_USER_MODEL, verbose_name='logged by', help_text='Moderator who created the log'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_datetime',
field=models.DateTimeField(verbose_name='date and time recorded', default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_type',
field=models.CharField(verbose_name='message type', choices=[('ALL', 'All'), ('INVITATION', 'Invitation'), ('REINVITATION', 'Invitation Resent'), ('APPROVAL', 'Application Approved'), ('REJECTION', 'Application Rejected'), ('DISMISSAL', 'Abuse Report Dismissed'), ('WARNING', 'Official Warning'), ('BANNING', 'Ban User')], max_length=20),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='pertains_to',
field=models.ForeignKey(related_name='log_messages_about', to=settings.AUTH_USER_MODEL, verbose_name='pertains to', help_text='User who moderation log is about'),
),
]
|
Add migration for translation cleanup
|
Add migration for translation cleanup
|
Python
|
bsd-3-clause
|
f3r3nc/connect,nlhkabu/connect,f3r3nc/connect,nlhkabu/connect,f3r3nc/connect,nlhkabu/connect,nlhkabu/connect,f3r3nc/connect
|
Add migration for translation cleanup
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('moderation', '0006_auto_20141104_1434'),
]
operations = [
migrations.AlterModelOptions(
name='moderationlogmsg',
options={'verbose_name': 'log entry', 'verbose_name_plural': 'log entries'},
),
migrations.AlterField(
model_name='moderationlogmsg',
name='comment',
field=models.TextField(verbose_name='log comment'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='logged_by',
field=models.ForeignKey(related_name='log_messages_by', to=settings.AUTH_USER_MODEL, verbose_name='logged by', help_text='Moderator who created the log'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_datetime',
field=models.DateTimeField(verbose_name='date and time recorded', default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_type',
field=models.CharField(verbose_name='message type', choices=[('ALL', 'All'), ('INVITATION', 'Invitation'), ('REINVITATION', 'Invitation Resent'), ('APPROVAL', 'Application Approved'), ('REJECTION', 'Application Rejected'), ('DISMISSAL', 'Abuse Report Dismissed'), ('WARNING', 'Official Warning'), ('BANNING', 'Ban User')], max_length=20),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='pertains_to',
field=models.ForeignKey(related_name='log_messages_about', to=settings.AUTH_USER_MODEL, verbose_name='pertains to', help_text='User who moderation log is about'),
),
]
|
<commit_before><commit_msg>Add migration for translation cleanup<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('moderation', '0006_auto_20141104_1434'),
]
operations = [
migrations.AlterModelOptions(
name='moderationlogmsg',
options={'verbose_name': 'log entry', 'verbose_name_plural': 'log entries'},
),
migrations.AlterField(
model_name='moderationlogmsg',
name='comment',
field=models.TextField(verbose_name='log comment'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='logged_by',
field=models.ForeignKey(related_name='log_messages_by', to=settings.AUTH_USER_MODEL, verbose_name='logged by', help_text='Moderator who created the log'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_datetime',
field=models.DateTimeField(verbose_name='date and time recorded', default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_type',
field=models.CharField(verbose_name='message type', choices=[('ALL', 'All'), ('INVITATION', 'Invitation'), ('REINVITATION', 'Invitation Resent'), ('APPROVAL', 'Application Approved'), ('REJECTION', 'Application Rejected'), ('DISMISSAL', 'Abuse Report Dismissed'), ('WARNING', 'Official Warning'), ('BANNING', 'Ban User')], max_length=20),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='pertains_to',
field=models.ForeignKey(related_name='log_messages_about', to=settings.AUTH_USER_MODEL, verbose_name='pertains to', help_text='User who moderation log is about'),
),
]
|
Add migration for translation cleanup# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('moderation', '0006_auto_20141104_1434'),
]
operations = [
migrations.AlterModelOptions(
name='moderationlogmsg',
options={'verbose_name': 'log entry', 'verbose_name_plural': 'log entries'},
),
migrations.AlterField(
model_name='moderationlogmsg',
name='comment',
field=models.TextField(verbose_name='log comment'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='logged_by',
field=models.ForeignKey(related_name='log_messages_by', to=settings.AUTH_USER_MODEL, verbose_name='logged by', help_text='Moderator who created the log'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_datetime',
field=models.DateTimeField(verbose_name='date and time recorded', default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_type',
field=models.CharField(verbose_name='message type', choices=[('ALL', 'All'), ('INVITATION', 'Invitation'), ('REINVITATION', 'Invitation Resent'), ('APPROVAL', 'Application Approved'), ('REJECTION', 'Application Rejected'), ('DISMISSAL', 'Abuse Report Dismissed'), ('WARNING', 'Official Warning'), ('BANNING', 'Ban User')], max_length=20),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='pertains_to',
field=models.ForeignKey(related_name='log_messages_about', to=settings.AUTH_USER_MODEL, verbose_name='pertains to', help_text='User who moderation log is about'),
),
]
|
<commit_before><commit_msg>Add migration for translation cleanup<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('moderation', '0006_auto_20141104_1434'),
]
operations = [
migrations.AlterModelOptions(
name='moderationlogmsg',
options={'verbose_name': 'log entry', 'verbose_name_plural': 'log entries'},
),
migrations.AlterField(
model_name='moderationlogmsg',
name='comment',
field=models.TextField(verbose_name='log comment'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='logged_by',
field=models.ForeignKey(related_name='log_messages_by', to=settings.AUTH_USER_MODEL, verbose_name='logged by', help_text='Moderator who created the log'),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_datetime',
field=models.DateTimeField(verbose_name='date and time recorded', default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='msg_type',
field=models.CharField(verbose_name='message type', choices=[('ALL', 'All'), ('INVITATION', 'Invitation'), ('REINVITATION', 'Invitation Resent'), ('APPROVAL', 'Application Approved'), ('REJECTION', 'Application Rejected'), ('DISMISSAL', 'Abuse Report Dismissed'), ('WARNING', 'Official Warning'), ('BANNING', 'Ban User')], max_length=20),
),
migrations.AlterField(
model_name='moderationlogmsg',
name='pertains_to',
field=models.ForeignKey(related_name='log_messages_about', to=settings.AUTH_USER_MODEL, verbose_name='pertains to', help_text='User who moderation log is about'),
),
]
|
|
50ed5c6566fb684bfea125d7084a5972edbe5c2a
|
test/unit/test_log.py
|
test/unit/test_log.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
def test_create():
'''Test creating a Log instance.'''
log = Log(name='bark.test.log')
assert log.items() == [('name', 'bark.test.log')]
def test_string_representation():
'''Test string representation of Log instance.'''
log = Log(name='bark.test.log')
assert str(log) == "{'name': 'bark.test.log'}"
def test_length():
'''Test len method returns number of current keys.'''
log = Log(name='bark.test.log')
assert len(log) == 1
log['message'] = 'A message'
assert len(log) == 2
def test_setting_and_getting_item():
'''Test setting and getting key value pair.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
def test_delete_item():
'''Test removing an item.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
del log['message']
assert len(log) == 0
with pytest.raises(KeyError):
log['message']
|
Add basic unit tests for Log.
|
Add basic unit tests for Log.
|
Python
|
apache-2.0
|
4degrees/mill,4degrees/sawmill
|
Add basic unit tests for Log.
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
def test_create():
'''Test creating a Log instance.'''
log = Log(name='bark.test.log')
assert log.items() == [('name', 'bark.test.log')]
def test_string_representation():
'''Test string representation of Log instance.'''
log = Log(name='bark.test.log')
assert str(log) == "{'name': 'bark.test.log'}"
def test_length():
'''Test len method returns number of current keys.'''
log = Log(name='bark.test.log')
assert len(log) == 1
log['message'] = 'A message'
assert len(log) == 2
def test_setting_and_getting_item():
'''Test setting and getting key value pair.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
def test_delete_item():
'''Test removing an item.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
del log['message']
assert len(log) == 0
with pytest.raises(KeyError):
log['message']
|
<commit_before><commit_msg>Add basic unit tests for Log.<commit_after>
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
def test_create():
'''Test creating a Log instance.'''
log = Log(name='bark.test.log')
assert log.items() == [('name', 'bark.test.log')]
def test_string_representation():
'''Test string representation of Log instance.'''
log = Log(name='bark.test.log')
assert str(log) == "{'name': 'bark.test.log'}"
def test_length():
'''Test len method returns number of current keys.'''
log = Log(name='bark.test.log')
assert len(log) == 1
log['message'] = 'A message'
assert len(log) == 2
def test_setting_and_getting_item():
'''Test setting and getting key value pair.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
def test_delete_item():
'''Test removing an item.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
del log['message']
assert len(log) == 0
with pytest.raises(KeyError):
log['message']
|
Add basic unit tests for Log.# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
def test_create():
'''Test creating a Log instance.'''
log = Log(name='bark.test.log')
assert log.items() == [('name', 'bark.test.log')]
def test_string_representation():
'''Test string representation of Log instance.'''
log = Log(name='bark.test.log')
assert str(log) == "{'name': 'bark.test.log'}"
def test_length():
'''Test len method returns number of current keys.'''
log = Log(name='bark.test.log')
assert len(log) == 1
log['message'] = 'A message'
assert len(log) == 2
def test_setting_and_getting_item():
'''Test setting and getting key value pair.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
def test_delete_item():
'''Test removing an item.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
del log['message']
assert len(log) == 0
with pytest.raises(KeyError):
log['message']
|
<commit_before><commit_msg>Add basic unit tests for Log.<commit_after># :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
def test_create():
'''Test creating a Log instance.'''
log = Log(name='bark.test.log')
assert log.items() == [('name', 'bark.test.log')]
def test_string_representation():
'''Test string representation of Log instance.'''
log = Log(name='bark.test.log')
assert str(log) == "{'name': 'bark.test.log'}"
def test_length():
'''Test len method returns number of current keys.'''
log = Log(name='bark.test.log')
assert len(log) == 1
log['message'] = 'A message'
assert len(log) == 2
def test_setting_and_getting_item():
'''Test setting and getting key value pair.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
def test_delete_item():
'''Test removing an item.'''
log = Log()
assert len(log) == 0
log['message'] = 'A message'
assert len(log) == 1
assert log['message'] == 'A message'
del log['message']
assert len(log) == 0
with pytest.raises(KeyError):
log['message']
|
|
b5a5f381d69260df65248d0e2ff99b9a7c47ffe7
|
tests/test_general.py
|
tests/test_general.py
|
import os
import unittest
from mpower.opr import OPR
from mpower.store import Store
from . import MP_ACCESS_TOKENS
class TestGeneral(unittest.TestCase):
"""General/Miscellaneous tests"""
def setUp(self):
# Your MPower developer tokens
self.store = Store({"name":"FooBar store"})
self.opr_data = {'total_amount': 345, 'description': "Hello World",
"account_alias":"0266636984"}
self.opr = OPR(self.opr_data, self.store)
def tearDown(self):
self.opr = None
self.store = None
self.opr_data = None
def test_runtime_configs(self):
self.assertEqual(MP_ACCESS_TOKENS,
self.opr.runtime_configs)
def test_system_configs_env(self):
os.environ['MP-Master-Key'] = "5b9f531a-fbb8-487a-8045-3b4c7ac5acee"
os.environ['MP-Private-Key'] = "test_private_oGslgmzSNL3RSkjlsnPOsZZg9IA"
os.environ['MP-Token'] = "ff1d576409b2587cc1c2"
self.assertTrue(self.opr.config)
def test_rsc_endpoints(self):
endpoint = 'checkout-invoice/confirm/test_98567JGF'
url= self.opr.get_rsc_endpoint(endpoint)
self.assertTrue(url.startswith('https') and url.endswith(endpoint))
def test_add_headers(self):
header = {'Foo':'Bar'}
self.opr.add_header(header)
self.assertTrue("Foo" in self.opr.headers.keys())
self.assertFalse('FooBar' in self.opr.headers.keys())
if __name__ == '__main__':
unittest.main()
|
Add a 'general' unit tests file
|
Add a 'general' unit tests file
This 'general' unit tests is responsible for all
miscellaneous/general functionalities
|
Python
|
mit
|
mawuli/mpower-python,rpip/mpower-python
|
Add a 'general' unit tests file
This 'general' unit tests is responsible for all
miscellaneous/general functionalities
|
import os
import unittest
from mpower.opr import OPR
from mpower.store import Store
from . import MP_ACCESS_TOKENS
class TestGeneral(unittest.TestCase):
"""General/Miscellaneous tests"""
def setUp(self):
# Your MPower developer tokens
self.store = Store({"name":"FooBar store"})
self.opr_data = {'total_amount': 345, 'description': "Hello World",
"account_alias":"0266636984"}
self.opr = OPR(self.opr_data, self.store)
def tearDown(self):
self.opr = None
self.store = None
self.opr_data = None
def test_runtime_configs(self):
self.assertEqual(MP_ACCESS_TOKENS,
self.opr.runtime_configs)
def test_system_configs_env(self):
os.environ['MP-Master-Key'] = "5b9f531a-fbb8-487a-8045-3b4c7ac5acee"
os.environ['MP-Private-Key'] = "test_private_oGslgmzSNL3RSkjlsnPOsZZg9IA"
os.environ['MP-Token'] = "ff1d576409b2587cc1c2"
self.assertTrue(self.opr.config)
def test_rsc_endpoints(self):
endpoint = 'checkout-invoice/confirm/test_98567JGF'
url= self.opr.get_rsc_endpoint(endpoint)
self.assertTrue(url.startswith('https') and url.endswith(endpoint))
def test_add_headers(self):
header = {'Foo':'Bar'}
self.opr.add_header(header)
self.assertTrue("Foo" in self.opr.headers.keys())
self.assertFalse('FooBar' in self.opr.headers.keys())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a 'general' unit tests file
This 'general' unit tests is responsible for all
miscellaneous/general functionalities<commit_after>
|
import os
import unittest
from mpower.opr import OPR
from mpower.store import Store
from . import MP_ACCESS_TOKENS
class TestGeneral(unittest.TestCase):
"""General/Miscellaneous tests"""
def setUp(self):
# Your MPower developer tokens
self.store = Store({"name":"FooBar store"})
self.opr_data = {'total_amount': 345, 'description': "Hello World",
"account_alias":"0266636984"}
self.opr = OPR(self.opr_data, self.store)
def tearDown(self):
self.opr = None
self.store = None
self.opr_data = None
def test_runtime_configs(self):
self.assertEqual(MP_ACCESS_TOKENS,
self.opr.runtime_configs)
def test_system_configs_env(self):
os.environ['MP-Master-Key'] = "5b9f531a-fbb8-487a-8045-3b4c7ac5acee"
os.environ['MP-Private-Key'] = "test_private_oGslgmzSNL3RSkjlsnPOsZZg9IA"
os.environ['MP-Token'] = "ff1d576409b2587cc1c2"
self.assertTrue(self.opr.config)
def test_rsc_endpoints(self):
endpoint = 'checkout-invoice/confirm/test_98567JGF'
url= self.opr.get_rsc_endpoint(endpoint)
self.assertTrue(url.startswith('https') and url.endswith(endpoint))
def test_add_headers(self):
header = {'Foo':'Bar'}
self.opr.add_header(header)
self.assertTrue("Foo" in self.opr.headers.keys())
self.assertFalse('FooBar' in self.opr.headers.keys())
if __name__ == '__main__':
unittest.main()
|
Add a 'general' unit tests file
This 'general' unit tests is responsible for all
miscellaneous/general functionalitiesimport os
import unittest
from mpower.opr import OPR
from mpower.store import Store
from . import MP_ACCESS_TOKENS
class TestGeneral(unittest.TestCase):
"""General/Miscellaneous tests"""
def setUp(self):
# Your MPower developer tokens
self.store = Store({"name":"FooBar store"})
self.opr_data = {'total_amount': 345, 'description': "Hello World",
"account_alias":"0266636984"}
self.opr = OPR(self.opr_data, self.store)
def tearDown(self):
self.opr = None
self.store = None
self.opr_data = None
def test_runtime_configs(self):
self.assertEqual(MP_ACCESS_TOKENS,
self.opr.runtime_configs)
def test_system_configs_env(self):
os.environ['MP-Master-Key'] = "5b9f531a-fbb8-487a-8045-3b4c7ac5acee"
os.environ['MP-Private-Key'] = "test_private_oGslgmzSNL3RSkjlsnPOsZZg9IA"
os.environ['MP-Token'] = "ff1d576409b2587cc1c2"
self.assertTrue(self.opr.config)
def test_rsc_endpoints(self):
endpoint = 'checkout-invoice/confirm/test_98567JGF'
url= self.opr.get_rsc_endpoint(endpoint)
self.assertTrue(url.startswith('https') and url.endswith(endpoint))
def test_add_headers(self):
header = {'Foo':'Bar'}
self.opr.add_header(header)
self.assertTrue("Foo" in self.opr.headers.keys())
self.assertFalse('FooBar' in self.opr.headers.keys())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a 'general' unit tests file
This 'general' unit tests is responsible for all
miscellaneous/general functionalities<commit_after>import os
import unittest
from mpower.opr import OPR
from mpower.store import Store
from . import MP_ACCESS_TOKENS
class TestGeneral(unittest.TestCase):
"""General/Miscellaneous tests"""
def setUp(self):
# Your MPower developer tokens
self.store = Store({"name":"FooBar store"})
self.opr_data = {'total_amount': 345, 'description': "Hello World",
"account_alias":"0266636984"}
self.opr = OPR(self.opr_data, self.store)
def tearDown(self):
self.opr = None
self.store = None
self.opr_data = None
def test_runtime_configs(self):
self.assertEqual(MP_ACCESS_TOKENS,
self.opr.runtime_configs)
def test_system_configs_env(self):
os.environ['MP-Master-Key'] = "5b9f531a-fbb8-487a-8045-3b4c7ac5acee"
os.environ['MP-Private-Key'] = "test_private_oGslgmzSNL3RSkjlsnPOsZZg9IA"
os.environ['MP-Token'] = "ff1d576409b2587cc1c2"
self.assertTrue(self.opr.config)
def test_rsc_endpoints(self):
endpoint = 'checkout-invoice/confirm/test_98567JGF'
url= self.opr.get_rsc_endpoint(endpoint)
self.assertTrue(url.startswith('https') and url.endswith(endpoint))
def test_add_headers(self):
header = {'Foo':'Bar'}
self.opr.add_header(header)
self.assertTrue("Foo" in self.opr.headers.keys())
self.assertFalse('FooBar' in self.opr.headers.keys())
if __name__ == '__main__':
unittest.main()
|
|
5ae52b9e16b073322550f0a7ed9d560f5f823847
|
tests/test_hackage.py
|
tests/test_hackage.py
|
from tests.helper import ExternalVersionTestCase
class HackageTest(ExternalVersionTestCase):
def test_hackage(self):
self.assertEqual(self.sync_get_version("sessions", {"hackage": None}), "2008.7.18")
|
Add a testcase for Hackage
|
Add a testcase for Hackage
|
Python
|
mit
|
lilydjwg/nvchecker
|
Add a testcase for Hackage
|
from tests.helper import ExternalVersionTestCase
class HackageTest(ExternalVersionTestCase):
def test_hackage(self):
self.assertEqual(self.sync_get_version("sessions", {"hackage": None}), "2008.7.18")
|
<commit_before><commit_msg>Add a testcase for Hackage<commit_after>
|
from tests.helper import ExternalVersionTestCase
class HackageTest(ExternalVersionTestCase):
def test_hackage(self):
self.assertEqual(self.sync_get_version("sessions", {"hackage": None}), "2008.7.18")
|
Add a testcase for Hackagefrom tests.helper import ExternalVersionTestCase
class HackageTest(ExternalVersionTestCase):
def test_hackage(self):
self.assertEqual(self.sync_get_version("sessions", {"hackage": None}), "2008.7.18")
|
<commit_before><commit_msg>Add a testcase for Hackage<commit_after>from tests.helper import ExternalVersionTestCase
class HackageTest(ExternalVersionTestCase):
def test_hackage(self):
self.assertEqual(self.sync_get_version("sessions", {"hackage": None}), "2008.7.18")
|
|
9320338b7edebdf864bfe8d3dd6cfb1a5c2f868d
|
util/calc_ir_table.py
|
util/calc_ir_table.py
|
#! /usr/bin/env python
import sys

# `l` and `aij` were undefined in the original script; `l` is assumed to
# hold the raw calibration words and `aij` collects the converted coefficients.
l = []    # populate with the raw words before running
aij = []
it = iter(l)
# Consume the words in pairs and combine each pair into one coefficient.
for i, j in zip(it, it):
    aij.append(i / 2**39 + j / 2**32)
# Print the coefficients as a C float table, eight entries per line.
for i in range(0, len(aij)):
    sys.stdout.write("{0:.10e}f, ".format(aij[i]))
    if (i + 1) % 8 == 0:
        print()
|
Add MLX IR table generator.
|
Add MLX IR table generator.
|
Python
|
mit
|
iabdalkader/openmv,iabdalkader/openmv,kwagyeman/openmv,openmv/openmv,kwagyeman/openmv,openmv/openmv,openmv/openmv,openmv/openmv,kwagyeman/openmv,iabdalkader/openmv,kwagyeman/openmv,iabdalkader/openmv
|
Add MLX IR table generator.
|
#! /usr/bin/env python
import sys

# `l` and `aij` were undefined in the original script; `l` is assumed to
# hold the raw calibration words and `aij` collects the converted coefficients.
l = []    # populate with the raw words before running
aij = []
it = iter(l)
# Consume the words in pairs and combine each pair into one coefficient.
for i, j in zip(it, it):
    aij.append(i / 2**39 + j / 2**32)
# Print the coefficients as a C float table, eight entries per line.
for i in range(0, len(aij)):
    sys.stdout.write("{0:.10e}f, ".format(aij[i]))
    if (i + 1) % 8 == 0:
        print()
|
<commit_before><commit_msg>Add MLX IR table generator.<commit_after>
|
#! /usr/bin/env python
import sys

# `l` and `aij` were undefined in the original script; `l` is assumed to
# hold the raw calibration words and `aij` collects the converted coefficients.
l = []    # populate with the raw words before running
aij = []
it = iter(l)
# Consume the words in pairs and combine each pair into one coefficient.
for i, j in zip(it, it):
    aij.append(i / 2**39 + j / 2**32)
# Print the coefficients as a C float table, eight entries per line.
for i in range(0, len(aij)):
    sys.stdout.write("{0:.10e}f, ".format(aij[i]))
    if (i + 1) % 8 == 0:
        print()
|
Add MLX IR table generator.#! /usr/bin/env python
import sys

# `l` and `aij` were undefined in the original script; `l` is assumed to
# hold the raw calibration words and `aij` collects the converted coefficients.
l = []    # populate with the raw words before running
aij = []
it = iter(l)
# Consume the words in pairs and combine each pair into one coefficient.
for i, j in zip(it, it):
    aij.append(i / 2**39 + j / 2**32)
# Print the coefficients as a C float table, eight entries per line.
for i in range(0, len(aij)):
    sys.stdout.write("{0:.10e}f, ".format(aij[i]))
    if (i + 1) % 8 == 0:
        print()
|
<commit_before><commit_msg>Add MLX IR table generator.<commit_after>#! /usr/bin/env python
import sys

# `l` and `aij` were undefined in the original script; `l` is assumed to
# hold the raw calibration words and `aij` collects the converted coefficients.
l = []    # populate with the raw words before running
aij = []
it = iter(l)
# Consume the words in pairs and combine each pair into one coefficient.
for i, j in zip(it, it):
    aij.append(i / 2**39 + j / 2**32)
# Print the coefficients as a C float table, eight entries per line.
for i in range(0, len(aij)):
    sys.stdout.write("{0:.10e}f, ".format(aij[i]))
    if (i + 1) % 8 == 0:
        print()
|
|
e29dc1788292930d4d9585b5ef764ffdf567ade3
|
show_usbcamera.py
|
show_usbcamera.py
|
#! /usr/bin/env python
#
# Show the USB camera
#
#
# External dependencies
#
import sys
from PySide import QtGui
import VisionToolkit as vtk
#
# Main application
#
if __name__ == '__main__' :
application = QtGui.QApplication( sys.argv )
widget = vtk.UsbCameraWidget()
widget.show()
sys.exit( application.exec_() )
|
Add a script to display a USB camera.
|
Add a script to display a USB camera.
|
Python
|
mit
|
microy/VisionToolkit,microy/PyStereoVisionToolkit,microy/VisionToolkit,microy/StereoVision,microy/StereoVision,microy/PyStereoVisionToolkit
|
Add a script to display a USB camera.
|
#! /usr/bin/env python
#
# Show the USB camera
#
#
# External dependencies
#
import sys
from PySide import QtGui
import VisionToolkit as vtk
#
# Main application
#
if __name__ == '__main__' :
application = QtGui.QApplication( sys.argv )
widget = vtk.UsbCameraWidget()
widget.show()
sys.exit( application.exec_() )
|
<commit_before><commit_msg>Add a script to display a USB camera.<commit_after>
|
#! /usr/bin/env python
#
# Show the USB camera
#
#
# External dependencies
#
import sys
from PySide import QtGui
import VisionToolkit as vtk
#
# Main application
#
if __name__ == '__main__' :
application = QtGui.QApplication( sys.argv )
widget = vtk.UsbCameraWidget()
widget.show()
sys.exit( application.exec_() )
|
Add a script to display a USB camera.#! /usr/bin/env python
#
# Show the USB camera
#
#
# External dependencies
#
import sys
from PySide import QtGui
import VisionToolkit as vtk
#
# Main application
#
if __name__ == '__main__' :
application = QtGui.QApplication( sys.argv )
widget = vtk.UsbCameraWidget()
widget.show()
sys.exit( application.exec_() )
|
<commit_before><commit_msg>Add a script to display a USB camera.<commit_after>#! /usr/bin/env python
#
# Show the USB camera
#
#
# External dependencies
#
import sys
from PySide import QtGui
import VisionToolkit as vtk
#
# Main application
#
if __name__ == '__main__' :
application = QtGui.QApplication( sys.argv )
widget = vtk.UsbCameraWidget()
widget.show()
sys.exit( application.exec_() )
|
|
db9772f5cd856f4fa66625d229982e2546574f59
|
avalonstar/apps/subscribers/migrations/0005_count.py
|
avalonstar/apps/subscribers/migrations/0005_count.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('subscribers', '0004_auto_20150224_1454'),
]
operations = [
migrations.CreateModel(
name='Count',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('total', models.IntegerField()),
('timestamp', models.DateTimeField(default=datetime.datetime(2015, 3, 4, 1, 52, 0, 594138, tzinfo=utc))),
],
options={
'ordering': ['timestamp'],
},
bases=(models.Model,),
),
]
|
Add a migration for Count.
|
Add a migration for Count.
|
Python
|
apache-2.0
|
bryanveloso/avalonstar-tv,bryanveloso/avalonstar-tv,bryanveloso/avalonstar-tv
|
Add a migration for Count.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('subscribers', '0004_auto_20150224_1454'),
]
operations = [
migrations.CreateModel(
name='Count',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('total', models.IntegerField()),
('timestamp', models.DateTimeField(default=datetime.datetime(2015, 3, 4, 1, 52, 0, 594138, tzinfo=utc))),
],
options={
'ordering': ['timestamp'],
},
bases=(models.Model,),
),
]
|
<commit_before><commit_msg>Add a migration for Count.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('subscribers', '0004_auto_20150224_1454'),
]
operations = [
migrations.CreateModel(
name='Count',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('total', models.IntegerField()),
('timestamp', models.DateTimeField(default=datetime.datetime(2015, 3, 4, 1, 52, 0, 594138, tzinfo=utc))),
],
options={
'ordering': ['timestamp'],
},
bases=(models.Model,),
),
]
|
Add a migration for Count.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('subscribers', '0004_auto_20150224_1454'),
]
operations = [
migrations.CreateModel(
name='Count',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('total', models.IntegerField()),
('timestamp', models.DateTimeField(default=datetime.datetime(2015, 3, 4, 1, 52, 0, 594138, tzinfo=utc))),
],
options={
'ordering': ['timestamp'],
},
bases=(models.Model,),
),
]
|
<commit_before><commit_msg>Add a migration for Count.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('subscribers', '0004_auto_20150224_1454'),
]
operations = [
migrations.CreateModel(
name='Count',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('total', models.IntegerField()),
('timestamp', models.DateTimeField(default=datetime.datetime(2015, 3, 4, 1, 52, 0, 594138, tzinfo=utc))),
],
options={
'ordering': ['timestamp'],
},
bases=(models.Model,),
),
]
|
|
1d94290cdf5b0a111924ff9aa73d69b9e0f41165
|
scripts/update_thanks.py
|
scripts/update_thanks.py
|
#!/usr/bin/env python
# Usage: git log --format="%an <%ae>" | python update_thanks.py
# You will get a result.txt file, you can work with the file (update, remove, ...)
#
# Install
# =======
# pip install validate_email pyDNS
#
from __future__ import print_function
import os
import sys
from validate_email import validate_email
from email.utils import parseaddr
import DNS.Base
addresses = set()
bad_addresses = set()
collection = []
lines = list(reversed(sys.stdin.readlines()))
for author in map(str.strip, lines):
realname, email_address = parseaddr(author)
if email_address not in addresses:
if email_address in bad_addresses:
continue
else:
try:
value = validate_email(email_address)
if value:
addresses.add(email_address)
collection.append(author)
else:
bad_addresses.add(email_address)
except DNS.Base.TimeoutError:
bad_addresses.add(email_address)
with open('result.txt', 'w') as output:
output.write('\n'.join(collection))
|
Add the script for the THANKS file
|
Add the script for the THANKS file
|
Python
|
mit
|
prezi/gunicorn,malept/gunicorn,jamesblunt/gunicorn,mvaled/gunicorn,prezi/gunicorn,harrisonfeng/gunicorn,GitHublong/gunicorn,keakon/gunicorn,zhoucen/gunicorn,malept/gunicorn,zhoucen/gunicorn,ccl0326/gunicorn,jamesblunt/gunicorn,mvaled/gunicorn,ccl0326/gunicorn,gtrdotmcs/gunicorn,1stvamp/gunicorn,WSDC-NITWarangal/gunicorn,zhoucen/gunicorn,tempbottle/gunicorn,jamesblunt/gunicorn,mvaled/gunicorn,ccl0326/gunicorn,1stvamp/gunicorn,tejasmanohar/gunicorn,1stvamp/gunicorn,elelianghh/gunicorn,z-fork/gunicorn,beni55/gunicorn,gtrdotmcs/gunicorn,gtrdotmcs/gunicorn,MrKiven/gunicorn,prezi/gunicorn,malept/gunicorn,ephes/gunicorn
|
Add the script for the THANKS file
|
#!/usr/bin/env python
# Usage: git log --format="%an <%ae>" | python update_thanks.py
# You will get a result.txt file, you can work with the file (update, remove, ...)
#
# Install
# =======
# pip install validate_email pyDNS
#
from __future__ import print_function
import os
import sys
from validate_email import validate_email
from email.utils import parseaddr
import DNS.Base
addresses = set()
bad_addresses = set()
collection = []
lines = list(reversed(sys.stdin.readlines()))
for author in map(str.strip, lines):
realname, email_address = parseaddr(author)
if email_address not in addresses:
if email_address in bad_addresses:
continue
else:
try:
value = validate_email(email_address)
if value:
addresses.add(email_address)
collection.append(author)
else:
bad_addresses.add(email_address)
except DNS.Base.TimeoutError:
bad_addresses.add(email_address)
with open('result.txt', 'w') as output:
output.write('\n'.join(collection))
|
<commit_before><commit_msg>Add the script for the THANKS file<commit_after>
|
#!/usr/bin/env python
# Usage: git log --format="%an <%ae>" | python update_thanks.py
# You will get a result.txt file, you can work with the file (update, remove, ...)
#
# Install
# =======
# pip install validate_email pyDNS
#
from __future__ import print_function
import os
import sys
from validate_email import validate_email
from email.utils import parseaddr
import DNS.Base
addresses = set()
bad_addresses = set()
collection = []
lines = list(reversed(sys.stdin.readlines()))
for author in map(str.strip, lines):
realname, email_address = parseaddr(author)
if email_address not in addresses:
if email_address in bad_addresses:
continue
else:
try:
value = validate_email(email_address)
if value:
addresses.add(email_address)
collection.append(author)
else:
bad_addresses.add(email_address)
except DNS.Base.TimeoutError:
bad_addresses.add(email_address)
with open('result.txt', 'w') as output:
output.write('\n'.join(collection))
|
Add the script for the THANKS file
#!/usr/bin/env python
# Usage: git log --format="%an <%ae>" | python update_thanks.py
# You will get a result.txt file, you can work with the file (update, remove, ...)
#
# Install
# =======
# pip install validate_email pyDNS
#
from __future__ import print_function
import os
import sys
from validate_email import validate_email
from email.utils import parseaddr
import DNS.Base
addresses = set()
bad_addresses = set()
collection = []
lines = list(reversed(sys.stdin.readlines()))
for author in map(str.strip, lines):
realname, email_address = parseaddr(author)
if email_address not in addresses:
if email_address in bad_addresses:
continue
else:
try:
value = validate_email(email_address)
if value:
addresses.add(email_address)
collection.append(author)
else:
bad_addresses.add(email_address)
except DNS.Base.TimeoutError:
bad_addresses.add(email_address)
with open('result.txt', 'w') as output:
output.write('\n'.join(collection))
|
<commit_before><commit_msg>Add the script for the THANKS file<commit_after>#!/usr/bin/env python
# Usage: git log --format="%an <%ae>" | python update_thanks.py
# You will get a result.txt file, you can work with the file (update, remove, ...)
#
# Install
# =======
# pip install validate_email pyDNS
#
from __future__ import print_function
import os
import sys
from validate_email import validate_email
from email.utils import parseaddr
import DNS.Base
addresses = set()
bad_addresses = set()
collection = []
lines = list(reversed(sys.stdin.readlines()))
for author in map(str.strip, lines):
realname, email_address = parseaddr(author)
if email_address not in addresses:
if email_address in bad_addresses:
continue
else:
try:
value = validate_email(email_address)
if value:
addresses.add(email_address)
collection.append(author)
else:
bad_addresses.add(email_address)
except DNS.Base.TimeoutError:
bad_addresses.add(email_address)
with open('result.txt', 'w') as output:
output.write('\n'.join(collection))
|
|
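The heavy lifting in the script above is done by `email.utils.parseaddr`, which splits each `git log` line into a display name and a bare address; a quick illustration (the address is made up):

from email.utils import parseaddr

name, addr = parseaddr('Jane Doe <jane@example.com>')
# name == 'Jane Doe', addr == 'jane@example.com'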
d766bfc19ce627e5141e6ab355957eb1fa5d716d
|
sii/utils.py
|
sii/utils.py
|
import ssl
def fix_ssl_verify():
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
|
Add function to not verify ssl
|
Add function to not verify ssl
|
Python
|
mit
|
gisce/sii
|
Add function to not verify ssl
|
import ssl
def fix_ssl_verify():
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
|
<commit_before><commit_msg>Add function to not verify ssl<commit_after>
|
import ssl
def fix_ssl_verify():
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
|
Add function to not verify ssl
import ssl
def fix_ssl_verify():
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
|
<commit_before><commit_msg>Add function to not verify ssl<commit_after>import ssl
def fix_ssl_verify():
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
|
|
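A usage sketch for the helper above (Python 2, to match the library's vintage; the URL is made up). Calling it once at startup replaces the process-wide default HTTPS context, after which standard-library clients stop verifying certificates:

import urllib2
from sii.utils import fix_ssl_verify

fix_ssl_verify()
# would otherwise fail with CERTIFICATE_VERIFY_FAILED on a self-signed cert
resp = urllib2.urlopen('https://self-signed.example.com/')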
964a0f848786cdbbf0b41c46cb66d3ec1a255120
|
hacks.py
|
hacks.py
|
import ConfigParser
import requests
import urllib
def get_settings(_config_cache=[]):
config = ConfigParser.RawConfigParser()
config.read(['settings.ini'])
return config
def meetup_urls(method='groups.json'):
base_url = 'http://api.meetup.com/'
url = (base_url + method)
return (url, {'key': get_settings().get('api_keys', 'meetup')})
def top_python_groups():
url, data = meetup_urls()
data['topic']='python'
data['order'] = 'members'
data['page'] = '200'
response = requests.get(url + '?' + urllib.urlencode(data))
return response
|
Add minimal script to get top 200 Python-related Meetup groups
|
Add minimal script to get top 200 Python-related Meetup groups
|
Python
|
apache-2.0
|
paulproteus/pug-meta-organizing
|
Add minimal script to get top 200 Python-related Meetup groups
|
import ConfigParser
import requests
import urllib
def get_settings(_config_cache=[]):
config = ConfigParser.RawConfigParser()
config.read(['settings.ini'])
return config
def meetup_urls(method='groups.json'):
base_url = 'http://api.meetup.com/'
url = (base_url + method)
return (url, {'key': get_settings().get('api_keys', 'meetup')})
def top_python_groups():
url, data = meetup_urls()
data['topic']='python'
data['order'] = 'members'
data['page'] = '200'
response = requests.get(url + '?' + urllib.urlencode(data))
return response
|
<commit_before><commit_msg>Add minimal script to get top 200 Python-related Meetup groups<commit_after>
|
import ConfigParser
import requests
import urllib
def get_settings(_config_cache=[]):
config = ConfigParser.RawConfigParser()
config.read(['settings.ini'])
return config
def meetup_urls(method='groups.json'):
base_url = 'http://api.meetup.com/'
url = (base_url + method)
return (url, {'key': get_settings().get('api_keys', 'meetup')})
def top_python_groups():
url, data = meetup_urls()
data['topic']='python'
data['order'] = 'members'
data['page'] = '200'
response = requests.get(url + '?' + urllib.urlencode(data))
return response
|
Add minimal script to get top 200 Python-related Meetup groups
import ConfigParser
import requests
import urllib
def get_settings(_config_cache=[]):
config = ConfigParser.RawConfigParser()
config.read(['settings.ini'])
return config
def meetup_urls(method='groups.json'):
base_url = 'http://api.meetup.com/'
url = (base_url + method)
return (url, {'key': get_settings().get('api_keys', 'meetup')})
def top_python_groups():
url, data = meetup_urls()
data['topic']='python'
data['order'] = 'members'
data['page'] = '200'
response = requests.get(url + '?' + urllib.urlencode(data))
return response
|
<commit_before><commit_msg>Add minimal script to get top 200 Python-related Meetup groups<commit_after>import ConfigParser
import requests
import urllib
def get_settings(_config_cache=[]):
config = ConfigParser.RawConfigParser()
config.read(['settings.ini'])
return config
def meetup_urls(method='groups.json'):
base_url = 'http://api.meetup.com/'
url = (base_url + method)
return (url, {'key': get_settings().get('api_keys', 'meetup')})
def top_python_groups():
url, data = meetup_urls()
data['topic']='python'
data['order'] = 'members'
data['page'] = '200'
response = requests.get(url + '?' + urllib.urlencode(data))
return response
|
|
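Assuming the Meetup API wraps its payload in a `results` list (the key name is an assumption, not shown in the script), the response could be unpacked like so:

response = top_python_groups()
for group in response.json().get('results', [])[:5]:
    print group.get('name'), group.get('members')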
1bef42bceaa3b188f3f54d97d6927dc9c86a8598
|
text-and-point.py
|
text-and-point.py
|
# On some systems, need to import like this:
# import Image
# import ImageDraw
# On others, import like this:
from PIL import Image, ImageDraw
im = Image.new('RGB', (100, 100))
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw.text((5,5), 'Hello', red)
draw.point((50, 50), red)
im.save('f.png')
|
Add simple script to print text and a point
|
Add simple script to print text and a point
|
Python
|
mit
|
redpig2/pilhacks
|
Add simple script to print text and a point
|
# On some systems, need to import like this:
# import Image
# import ImageDraw
# On others, import like this:
from PIL import Image, ImageDraw
im = Image.new('RGB', (100, 100))
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw.text((5,5), 'Hello', red)
draw.point((50, 50), red)
im.save('f.png')
|
<commit_before><commit_msg>Add simple script to print text and a point<commit_after>
|
# On some systems, need to import like this:
# import Image
# import ImageDraw
# On others, import like this:
from PIL import Image, ImageDraw
im = Image.new('RGB', (100, 100))
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw.text((5,5), 'Hello', red)
draw.point((50, 50), red)
im.save('f.png')
|
Add simple script to print text and a point
# On some systems, need to import like this:
# import Image
# import ImageDraw
# On others, import like this:
from PIL import Image, ImageDraw
im = Image.new('RGB', (100, 100))
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw.text((5,5), 'Hello', red)
draw.point((50, 50), red)
im.save('f.png')
|
<commit_before><commit_msg>Add simple script to print text and a point<commit_after># On some systems, need to import like this:
# import Image
# import ImageDraw
# On others, import like this:
from PIL import Image, ImageDraw
im = Image.new('RGB', (100, 100))
draw = ImageDraw.Draw(im)
red = (255, 0, 0)
draw.text((5,5), 'Hello', red)
draw.point((50, 50), red)
im.save('f.png')
|
|
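`ImageDraw.text` and `ImageDraw.point` also accept the colour as the explicit `fill` keyword, which reads less ambiguously than the positional form used above:

draw.text((5, 5), 'Hello', fill=red)
draw.point((50, 50), fill=red)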
8e3f9cdc55fae71634559c83637e8ab3d69b732d
|
src/main/resources/script_templates/Hadim_Scripts/ROI/Circle_ROI_Builder.py
|
src/main/resources/script_templates/Hadim_Scripts/ROI/Circle_ROI_Builder.py
|
# @Float(label="Diameter of the circle ROI (pixel)", value=7) circle_diam
from ij.plugin.frame import RoiManager
from ij.gui import OvalRoi
rm = RoiManager.getInstance()
new_rois = []
for roi in rm.getRoisAsArray():
assert roi.getTypeAsString() == 'Point', "ROI needs to be a point"
x_center = roi.getContainedPoints()[0].x - (circle_diam / 2) + 0.5
y_center = roi.getContainedPoints()[0].y - (circle_diam / 2) + 0.5
new_roi = OvalRoi(x_center, y_center, circle_diam, circle_diam)
new_rois.append(new_roi)
rm.reset()
for new_roi in new_rois:
rm.addRoi(new_roi)
print("Done")
|
Add circle roi builder script
|
Add circle roi builder script
|
Python
|
bsd-3-clause
|
hadim/fiji_scripts,hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_tools
|
Add circle roi builder script
|
# @Float(label="Diameter of the circle ROI (pixel)", value=7) circle_diam
from ij.plugin.frame import RoiManager
from ij.gui import OvalRoi
rm = RoiManager.getInstance()
new_rois = []
for roi in rm.getRoisAsArray():
assert roi.getTypeAsString() == 'Point', "ROI needs to be a point"
x_center = roi.getContainedPoints()[0].x - (circle_diam / 2) + 0.5
y_center = roi.getContainedPoints()[0].y - (circle_diam / 2) + 0.5
new_roi = OvalRoi(x_center, y_center, circle_diam, circle_diam)
new_rois.append(new_roi)
rm.reset()
for new_roi in new_rois:
rm.addRoi(new_roi)
print("Done")
|
<commit_before><commit_msg>Add circle roi builder script<commit_after>
|
# @Float(label="Diameter of the circle ROI (pixel)", value=7) circle_diam
from ij.plugin.frame import RoiManager
from ij.gui import OvalRoi
rm = RoiManager.getInstance()
new_rois = []
for roi in rm.getRoisAsArray():
assert roi.getTypeAsString() == 'Point', "ROI needs to be a point"
x_center = roi.getContainedPoints()[0].x - (circle_diam / 2) + 0.5
y_center = roi.getContainedPoints()[0].y - (circle_diam / 2) + 0.5
new_roi = OvalRoi(x_center, y_center, circle_diam, circle_diam)
new_rois.append(new_roi)
rm.reset()
for new_roi in new_rois:
rm.addRoi(new_roi)
print("Done")
|
Add circle roi builder script
# @Float(label="Diameter of the circle ROI (pixel)", value=7) circle_diam
from ij.plugin.frame import RoiManager
from ij.gui import OvalRoi
rm = RoiManager.getInstance()
new_rois = []
for roi in rm.getRoisAsArray():
assert roi.getTypeAsString() == 'Point', "ROI needs to be a point"
x_center = roi.getContainedPoints()[0].x - (circle_diam / 2) + 0.5
y_center = roi.getContainedPoints()[0].y - (circle_diam / 2) + 0.5
new_roi = OvalRoi(x_center, y_center, circle_diam, circle_diam)
new_rois.append(new_roi)
rm.reset()
for new_roi in new_rois:
rm.addRoi(new_roi)
print("Done")
|
<commit_before><commit_msg>Add circle roi builder script<commit_after># @Float(label="Diameter of the circle ROI (pixel)", value=7) circle_diam
from ij.plugin.frame import RoiManager
from ij.gui import OvalRoi
rm = RoiManager.getInstance()
new_rois = []
for roi in rm.getRoisAsArray():
assert roi.getTypeAsString() == 'Point', "ROI needs to be a point"
x_center = roi.getContainedPoints()[0].x - (circle_diam / 2) + 0.5
y_center = roi.getContainedPoints()[0].y - (circle_diam / 2) + 0.5
new_roi = OvalRoi(x_center, y_center, circle_diam, circle_diam)
new_rois.append(new_roi)
rm.reset()
for new_roi in new_rois:
rm.addRoi(new_roi)
print("Done")
|
|
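The only subtle step in the script above is the centre-to-corner arithmetic: `OvalRoi` takes the top-left corner of the bounding box, so each point is shifted back by half the diameter, and the extra 0.5 centres the oval on the pixel. The same conversion restated as plain Python:

def circle_corner(cx, cy, diam):
    # top-left corner of the bounding box for a circle centred on pixel (cx, cy)
    return cx - diam / 2.0 + 0.5, cy - diam / 2.0 + 0.5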
43eddc3663f64e92a673c09ce52ddcd50b935842
|
ipywidgets/widgets/tests/test_widget_float.py
|
ipywidgets/widgets/tests/test_widget_float.py
|
from unittest import TestCase
from traitlets import TraitError
from ipywidgets import FloatSlider
class TestFloatSlider(TestCase):
def test_construction(self):
FloatSlider()
def test_construction_readout_format(self):
slider = FloatSlider(readout_format='$.1f')
assert slider.get_state()['readout_format'] == '$.1f'
def test_construction_invalid_readout_format(self):
with self.assertRaises(TraitError):
FloatSlider(readout_format='broken')
|
Test that the float slider uses the NumberFormat traitlet
|
Test that the float slider uses the NumberFormat traitlet
|
Python
|
bsd-3-clause
|
ipython/ipywidgets,ipython/ipywidgets,ipython/ipywidgets,ipython/ipywidgets,SylvainCorlay/ipywidgets,jupyter-widgets/ipywidgets,SylvainCorlay/ipywidgets,jupyter-widgets/ipywidgets,SylvainCorlay/ipywidgets,jupyter-widgets/ipywidgets,jupyter-widgets/ipywidgets,ipython/ipywidgets,SylvainCorlay/ipywidgets
|
Test that the float slider uses the NumberFormat traitlet
|
from unittest import TestCase
from traitlets import TraitError
from ipywidgets import FloatSlider
class TestFloatSlider(TestCase):
def test_construction(self):
FloatSlider()
def test_construction_readout_format(self):
slider = FloatSlider(readout_format='$.1f')
assert slider.get_state()['readout_format'] == '$.1f'
def test_construction_invalid_readout_format(self):
with self.assertRaises(TraitError):
FloatSlider(readout_format='broken')
|
<commit_before><commit_msg>Test that the float slider uses the NumberFormat traitlet<commit_after>
|
from unittest import TestCase
from traitlets import TraitError
from ipywidgets import FloatSlider
class TestFloatSlider(TestCase):
def test_construction(self):
FloatSlider()
def test_construction_readout_format(self):
slider = FloatSlider(readout_format='$.1f')
assert slider.get_state()['readout_format'] == '$.1f'
def test_construction_invalid_readout_format(self):
with self.assertRaises(TraitError):
FloatSlider(readout_format='broken')
|
Test that the float slider uses the NumberFormat traitlet
from unittest import TestCase
from traitlets import TraitError
from ipywidgets import FloatSlider
class TestFloatSlider(TestCase):
def test_construction(self):
FloatSlider()
def test_construction_readout_format(self):
slider = FloatSlider(readout_format='$.1f')
assert slider.get_state()['readout_format'] == '$.1f'
def test_construction_invalid_readout_format(self):
with self.assertRaises(TraitError):
FloatSlider(readout_format='broken')
|
<commit_before><commit_msg>Test that the float slider uses the NumberFormat traitlet<commit_after>
from unittest import TestCase
from traitlets import TraitError
from ipywidgets import FloatSlider
class TestFloatSlider(TestCase):
def test_construction(self):
FloatSlider()
def test_construction_readout_format(self):
slider = FloatSlider(readout_format='$.1f')
assert slider.get_state()['readout_format'] == '$.1f'
def test_construction_invalid_readout_format(self):
with self.assertRaises(TraitError):
FloatSlider(readout_format='broken')
|
|
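The `'$.1f'` string accepted above suggests `readout_format` is validated against d3-format-style specifiers rather than Python's format mini-language (an inference from the test, not stated in it); for plain specs the two overlap:

format(3.14159, '.1f')      # '3.1' -- also a valid readout_format
# format(3.14159, '$.1f')   # ValueError in Python: the '$' currency prefix
#                           # exists only in the d3-style spec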
db3749beefb5d8d33dae2044aa4ac09b1d3a0d80
|
tests/test_movingfiles.py
|
tests/test_movingfiles.py
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests moving renamed files
"""
from functional_runner import run_tvnamer, verify_out_data
def test_simple_relative_move():
    """
    """
    conf = """
    {"move_files_enable": true,
    "move_files_destination": "test/",
"batch": true}
"""
out_data = run_tvnamer(
with_files = ['scrubs.s01e01.avi'],
with_config = conf,
with_input = "")
expected_files = ['test/Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
|
Add (currently failing) test for file moving
|
Add (currently failing) test for file moving
|
Python
|
unlicense
|
dbr/tvnamer,lahwaacz/tvnamer,m42e/tvnamer
|
Add (currently failing) test for file moving
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests moving renamed files
"""
from functional_runner import run_tvnamer, verify_out_data
def test_simple_relative_move():
    """
    """
    conf = """
    {"move_files_enable": true,
    "move_files_destination": "test/",
"batch": true}
"""
out_data = run_tvnamer(
with_files = ['scrubs.s01e01.avi'],
with_config = conf,
with_input = "")
expected_files = ['test/Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
|
<commit_before><commit_msg>Add (currently failing) test for file moving<commit_after>
|
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests moving renamed files
"""
from functional_runner import run_tvnamer, verify_out_data
def test_simple_relative_move():
    """
    """
    conf = """
    {"move_files_enable": true,
    "move_files_destination": "test/",
"batch": true}
"""
out_data = run_tvnamer(
with_files = ['scrubs.s01e01.avi'],
with_config = conf,
with_input = "")
expected_files = ['test/Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
|
Add (currently failing) test for file moving
#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests moving renamed files
"""
from functional_runner import run_tvnamer, verify_out_data
def test_simple_relative_move():
    """
    """
    conf = """
    {"move_files_enable": true,
    "move_files_destination": "test/",
"batch": true}
"""
out_data = run_tvnamer(
with_files = ['scrubs.s01e01.avi'],
with_config = conf,
with_input = "")
expected_files = ['test/Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
|
<commit_before><commit_msg>Add (currently failing) test for file moving<commit_after>#!/usr/bin/env python
#encoding:utf-8
#author:dbr/Ben
#project:tvnamer
#repository:http://github.com/dbr/tvnamer
#license:Creative Commons GNU GPL v2
# http://creativecommons.org/licenses/GPL/2.0/
"""Tests moving renamed files
"""
from functional_runner import run_tvnamer, verify_out_data
def test_simple_relative_move():
    """
    """
    conf = """
    {"move_files_enable": true,
    "move_files_destination": "test/",
"batch": true}
"""
out_data = run_tvnamer(
with_files = ['scrubs.s01e01.avi'],
with_config = conf,
with_input = "")
expected_files = ['test/Scrubs - [01x01] - My First Day.avi']
verify_out_data(out_data, expected_files)
|
|
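The `conf` blob handed to `run_tvnamer` above is simply tvnamer's JSON configuration; building it with `json.dumps` avoids hand-quoting the booleans. A minimal sketch, assuming `run_tvnamer` accepts any JSON string:

import json

conf = json.dumps({
    "move_files_enable": True,
    "move_files_destination": "test/",
    "batch": True,
})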
f4f6b1af7c11389666cf5ec562fef874388eb5a3
|
loess.py
|
loess.py
|
from pylab import *
def loessInternal( x, h, xp, yp ):
w = exp( -0.5*( ((x-xp)/(2*h))**2 )/sqrt(2*pi*h**2) )
b = sum(w*xp)*sum(w*yp) - sum(w)*sum(w*xp*yp)
b /= sum(w*xp)**2 - sum(w)*sum(w*xp**2)
a = ( sum(w*yp) - b*sum(w*xp) )/sum(w)
return a + b*x
def loess(x,y,h):
"""LOESS model free bandwidth reduction.
See "Data Analysis with Open Source Tools" by P. K. Janert for fdetails.
Watch out that x and y do not become too small,
microseconds don't work. h is bandwidth in units of x"""
out = []
for k in x:
out.append( loessInternal(k, h, x, y))
return out
|
Add LOESS module for model free bandwidth reduction.
|
Add LOESS module for model free bandwidth reduction.
|
Python
|
mit
|
bennomeier/pyNMR,kourk0am/pyNMR
|
Add LOESS module for model free bandwidth reduction.
|
from pylab import *
def loessInternal( x, h, xp, yp ):
w = exp( -0.5*( ((x-xp)/(2*h))**2 )/sqrt(2*pi*h**2) )
b = sum(w*xp)*sum(w*yp) - sum(w)*sum(w*xp*yp)
b /= sum(w*xp)**2 - sum(w)*sum(w*xp**2)
a = ( sum(w*yp) - b*sum(w*xp) )/sum(w)
return a + b*x
def loess(x,y,h):
"""LOESS model free bandwidth reduction.
See "Data Analysis with Open Source Tools" by P. K. Janert for fdetails.
Watch out that x and y do not become too small,
microseconds don't work. h is bandwidth in units of x"""
out = []
for k in x:
out.append( loessInternal(k, h, x, y))
return out
|
<commit_before><commit_msg>Add LOESS module for model free bandwidth reduction.<commit_after>
|
from pylab import *
def loessInternal( x, h, xp, yp ):
w = exp( -0.5*( ((x-xp)/(2*h))**2 )/sqrt(2*pi*h**2) )
b = sum(w*xp)*sum(w*yp) - sum(w)*sum(w*xp*yp)
b /= sum(w*xp)**2 - sum(w)*sum(w*xp**2)
a = ( sum(w*yp) - b*sum(w*xp) )/sum(w)
return a + b*x
def loess(x,y,h):
"""LOESS model free bandwidth reduction.
See "Data Analysis with Open Source Tools" by P. K. Janert for fdetails.
Watch out that x and y do not become too small,
microseconds don't work. h is bandwidth in units of x"""
out = []
for k in x:
out.append( loessInternal(k, h, x, y))
return out
|
Add LOESS module for model free bandwidth reduction.
from pylab import *
def loessInternal( x, h, xp, yp ):
w = exp( -0.5*( ((x-xp)/(2*h))**2 )/sqrt(2*pi*h**2) )
b = sum(w*xp)*sum(w*yp) - sum(w)*sum(w*xp*yp)
b /= sum(w*xp)**2 - sum(w)*sum(w*xp**2)
a = ( sum(w*yp) - b*sum(w*xp) )/sum(w)
return a + b*x
def loess(x,y,h):
"""LOESS model free bandwidth reduction.
See "Data Analysis with Open Source Tools" by P. K. Janert for fdetails.
Watch out that x and y do not become too small,
microseconds don't work. h is bandwidth in units of x"""
out = []
for k in x:
out.append( loessInternal(k, h, x, y))
return out
|
<commit_before><commit_msg>Add LOESS module for model free bandwidth reduction.<commit_after>from pylab import *
def loessInternal( x, h, xp, yp ):
w = exp( -0.5*( ((x-xp)/(2*h))**2 )/sqrt(2*pi*h**2) )
b = sum(w*xp)*sum(w*yp) - sum(w)*sum(w*xp*yp)
b /= sum(w*xp)**2 - sum(w)*sum(w*xp**2)
a = ( sum(w*yp) - b*sum(w*xp) )/sum(w)
return a + b*x
def loess(x,y,h):
"""LOESS model free bandwidth reduction.
See "Data Analysis with Open Source Tools" by P. K. Janert for fdetails.
Watch out that x and y do not become too small,
microseconds don't work. h is bandwidth in units of x"""
out = []
for k in x:
out.append( loessInternal(k, h, x, y))
return out
|
|
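A short usage sketch for the module above, smoothing a synthetic noisy sine with a bandwidth of 0.5 in x-units:

import numpy as np
from loess import loess

x = np.linspace(0, 10, 200)
y = np.sin(x) + 0.3 * np.random.randn(200)
smoothed = loess(x, y, 0.5)   # one fitted value per input point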
eb25f8bd1ab344d951f78cfe1e655e4f755bebd6
|
bin/verify-identity.py
|
bin/verify-identity.py
|
"""verify-identity.py <participant_id>, <country_code>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from gratipay import wireup
from gratipay.models.participant import Participant
from gratipay.models.country import Country
wireup.db(wireup.env())
participant = Participant.from_id(int(sys.argv[1]))
country = Country.from_code(sys.argv[2])
participant.set_identity_verification(country.id, True)
|
Write a script to verify identity
|
Write a script to verify identity
|
Python
|
mit
|
gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com
|
Write a script to verify identity
|
"""verify-identity.py <participant_id>, <country_code>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from gratipay import wireup
from gratipay.models.participant import Participant
from gratipay.models.country import Country
wireup.db(wireup.env())
participant = Participant.from_id(int(sys.argv[1]))
country = Country.from_code(sys.argv[2])
participant.set_identity_verification(country.id, True)
|
<commit_before><commit_msg>Write a script to verify identity<commit_after>
|
"""verify-identity.py <participant_id>, <country_code>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from gratipay import wireup
from gratipay.models.participant import Participant
from gratipay.models.country import Country
wireup.db(wireup.env())
participant = Participant.from_id(int(sys.argv[1]))
country = Country.from_code(sys.argv[2])
participant.set_identity_verification(country.id, True)
|
Write a script to verify identity"""verify-identity.py <participant_id>, <country_code>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from gratipay import wireup
from gratipay.models.participant import Participant
from gratipay.models.country import Country
wireup.db(wireup.env())
participant = Participant.from_id(int(sys.argv[1]))
country = Country.from_code(sys.argv[2])
participant.set_identity_verification(country.id, True)
|
<commit_before><commit_msg>Write a script to verify identity<commit_after>"""verify-identity.py <participant_id>, <country_code>
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from gratipay import wireup
from gratipay.models.participant import Participant
from gratipay.models.country import Country
wireup.db(wireup.env())
participant = Participant.from_id(int(sys.argv[1]))
country = Country.from_code(sys.argv[2])
participant.set_identity_verification(country.id, True)
|
|
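The script reads both values positionally from `sys.argv`, so a typical invocation looks like this (the id and country code are made up):

    python bin/verify-identity.py 1234 US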
0eddc59a97fca233b5979d61a19f58179c57bc08
|
CodeFights/twoLines.py
|
CodeFights/twoLines.py
|
#!/usr/local/bin/python
# Code Fights Two Lines Problem
from functools import partial
def line_y(m, b, x):
return m * x + b
def twoLines(line1, line2, l, r):
line1_y = partial(line_y, *line1)
line2_y = partial(line_y, *line2)
balance = 0
for x in range(l, r + 1):
y1 = line1_y(x)
y2 = line2_y(x)
if y1 > y2:
balance += 1
elif y1 < y2:
balance -= 1
if balance > 0:
return "first"
if balance < 0:
return "second"
return "any"
def main():
tests = [
[[1, 2], [2, 1], 0, 2, "any"],
[[1, 2], [2, 1], -1, 2, "first"],
[[1, 2], [2, 1], 0, 3, "second"],
[[1, 2], [1, 0], -1000, 1000, "first"],
[[1, 0], [-1, 0], -239, 239, "any"],
[[1, 0], [-1, 0], -999, 998, "second"]
]
for t in tests:
res = twoLines(t[0], t[1], t[2], t[3])
ans = t[4]
if ans == res:
print("PASSED: twoLines({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print(("FAILED: twoLines({}, {}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], t[3], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights two lines problem
|
Solve Code Fights two lines problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights two lines problem
|
#!/usr/local/bin/python
# Code Fights Two Lines Problem
from functools import partial
def line_y(m, b, x):
return m * x + b
def twoLines(line1, line2, l, r):
line1_y = partial(line_y, *line1)
line2_y = partial(line_y, *line2)
balance = 0
for x in range(l, r + 1):
y1 = line1_y(x)
y2 = line2_y(x)
if y1 > y2:
balance += 1
elif y1 < y2:
balance -= 1
if balance > 0:
return "first"
if balance < 0:
return "second"
return "any"
def main():
tests = [
[[1, 2], [2, 1], 0, 2, "any"],
[[1, 2], [2, 1], -1, 2, "first"],
[[1, 2], [2, 1], 0, 3, "second"],
[[1, 2], [1, 0], -1000, 1000, "first"],
[[1, 0], [-1, 0], -239, 239, "any"],
[[1, 0], [-1, 0], -999, 998, "second"]
]
for t in tests:
res = twoLines(t[0], t[1], t[2], t[3])
ans = t[4]
if ans == res:
print("PASSED: twoLines({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print(("FAILED: twoLines({}, {}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], t[3], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights two lines problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Two Lines Problem
from functools import partial
def line_y(m, b, x):
return m * x + b
def twoLines(line1, line2, l, r):
line1_y = partial(line_y, *line1)
line2_y = partial(line_y, *line2)
balance = 0
for x in range(l, r + 1):
y1 = line1_y(x)
y2 = line2_y(x)
if y1 > y2:
balance += 1
elif y1 < y2:
balance -= 1
if balance > 0:
return "first"
if balance < 0:
return "second"
return "any"
def main():
tests = [
[[1, 2], [2, 1], 0, 2, "any"],
[[1, 2], [2, 1], -1, 2, "first"],
[[1, 2], [2, 1], 0, 3, "second"],
[[1, 2], [1, 0], -1000, 1000, "first"],
[[1, 0], [-1, 0], -239, 239, "any"],
[[1, 0], [-1, 0], -999, 998, "second"]
]
for t in tests:
res = twoLines(t[0], t[1], t[2], t[3])
ans = t[4]
if ans == res:
print("PASSED: twoLines({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print(("FAILED: twoLines({}, {}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], t[3], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights two lines problem
#!/usr/local/bin/python
# Code Fights Two Lines Problem
from functools import partial
def line_y(m, b, x):
return m * x + b
def twoLines(line1, line2, l, r):
line1_y = partial(line_y, *line1)
line2_y = partial(line_y, *line2)
balance = 0
for x in range(l, r + 1):
y1 = line1_y(x)
y2 = line2_y(x)
if y1 > y2:
balance += 1
elif y1 < y2:
balance -= 1
if balance > 0:
return "first"
if balance < 0:
return "second"
return "any"
def main():
tests = [
[[1, 2], [2, 1], 0, 2, "any"],
[[1, 2], [2, 1], -1, 2, "first"],
[[1, 2], [2, 1], 0, 3, "second"],
[[1, 2], [1, 0], -1000, 1000, "first"],
[[1, 0], [-1, 0], -239, 239, "any"],
[[1, 0], [-1, 0], -999, 998, "second"]
]
for t in tests:
res = twoLines(t[0], t[1], t[2], t[3])
ans = t[4]
if ans == res:
print("PASSED: twoLines({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print(("FAILED: twoLines({}, {}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], t[3], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights two lines problem<commit_after>#!/usr/local/bin/python
# Code Fights Two Lines Problem
from functools import partial
def line_y(m, b, x):
return m * x + b
def twoLines(line1, line2, l, r):
line1_y = partial(line_y, *line1)
line2_y = partial(line_y, *line2)
balance = 0
for x in range(l, r + 1):
y1 = line1_y(x)
y2 = line2_y(x)
if y1 > y2:
balance += 1
elif y1 < y2:
balance -= 1
if balance > 0:
return "first"
if balance < 0:
return "second"
return "any"
def main():
tests = [
[[1, 2], [2, 1], 0, 2, "any"],
[[1, 2], [2, 1], -1, 2, "first"],
[[1, 2], [2, 1], 0, 3, "second"],
[[1, 2], [1, 0], -1000, 1000, "first"],
[[1, 0], [-1, 0], -239, 239, "any"],
[[1, 0], [-1, 0], -999, 998, "second"]
]
for t in tests:
res = twoLines(t[0], t[1], t[2], t[3])
ans = t[4]
if ans == res:
print("PASSED: twoLines({}, {}, {}, {}) returned {}"
.format(t[0], t[1], t[2], t[3], res))
else:
print(("FAILED: twoLines({}, {}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], t[3], res, ans))
if __name__ == '__main__':
main()
|
|
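Because two straight lines cross at most once, the per-point loop above can be replaced by counting the integer points on each side of the crossing. A constant-time sketch (hypothetical, not part of the original solution; it reproduces the same answers on the six bundled tests):

import math

def two_lines_fast(line1, line2, l, r):
    (m1, b1), (m2, b2) = line1, line2
    if m1 == m2:
        if b1 == b2:
            return "any"
        return "first" if b1 > b2 else "second"
    xc = float(b2 - b1) / (m1 - m2)   # the single crossing point
    left = max(0, min(r, int(math.ceil(xc)) - 1) - l + 1)     # ints with x < xc
    right = max(0, r - max(l, int(math.floor(xc)) + 1) + 1)   # ints with x > xc
    sign = 1 if m1 > m2 else -1       # sign of y1 - y2 where x > xc
    balance = sign * (right - left)
    if balance > 0:
        return "first"
    if balance < 0:
        return "second"
    return "any"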
2594b559722efa75322e669792cf8b9ba14f5014
|
Data-Structures/Trees/Binary_Trees/binary_tree.py
|
Data-Structures/Trees/Binary_Trees/binary_tree.py
|
"""Implementation of a Binary Tree in Python."""
class BinaryTree:
def __init__(self, root_node):
self.key = root_node
self.left_node = None
self.right_node = None
def insert_left(self, new_node):
        if self.left_node is None:
            self.left_node = BinaryTree(new_node)
        else:
            tree = BinaryTree(new_node)
            tree.left_node = self.left_node
            self.left_node = tree
    def insert_right(self, new_node):
        if self.right_node is None:
self.right_node = BinaryTree(new_node)
else:
tree = BinaryTree(new_node)
tree.right_node = self.right_node
self.right_node = tree
def get_left_node(self):
return self.left_node
def get_right_node(self):
return self.right_node
def get_root_value(self):
return self.key
def set_root_node(self, new_root):
self.key = new_root
|
Add python binary tree data structure
|
Add python binary tree data structure
|
Python
|
mit
|
salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook,salman-bhai/DS-Algo-Handbook
|
Add python binary tree data structure
|
"""Implementation of a Binary Tree in Python."""
class BinaryTree:
def __init__(self, root_node):
self.key = root_node
self.left_node = None
self.right_node = None
def insert_left(self, new_node):
        if self.left_node is None:
            self.left_node = BinaryTree(new_node)
        else:
            tree = BinaryTree(new_node)
            tree.left_node = self.left_node
            self.left_node = tree
    def insert_right(self, new_node):
        if self.right_node is None:
self.right_node = BinaryTree(new_node)
else:
tree = BinaryTree(new_node)
tree.right_node = self.right_node
self.right_node = tree
def get_left_node(self):
return self.left_node
def get_right_node(self):
return self.right_node
def get_root_value(self):
return self.key
def set_root_node(self, new_root):
self.key = new_root
|
<commit_before><commit_msg>Add python binary tree data structure<commit_after>
|
"""Implementation of a Binary Tree in Python."""
class BinaryTree:
def __init__(self, root_node):
self.key = root_node
self.left_node = None
self.right_node = None
def insert_left(self, new_node):
        if self.left_node is None:
            self.left_node = BinaryTree(new_node)
        else:
            tree = BinaryTree(new_node)
            tree.left_node = self.left_node
            self.left_node = tree
    def insert_right(self, new_node):
        if self.right_node is None:
self.right_node = BinaryTree(new_node)
else:
tree = BinaryTree(new_node)
tree.right_node = self.right_node
self.right_node = tree
def get_left_node(self):
return self.left_node
def get_right_node(self):
return self.right_node
def get_root_value(self):
return self.key
def set_root_node(self, new_root):
self.key = new_root
|
Add python binary tree data structure"""Implementation of a Binary Tree in Python."""
class BinaryTree:
def __init__(self, root_node):
self.key = root_node
self.left_node = None
self.right_node = None
def insert_left(self, new_node):
        if self.left_node is None:
            self.left_node = BinaryTree(new_node)
        else:
            tree = BinaryTree(new_node)
            tree.left_node = self.left_node
            self.left_node = tree
    def insert_right(self, new_node):
        if self.right_node is None:
self.right_node = BinaryTree(new_node)
else:
tree = BinaryTree(new_node)
tree.right_node = self.right_node
self.right_node = tree
def get_left_node(self):
return self.left_node
def get_right_node(self):
return self.right_node
def get_root_value(self):
return self.key
def set_root_node(self, new_root):
self.key = new_root
|
<commit_before><commit_msg>Add python binary tree data structure<commit_after>"""Implementation of a Binary Tree in Python."""
class BinaryTree:
def __init__(self, root_node):
self.key = root_node
self.left_node = None
self.right_node = None
def insert_left(self, new_node):
        if self.left_node is None:
            self.left_node = BinaryTree(new_node)
        else:
            tree = BinaryTree(new_node)
            tree.left_node = self.left_node
            self.left_node = tree
    def insert_right(self, new_node):
        if self.right_node is None:
self.right_node = BinaryTree(new_node)
else:
tree = BinaryTree(new_node)
tree.right_node = self.right_node
self.right_node = tree
def get_left_node(self):
return self.left_node
def get_right_node(self):
return self.right_node
def get_root_value(self):
return self.key
def set_root_node(self, new_root):
self.key = new_root
|
|
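A quick usage sketch for the class above; note that inserting over an existing child pushes the old subtree down a level:

tree = BinaryTree('a')
tree.insert_left('b')
tree.insert_right('c')
tree.insert_left('d')   # 'd' becomes the left child, 'b' moves under it
print(tree.get_left_node().get_root_value())                   # 'd'
print(tree.get_left_node().get_left_node().get_root_value())   # 'b'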
ff81d21d5e68e916282b61b3c65bf0af41a1bad8
|
app/scripts/po_stats.py
|
app/scripts/po_stats.py
|
#! /usr/bin/env python
import argparse
import glob
import json
import os
import subprocess
import sys
# Import local libraries
library_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'libraries'))
# Polib library (https://bitbucket.org/izi/polib)
polib_path = os.path.join(library_path, 'polib')
if not os.path.isdir(polib_path):
try:
print 'Cloning polib...'
cmd_status = subprocess.check_output(
'hg clone https://bitbucket.org/izi/polib/ %s -u 1.0.7' % polib_path,
stderr=subprocess.STDOUT,
shell=True)
print cmd_status
except Exception as e:
print e
sys.path.append(os.path.join(polib_path))
try:
import polib
except ImportError:
print 'Error importing polib library'
sys.exit(1)
def create_file_list(repo_folder, locale, source_pattern):
''' Search for files to analyze '''
# Get a list of all reference files, since source_pattern can use wildcards
locale_files = glob.glob(
os.path.join(repo_folder, locale, source_pattern)
)
locale_files.sort()
return locale_files
def analyze_files(repo_folder, locale, source_pattern):
''' Analyze files, returning an array with stats and errors '''
global_stats = {}
# Get a list of all files for the reference locale
locale_files = create_file_list(repo_folder, locale, source_pattern)
for locale_file in locale_files:
fuzzy = 0
total = 0
translated = 0
untranslated = 0
try:
po = polib.pofile(locale_file)
fuzzy = len(po.fuzzy_entries())
translated = len(po.translated_entries())
untranslated = len(po.untranslated_entries())
except Exception as e:
print e
sys.exit(1)
total = translated + untranslated + fuzzy
source_index = os.path.basename(locale_file)
global_stats[source_index] = {
'fuzzy': fuzzy,
'total': total,
'translated': translated,
'untranslated': untranslated
}
return global_stats
def main():
parser = argparse.ArgumentParser()
parser.add_argument('repo_folder', help='Path to repository')
parser.add_argument(
'source_pattern', help='Source file pattern (wildcards are supported)')
parser.add_argument('locale', help='Locale code to analyze')
parser.add_argument('--pretty', action='store_true',
help='export indented and more readable JSON')
args = parser.parse_args()
global_stats = analyze_files(
args.repo_folder, args.locale, args.source_pattern)
if args.pretty:
print json.dumps(global_stats, sort_keys=True, indent=2)
else:
print json.dumps(global_stats)
if __name__ == '__main__':
main()
|
Create script using polib to generate stats for gettext file
|
Create script using polib to generate stats for gettext file
|
Python
|
mpl-2.0
|
flodolo/webstatus,mozilla-l10n/webstatus,mozilla-l10n/webstatus,flodolo/webstatus,flodolo/webstatus,mozilla-l10n/webstatus,mozilla-l10n/webstatus,flodolo/webstatus,mozilla-l10n/webstatus,flodolo/webstatus
|
Create script using polib to generate stats for gettext file
|
#! /usr/bin/env python
import argparse
import glob
import json
import os
import subprocess
import sys
# Import local libraries
library_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'libraries'))
# Polib library (https://bitbucket.org/izi/polib)
polib_path = os.path.join(library_path, 'polib')
if not os.path.isdir(polib_path):
try:
print 'Cloning polib...'
cmd_status = subprocess.check_output(
'hg clone https://bitbucket.org/izi/polib/ %s -u 1.0.7' % polib_path,
stderr=subprocess.STDOUT,
shell=True)
print cmd_status
except Exception as e:
print e
sys.path.append(os.path.join(polib_path))
try:
import polib
except ImportError:
print 'Error importing polib library'
sys.exit(1)
def create_file_list(repo_folder, locale, source_pattern):
''' Search for files to analyze '''
# Get a list of all reference files, since source_pattern can use wildcards
locale_files = glob.glob(
os.path.join(repo_folder, locale, source_pattern)
)
locale_files.sort()
return locale_files
def analyze_files(repo_folder, locale, source_pattern):
''' Analyze files, returning an array with stats and errors '''
global_stats = {}
# Get a list of all files for the reference locale
locale_files = create_file_list(repo_folder, locale, source_pattern)
for locale_file in locale_files:
fuzzy = 0
total = 0
translated = 0
untranslated = 0
try:
po = polib.pofile(locale_file)
fuzzy = len(po.fuzzy_entries())
translated = len(po.translated_entries())
untranslated = len(po.untranslated_entries())
except Exception as e:
print e
sys.exit(1)
total = translated + untranslated + fuzzy
source_index = os.path.basename(locale_file)
global_stats[source_index] = {
'fuzzy': fuzzy,
'total': total,
'translated': translated,
'untranslated': untranslated
}
return global_stats
def main():
parser = argparse.ArgumentParser()
parser.add_argument('repo_folder', help='Path to repository')
parser.add_argument(
'source_pattern', help='Source file pattern (wildcards are supported)')
parser.add_argument('locale', help='Locale code to analyze')
parser.add_argument('--pretty', action='store_true',
help='export indented and more readable JSON')
args = parser.parse_args()
global_stats = analyze_files(
args.repo_folder, args.locale, args.source_pattern)
if args.pretty:
print json.dumps(global_stats, sort_keys=True, indent=2)
else:
print json.dumps(global_stats)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create script using polib to generate stats for gettext file<commit_after>
|
#! /usr/bin/env python
import argparse
import glob
import json
import os
import subprocess
import sys
# Import local libraries
library_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'libraries'))
# Polib library (https://bitbucket.org/izi/polib)
polib_path = os.path.join(library_path, 'polib')
if not os.path.isdir(polib_path):
try:
print 'Cloning polib...'
cmd_status = subprocess.check_output(
'hg clone https://bitbucket.org/izi/polib/ %s -u 1.0.7' % polib_path,
stderr=subprocess.STDOUT,
shell=True)
print cmd_status
except Exception as e:
print e
sys.path.append(os.path.join(polib_path))
try:
import polib
except ImportError:
print 'Error importing polib library'
sys.exit(1)
def create_file_list(repo_folder, locale, source_pattern):
''' Search for files to analyze '''
# Get a list of all reference files, since source_pattern can use wildcards
locale_files = glob.glob(
os.path.join(repo_folder, locale, source_pattern)
)
locale_files.sort()
return locale_files
def analyze_files(repo_folder, locale, source_pattern):
''' Analyze files, returning an array with stats and errors '''
global_stats = {}
# Get a list of all files for the reference locale
locale_files = create_file_list(repo_folder, locale, source_pattern)
for locale_file in locale_files:
fuzzy = 0
total = 0
translated = 0
untranslated = 0
try:
po = polib.pofile(locale_file)
fuzzy = len(po.fuzzy_entries())
translated = len(po.translated_entries())
untranslated = len(po.untranslated_entries())
except Exception as e:
print e
sys.exit(1)
total = translated + untranslated + fuzzy
source_index = os.path.basename(locale_file)
global_stats[source_index] = {
'fuzzy': fuzzy,
'total': total,
'translated': translated,
'untranslated': untranslated
}
return global_stats
def main():
parser = argparse.ArgumentParser()
parser.add_argument('repo_folder', help='Path to repository')
parser.add_argument(
'source_pattern', help='Source file pattern (wildcards are supported)')
parser.add_argument('locale', help='Locale code to analyze')
parser.add_argument('--pretty', action='store_true',
help='export indented and more readable JSON')
args = parser.parse_args()
global_stats = analyze_files(
args.repo_folder, args.locale, args.source_pattern)
if args.pretty:
print json.dumps(global_stats, sort_keys=True, indent=2)
else:
print json.dumps(global_stats)
if __name__ == '__main__':
main()
|
Create script using polib to generate stats for gettext file
#! /usr/bin/env python
import argparse
import glob
import json
import os
import subprocess
import sys
# Import local libraries
library_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'libraries'))
# Polib library (https://bitbucket.org/izi/polib)
polib_path = os.path.join(library_path, 'polib')
if not os.path.isdir(polib_path):
try:
print 'Cloning polib...'
cmd_status = subprocess.check_output(
'hg clone https://bitbucket.org/izi/polib/ %s -u 1.0.7' % polib_path,
stderr=subprocess.STDOUT,
shell=True)
print cmd_status
except Exception as e:
print e
sys.path.append(os.path.join(polib_path))
try:
import polib
except ImportError:
print 'Error importing polib library'
sys.exit(1)
def create_file_list(repo_folder, locale, source_pattern):
''' Search for files to analyze '''
# Get a list of all reference files, since source_pattern can use wildcards
locale_files = glob.glob(
os.path.join(repo_folder, locale, source_pattern)
)
locale_files.sort()
return locale_files
def analyze_files(repo_folder, locale, source_pattern):
''' Analyze files, returning an array with stats and errors '''
global_stats = {}
# Get a list of all files for the reference locale
locale_files = create_file_list(repo_folder, locale, source_pattern)
for locale_file in locale_files:
fuzzy = 0
total = 0
translated = 0
untranslated = 0
try:
po = polib.pofile(locale_file)
fuzzy = len(po.fuzzy_entries())
translated = len(po.translated_entries())
untranslated = len(po.untranslated_entries())
except Exception as e:
print e
sys.exit(1)
total = translated + untranslated + fuzzy
source_index = os.path.basename(locale_file)
global_stats[source_index] = {
'fuzzy': fuzzy,
'total': total,
'translated': translated,
'untranslated': untranslated
}
return global_stats
def main():
parser = argparse.ArgumentParser()
parser.add_argument('repo_folder', help='Path to repository')
parser.add_argument(
'source_pattern', help='Source file pattern (wildcards are supported)')
parser.add_argument('locale', help='Locale code to analyze')
parser.add_argument('--pretty', action='store_true',
help='export indented and more readable JSON')
args = parser.parse_args()
global_stats = analyze_files(
args.repo_folder, args.locale, args.source_pattern)
if args.pretty:
print json.dumps(global_stats, sort_keys=True, indent=2)
else:
print json.dumps(global_stats)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Create script using polib to generate stats for gettext file<commit_after>#! /usr/bin/env python
import argparse
import glob
import json
import os
import subprocess
import sys
# Import local libraries
library_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, 'libraries'))
# Polib library (https://bitbucket.org/izi/polib)
polib_path = os.path.join(library_path, 'polib')
if not os.path.isdir(polib_path):
try:
print 'Cloning polib...'
cmd_status = subprocess.check_output(
'hg clone https://bitbucket.org/izi/polib/ %s -u 1.0.7' % polib_path,
stderr=subprocess.STDOUT,
shell=True)
print cmd_status
except Exception as e:
print e
sys.path.append(os.path.join(polib_path))
try:
import polib
except ImportError:
print 'Error importing polib library'
sys.exit(1)
def create_file_list(repo_folder, locale, source_pattern):
''' Search for files to analyze '''
# Get a list of all reference files, since source_pattern can use wildcards
locale_files = glob.glob(
os.path.join(repo_folder, locale, source_pattern)
)
locale_files.sort()
return locale_files
def analyze_files(repo_folder, locale, source_pattern):
''' Analyze files, returning an array with stats and errors '''
global_stats = {}
# Get a list of all files for the reference locale
locale_files = create_file_list(repo_folder, locale, source_pattern)
for locale_file in locale_files:
fuzzy = 0
total = 0
translated = 0
untranslated = 0
try:
po = polib.pofile(locale_file)
fuzzy = len(po.fuzzy_entries())
translated = len(po.translated_entries())
untranslated = len(po.untranslated_entries())
except Exception as e:
print e
sys.exit(1)
total = translated + untranslated + fuzzy
source_index = os.path.basename(locale_file)
global_stats[source_index] = {
'fuzzy': fuzzy,
'total': total,
'translated': translated,
'untranslated': untranslated
}
return global_stats
def main():
parser = argparse.ArgumentParser()
parser.add_argument('repo_folder', help='Path to repository')
parser.add_argument(
'source_pattern', help='Source file pattern (wildcards are supported)')
parser.add_argument('locale', help='Locale code to analyze')
parser.add_argument('--pretty', action='store_true',
help='export indented and more readable JSON')
args = parser.parse_args()
global_stats = analyze_files(
args.repo_folder, args.locale, args.source_pattern)
if args.pretty:
print json.dumps(global_stats, sort_keys=True, indent=2)
else:
print json.dumps(global_stats)
if __name__ == '__main__':
main()
|
|
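Argument order for the script above is repository folder, source pattern, then locale; quoting the pattern stops the shell from expanding the wildcard early (the paths are illustrative):

    python app/scripts/po_stats.py ~/l10n-repo "*.po" fr --pretty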
20b3b6415f68a25de82e5f6a3e2a3ded93d1620f
|
cal_pipe/plot_scans.py
|
cal_pipe/plot_scans.py
|
import numpy as np
import re
ms_active = raw_input("MS? : ")
field_str = raw_input("Field? : ")
tb.open(vis+"/FIELD")
names = tb.getcol('NAME')
matches = [string for string in names if re.match(field_str, string)]
posn_matches = \
[i for i, string in enumerate(names) if re.match(field_str, string)]
if len(matches) == 0:
raise TypeError("No matches found for the given field string")
tb.open(ms_active)
scanNums = sorted(np.unique(tb.getcol('SCAN_NUMBER')))
field_scans = []
for ii in range(numFields):
subtable = tb.query('FIELD_ID==%s'%ii)
field_scans.append(list(np.unique(subtable.getcol('SCAN_NUMBER'))))
tb.close()
field_scans = [scans for i, scans in field_scans if i in posn_matches]
for ii in range(len(field_scans)):
for jj in range(len(field_scans[ii])):
default('plotms')
vis = ms_active
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
        field = matches[ii]
        scan = str(field_scans[ii][jj])
correlation = "RR,LL"
averagedata = True
avgbaseline = True
transform = False
extendflag = False
plotrange = []
title = 'Amp vs Time: Field'+matches[ii]+' Scan'+str(jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = 'field_'+matches[ii]+'_scan_'+str(jj)+'.png'
overwrite = True
showgui = False
async = False
plotms()
|
Save plots of amp vs time for each scan
|
Save plots of amp vs time for each scan
|
Python
|
mit
|
e-koch/canfar_scripts,e-koch/canfar_scripts
|
Save plots of amp vs time for each scan
|
import numpy as np
import re
ms_active = raw_input("MS? : ")
field_str = raw_input("Field? : ")
tb.open(vis+"/FIELD")
names = tb.getcol('NAME')
matches = [string for string in names if re.match(field_str, string)]
posn_matches = \
[i for i, string in enumerate(names) if re.match(field_str, string)]
if len(matches) == 0:
raise TypeError("No matches found for the given field string")
tb.open(ms_active)
scanNums = sorted(np.unique(tb.getcol('SCAN_NUMBER')))
field_scans = []
for ii in range(numFields):
subtable = tb.query('FIELD_ID==%s'%ii)
field_scans.append(list(np.unique(subtable.getcol('SCAN_NUMBER'))))
tb.close()
field_scans = [scans for i, scans in field_scans if i in posn_matches]
for ii in range(len(field_scans)):
for jj in range(len(field_scans[ii])):
default('plotms')
vis = ms_active
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
        field = matches[ii]
        scan = str(field_scans[ii][jj])
correlation = "RR,LL"
averagedata = True
avgbaseline = True
transform = False
extendflag = False
plotrange = []
title = 'Amp vs Time: Field'+matches[ii]+' Scan'+str(jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = 'field_'+matches[ii]+'_scan_'+str(jj)+'.png'
overwrite = True
showgui = False
async = False
plotms()
|
<commit_before><commit_msg>Save plots of amp vs time for each scan<commit_after>
|
import numpy as np
import re
ms_active = raw_input("MS? : ")
field_str = raw_input("Field? : ")
tb.open(vis+"/FIELD")
names = tb.getcol('NAME')
matches = [string for string in names if re.match(field_str, string)]
posn_matches = \
[i for i, string in enumerate(names) if re.match(field_str, string)]
if len(matches) == 0:
raise TypeError("No matches found for the given field string")
tb.open(ms_active)
scanNums = sorted(np.unique(tb.getcol('SCAN_NUMBER')))
field_scans = []
for ii in range(numFields):
subtable = tb.query('FIELD_ID==%s'%ii)
field_scans.append(list(np.unique(subtable.getcol('SCAN_NUMBER'))))
tb.close()
field_scans = [scans for i, scans in field_scans if i in posn_matches]
for ii in range(len(field_scans)):
for jj in range(len(field_scans[ii])):
default('plotms')
vis = ms_active
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
        field = matches[ii]
        scan = str(field_scans[ii][jj])
correlation = "RR,LL"
averagedata = True
avgbaseline = True
transform = False
extendflag = False
plotrange = []
title = 'Amp vs Time: Field'+matches[ii]+' Scan'+str(jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = 'field_'+matches[ii]+'_scan_'+str(jj)+'.png'
overwrite = True
showgui = False
async = False
plotms()
|
Save plots of amp vs time for each scan
import numpy as np
import re
ms_active = raw_input("MS? : ")
field_str = raw_input("Field? : ")
tb.open(vis+"/FIELD")
names = tb.getcol('NAME')
matches = [string for string in names if re.match(field_str, string)]
posn_matches = \
[i for i, string in enumerate(names) if re.match(field_str, string)]
if len(matches) == 0:
raise TypeError("No matches found for the given field string")
tb.open(ms_active)
scanNums = sorted(np.unique(tb.getcol('SCAN_NUMBER')))
field_scans = []
for ii in range(numFields):
subtable = tb.query('FIELD_ID==%s'%ii)
field_scans.append(list(np.unique(subtable.getcol('SCAN_NUMBER'))))
tb.close()
field_scans = [scans for i, scans in field_scans if i in posn_matches]
for ii in range(len(field_scans)):
for jj in range(len(field_scans[ii])):
default('plotms')
vis = ms_active
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
        field = matches[ii]
        scan = str(field_scans[ii][jj])
correlation = "RR,LL"
averagedata = True
avgbaseline = True
transform = False
extendflag = False
plotrange = []
title = 'Amp vs Time: Field'+matches[ii]+' Scan'+str(jj)
xlabel = ''
ylabel = ''
showmajorgrid = False
showminorgrid = False
plotfile = 'field_'+matches[ii]+'_scan_'+str(jj)+'.png'
overwrite = True
showgui = False
async = False
plotms()
|
<commit_before><commit_msg>Save plots of amp vs time for each scan<commit_after>
import numpy as np
import re
ms_active = raw_input("MS? : ")
field_str = raw_input("Field? : ")
tb.open(vis+"/FIELD")
names = tb.getcol('NAME')
matches = [string for string in names if re.match(field_str, string)]
posn_matches = \
[i for i, string in enumerate(names) if re.match(field_str, string)]
if len(matches) == 0:
raise TypeError("No matches found for the given field string")
tb.open(ms_active)
scanNums = sorted(np.unique(tb.getcol('SCAN_NUMBER')))
field_scans = []
for ii in range(numFields):
subtable = tb.query('FIELD_ID==%s'%ii)
field_scans.append(list(np.unique(subtable.getcol('SCAN_NUMBER'))))
tb.close()
field_scans = [scans for i, scans in field_scans if i in posn_matches]
for ii in range(len(field_scans)):
for jj in range(len(field_scans[ii])):
default('plotms')
vis = ms_active
xaxis = 'time'
yaxis = 'amp'
ydatacolumn = 'corrected'
selectdata = True
        field = str(posn_matches[ii])
        scan = str(field_scans[ii][jj])
        correlation = "RR,LL"
        averagedata = True
        avgbaseline = True
        transform = False
        extendflag = False
        plotrange = []
        title = 'Amp vs Time: Field'+matches[ii]+' Scan'+str(field_scans[ii][jj])
        xlabel = ''
        ylabel = ''
        showmajorgrid = False
        showminorgrid = False
        plotfile = 'field_'+matches[ii]+'_scan_'+str(field_scans[ii][jj])+'.png'
overwrite = True
showgui = False
async = False
plotms()
|
|
9b46d66f89dba1f5bd2507f0a3dcddfd2758fe2b
|
cluster/update_jobs.py
|
cluster/update_jobs.py
|
from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
|
Add start for the job status updater
|
Add start for the job status updater
|
Python
|
mit
|
crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp
|
Add start for the job status updater
|
from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
|
<commit_before><commit_msg>Add start for the job status updater<commit_after>
|
from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
|
Add start for the job status updaterfrom django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
|
<commit_before><commit_msg>Add start for the job status updater<commit_after>from django.contrib.auth.models import User
from models import Job
from interface import get_all_jobs
def run_all():
for user in User.objects.all():
creds = user.credentials.all()
for i, cluster in enumerate(get_all_jobs(user)):
cred = creds[i]
jobs = {}
for job in cluster["jobs"]:
status = job[-1]
job_id = job[0]
if status in jobs:
jobs[status].append(job_id)
else:
jobs[status] = [job_id]
Job.update_states(cred, jobs)
if __name__ == "__main__":
run_all()
|
|
013f2e526c862ed8e2c9b79aba43618b381d4bd3
|
test/test_session_getchatserver.py
|
test/test_session_getchatserver.py
|
import mock
import pytest
import requests
from pytwitcherapi import session
@pytest.fixture(scope="function")
def ts(mock_session):
"""Return a :class:`session.TwitchSession`
and mock the request of :class:`Session`
"""
return session.TwitchSession()
@pytest.fixture(scope='function')
def mock_chatpropresponse(servers, mock_session):
chatservers = [s.address for s in servers]
channelprop = {"chat_servers": chatservers}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = channelprop
return chatpropresponse
@pytest.fixture(scope='function')
def mock_serverstatresponse(servers_json, mock_session):
serverstatresponse = mock.Mock()
serverstatresponse.json.return_value = servers_json
return serverstatresponse
@pytest.fixture(scope='function')
def mock_chatserverresponse(mock_serverstatresponse, mock_chatpropresponse,
servers_json):
requests.Session.request.side_effect = [mock_chatpropresponse,
mock_serverstatresponse]
# if serverstatresponse is successful return the best
s = servers_json[2]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_failchatserverresponse(mock_chatpropresponse, servers_json):
serverstatresponse = mock.Mock()
serverstatresponse.raise_for_status.side_effect = requests.HTTPError()
requests.Session.request.side_effect = [mock_chatpropresponse,
serverstatresponse]
# if serverstatresponse fails just return the first
s = servers_json[0]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_nochatserverresponse(mock_serverstatresponse):
# random server status that will not be in the available ones
chatprop = {"chat_servers": ['0.16.64.11:80', '0.16.24.11:123']}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = chatprop
requests.Session.request.side_effect = [chatpropresponse,
mock_serverstatresponse]
# if no server stat for the chat servers can be found just return the first
return '0.16.64.11', 80
@pytest.mark.parametrize('fix', ['mock_chatserverresponse',
'mock_failchatserverresponse',
'mock_nochatserverresponse'])
def test_get_chat_server(ts, channel1, fix, request):
expected = request.getfuncargvalue(fix)
server, port = ts.get_chat_server(channel1)
assert (server, port) == expected
|
Add missing test for getchatserver
|
Add missing test for getchatserver
Forgot to track it -.-
|
Python
|
bsd-3-clause
|
Pytwitcher/pytwitcherapi,Pytwitcher/pytwitcherapi
|
Add missing test for getchatserver
Forgot to track it -.-
|
import mock
import pytest
import requests
from pytwitcherapi import session
@pytest.fixture(scope="function")
def ts(mock_session):
"""Return a :class:`session.TwitchSession`
and mock the request of :class:`Session`
"""
return session.TwitchSession()
@pytest.fixture(scope='function')
def mock_chatpropresponse(servers, mock_session):
chatservers = [s.address for s in servers]
channelprop = {"chat_servers": chatservers}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = channelprop
return chatpropresponse
@pytest.fixture(scope='function')
def mock_serverstatresponse(servers_json, mock_session):
serverstatresponse = mock.Mock()
serverstatresponse.json.return_value = servers_json
return serverstatresponse
@pytest.fixture(scope='function')
def mock_chatserverresponse(mock_serverstatresponse, mock_chatpropresponse,
servers_json):
requests.Session.request.side_effect = [mock_chatpropresponse,
mock_serverstatresponse]
# if serverstatresponse is successful return the best
s = servers_json[2]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_failchatserverresponse(mock_chatpropresponse, servers_json):
serverstatresponse = mock.Mock()
serverstatresponse.raise_for_status.side_effect = requests.HTTPError()
requests.Session.request.side_effect = [mock_chatpropresponse,
serverstatresponse]
# if serverstatresponse fails just return the first
s = servers_json[0]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_nochatserverresponse(mock_serverstatresponse):
# random server status that will not be in the available ones
chatprop = {"chat_servers": ['0.16.64.11:80', '0.16.24.11:123']}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = chatprop
requests.Session.request.side_effect = [chatpropresponse,
mock_serverstatresponse]
# if no server stat for the chat servers can be found just return the first
return '0.16.64.11', 80
@pytest.mark.parametrize('fix', ['mock_chatserverresponse',
'mock_failchatserverresponse',
'mock_nochatserverresponse'])
def test_get_chat_server(ts, channel1, fix, request):
expected = request.getfuncargvalue(fix)
server, port = ts.get_chat_server(channel1)
assert (server, port) == expected
|
<commit_before><commit_msg>Add missing test for getchatserver
Forgot to track it -.-<commit_after>
|
import mock
import pytest
import requests
from pytwitcherapi import session
@pytest.fixture(scope="function")
def ts(mock_session):
"""Return a :class:`session.TwitchSession`
and mock the request of :class:`Session`
"""
return session.TwitchSession()
@pytest.fixture(scope='function')
def mock_chatpropresponse(servers, mock_session):
chatservers = [s.address for s in servers]
channelprop = {"chat_servers": chatservers}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = channelprop
return chatpropresponse
@pytest.fixture(scope='function')
def mock_serverstatresponse(servers_json, mock_session):
serverstatresponse = mock.Mock()
serverstatresponse.json.return_value = servers_json
return serverstatresponse
@pytest.fixture(scope='function')
def mock_chatserverresponse(mock_serverstatresponse, mock_chatpropresponse,
servers_json):
requests.Session.request.side_effect = [mock_chatpropresponse,
mock_serverstatresponse]
# if serverstatresponse is successful return the best
s = servers_json[2]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_failchatserverresponse(mock_chatpropresponse, servers_json):
serverstatresponse = mock.Mock()
serverstatresponse.raise_for_status.side_effect = requests.HTTPError()
requests.Session.request.side_effect = [mock_chatpropresponse,
serverstatresponse]
# if serverstatresponse fails just return the first
s = servers_json[0]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_nochatserverresponse(mock_serverstatresponse):
# random server status that will not be in the available ones
chatprop = {"chat_servers": ['0.16.64.11:80', '0.16.24.11:123']}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = chatprop
requests.Session.request.side_effect = [chatpropresponse,
mock_serverstatresponse]
# if no server stat for the chat servers can be found just return the first
return '0.16.64.11', 80
@pytest.mark.parametrize('fix', ['mock_chatserverresponse',
'mock_failchatserverresponse',
'mock_nochatserverresponse'])
def test_get_chat_server(ts, channel1, fix, request):
expected = request.getfuncargvalue(fix)
server, port = ts.get_chat_server(channel1)
assert (server, port) == expected
|
Add missing test for getchatserver
Forgot to track it -.-import mock
import pytest
import requests
from pytwitcherapi import session
@pytest.fixture(scope="function")
def ts(mock_session):
"""Return a :class:`session.TwitchSession`
and mock the request of :class:`Session`
"""
return session.TwitchSession()
@pytest.fixture(scope='function')
def mock_chatpropresponse(servers, mock_session):
chatservers = [s.address for s in servers]
channelprop = {"chat_servers": chatservers}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = channelprop
return chatpropresponse
@pytest.fixture(scope='function')
def mock_serverstatresponse(servers_json, mock_session):
serverstatresponse = mock.Mock()
serverstatresponse.json.return_value = servers_json
return serverstatresponse
@pytest.fixture(scope='function')
def mock_chatserverresponse(mock_serverstatresponse, mock_chatpropresponse,
servers_json):
requests.Session.request.side_effect = [mock_chatpropresponse,
mock_serverstatresponse]
# if serverstatresponse is successful return the best
s = servers_json[2]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_failchatserverresponse(mock_chatpropresponse, servers_json):
serverstatresponse = mock.Mock()
serverstatresponse.raise_for_status.side_effect = requests.HTTPError()
requests.Session.request.side_effect = [mock_chatpropresponse,
serverstatresponse]
# if serverstatresponse fails just return the first
s = servers_json[0]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_nochatserverresponse(mock_serverstatresponse):
# random server status that will not be in the available ones
chatprop = {"chat_servers": ['0.16.64.11:80', '0.16.24.11:123']}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = chatprop
requests.Session.request.side_effect = [chatpropresponse,
mock_serverstatresponse]
# if no server stat for the chat servers can be found just return the first
return '0.16.64.11', 80
@pytest.mark.parametrize('fix', ['mock_chatserverresponse',
'mock_failchatserverresponse',
'mock_nochatserverresponse'])
def test_get_chat_server(ts, channel1, fix, request):
expected = request.getfuncargvalue(fix)
server, port = ts.get_chat_server(channel1)
assert (server, port) == expected
|
<commit_before><commit_msg>Add missing test for getchatserver
Forgot to track it -.-<commit_after>import mock
import pytest
import requests
from pytwitcherapi import session
@pytest.fixture(scope="function")
def ts(mock_session):
"""Return a :class:`session.TwitchSession`
and mock the request of :class:`Session`
"""
return session.TwitchSession()
@pytest.fixture(scope='function')
def mock_chatpropresponse(servers, mock_session):
chatservers = [s.address for s in servers]
channelprop = {"chat_servers": chatservers}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = channelprop
return chatpropresponse
@pytest.fixture(scope='function')
def mock_serverstatresponse(servers_json, mock_session):
serverstatresponse = mock.Mock()
serverstatresponse.json.return_value = servers_json
return serverstatresponse
@pytest.fixture(scope='function')
def mock_chatserverresponse(mock_serverstatresponse, mock_chatpropresponse,
servers_json):
requests.Session.request.side_effect = [mock_chatpropresponse,
mock_serverstatresponse]
# if serverstatresponse is successful return the best
s = servers_json[2]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_failchatserverresponse(mock_chatpropresponse, servers_json):
serverstatresponse = mock.Mock()
serverstatresponse.raise_for_status.side_effect = requests.HTTPError()
requests.Session.request.side_effect = [mock_chatpropresponse,
serverstatresponse]
# if serverstatresponse fails just return the first
s = servers_json[0]
return s['ip'], s['port']
@pytest.fixture(scope='function')
def mock_nochatserverresponse(mock_serverstatresponse):
# random server status that will not be in the available ones
chatprop = {"chat_servers": ['0.16.64.11:80', '0.16.24.11:123']}
chatpropresponse = mock.Mock()
chatpropresponse.json.return_value = chatprop
requests.Session.request.side_effect = [chatpropresponse,
mock_serverstatresponse]
# if no server stat for the chat servers can be found just return the first
return '0.16.64.11', 80
@pytest.mark.parametrize('fix', ['mock_chatserverresponse',
'mock_failchatserverresponse',
'mock_nochatserverresponse'])
def test_get_chat_server(ts, channel1, fix, request):
expected = request.getfuncargvalue(fix)
server, port = ts.get_chat_server(channel1)
assert (server, port) == expected
|
|
aaea0100aed4ff33c4f67518d7859b78baffd2b6
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.10',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.11',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
Update the PyPI version to 0.2.11
|
Update the PyPI version to 0.2.11
|
Python
|
mit
|
electronick1/todoist-python,Doist/todoist-python
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.10',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
Update the PyPI version to 0.2.11
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.11',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
<commit_before># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.10',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
<commit_msg>Update the PyPI version to 0.2.11<commit_after>
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.11',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.10',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
Update the PyPI version to 0.2.11# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.11',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
<commit_before># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.10',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
<commit_msg>Update the PyPI version to 0.2.11<commit_after># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.11',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
a99b2cb06acf5e4018f203c4298c62dc69655281
|
setup.py
|
setup.py
|
from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
|
from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
requires=[
'webob',
],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
|
Add webob as a requirement.
|
Add webob as a requirement.
|
Python
|
apache-2.0
|
open-craft/XBlock,Lyla-Fischer/xblock-sdk,nagyistoce/edx-xblock-sdk,cpennington/XBlock,nagyistoce/edx-XBlock,4eek/XBlock,edx/XBlock,Lyla-Fischer/xblock-sdk,stvstnfrd/xblock-sdk,edx-solutions/xblock-sdk,edx-solutions/XBlock,dcadams/xblock-sdk,stvstnfrd/xblock-sdk,Pilou81715/hackathon_edX,edx-solutions/XBlock,cpennington/XBlock,stvstnfrd/xblock-sdk,edx/xblock-sdk,dcadams/xblock-sdk,mitodl/XBlock,jamiefolsom/xblock-sdk,Pilou81715/hackathon_edX,jamiefolsom/xblock-sdk,lovehhf/xblock-sdk,Pilou81715/hackathon_edX,dcadams/xblock-sdk,lovehhf/XBlock,edx/XBlock,nagyistoce/edx-xblock-sdk,nagyistoce/edx-xblock-sdk,lovehhf/xblock-sdk,EDUlib/XBlock,lovehhf/XBlock,cpennington/XBlock,lovehhf/xblock-sdk,jamiefolsom/xblock-sdk,edx-solutions/xblock-sdk,mitodl/XBlock,nagyistoce/edx-xblock-sdk,Pilou81715/hackathon_edX,edx/xblock-sdk,jamiefolsom/xblock-sdk,4eek/XBlock,edx-solutions/xblock-sdk,Lyla-Fischer/xblock-sdk,open-craft/XBlock,edx-solutions/xblock-sdk,edx/xblock-sdk,lovehhf/xblock-sdk,EDUlib/XBlock,nagyistoce/edx-XBlock
|
from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
Add webob as a requirement.
|
from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
requires=[
'webob',
],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
|
<commit_before>from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
<commit_msg>Add webob as a requirement.<commit_after>
|
from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
requires=[
'webob',
],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
|
from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
Add webob as a requirement.from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
requires=[
'webob',
],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
|
<commit_before>from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
<commit_msg>Add webob as a requirement.<commit_after>from setuptools import setup
setup(
name='XBlock',
version='0.1',
description='XBlock Core Library',
packages=['xblock'],
requires=[
'webob',
],
entry_points={
'xblock.v1': [
'helloworld = xblock.content:HelloWorldBlock',
'html = xblock.content:HtmlBlock',
'sequence = xblock.structure:Sequence',
'vertical = xblock.structure:VerticalBlock',
'sidebar = xblock.structure:SidebarBlock',
'problem = xblock.problem:ProblemBlock',
'textinput = xblock.problem:TextInputBlock',
'equality = xblock.problem:EqualityCheckerBlock',
'attempts_scoreboard = xblock.problem:AttemptsScoreboardBlock',
'thumbs = xblock.thumbs:ThumbsBlock',
'slider = xblock.slider:Slider',
'progress_slider = xblock.slider:ProgressSlider',
]
}
)
|
955c4584a304c3b6a6dbbcf12eae3eed5e9a4cf5
|
scripts/python/cleanSimulation.py
|
scripts/python/cleanSimulation.py
|
#!/usr/bin/env python
# file: cleanSimulation.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# description: Clean a cuIBM simulation.
import os
import argparse
def read_inputs():
"""Parses the command-line."""
# create parser
    parser = argparse.ArgumentParser(description='Clean cuIBM case',
formatter_class= argparse.ArgumentDefaultsHelpFormatter)
# fill parser with arguments
parser.add_argument('--case', dest='case_directory', type=str,
default=os.getcwd(),
                        help='directory of the cuIBM simulation')
parser.add_argument('--no-images', dest='images', action='store_false',
help='does not remove the images folder')
parser.add_argument('--no-grid', dest='grid', action='store_false',
help='does not remove the grid file')
parser.add_argument('--no-solutions', dest='solutions', action='store_false',
                        help='does not remove the numerical solution folders')
parser.add_argument('--no-forces', dest='forces', action='store_false',
help='does not remove the forces data file')
parser.add_argument('--no-logs', dest='logs', action='store_false',
help='does not remove log files '
'(iterations, run.info)')
parser.set_defaults(images=True, grid=True, solutions=True,
forces=True, logs=True)
return parser.parse_args()
def main():
"""Cleans a cuIBM simulation."""
    # parse the command-line
args = read_inputs()
# get different parts to clean
parts = {}
if args.images:
parts['images'] = '%s/images' % args.case_directory
if args.grid:
parts['grid'] = '%s/grid' % args.case_directory
if args.solutions:
parts['solutions'] = '%s/0*' % args.case_directory
if args.forces:
parts['forces'] = '%s/forces' % args.case_directory
if args.logs:
parts['logs'] = ('%s/iterations %s/run.info'
% (args.case_directory, args.case_directory))
# remove appropriate files/folders
print '[case-directory] %s' % args.case_directory
for key, part in parts.iteritems():
print '\t--> removing %s ...' % key
os.system('rm -rf %s' % part)
if __name__ == '__main__':
main()
|
Add script to clean a simulation
|
Add script to clean a simulation
|
Python
|
mit
|
barbagroup/cuIBM,barbagroup/cuIBM,barbagroup/cuIBM
|
Add script to clean a simulation
|
#!/usr/bin/env python
# file: cleanSimulation.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# description: Clean a cuIBM simulation.
import os
import argparse
def read_inputs():
"""Parses the command-line."""
# create parser
    parser = argparse.ArgumentParser(description='Clean cuIBM case',
formatter_class= argparse.ArgumentDefaultsHelpFormatter)
# fill parser with arguments
parser.add_argument('--case', dest='case_directory', type=str,
default=os.getcwd(),
                        help='directory of the cuIBM simulation')
parser.add_argument('--no-images', dest='images', action='store_false',
help='does not remove the images folder')
parser.add_argument('--no-grid', dest='grid', action='store_false',
help='does not remove the grid file')
parser.add_argument('--no-solutions', dest='solutions', action='store_false',
                        help='does not remove the numerical solution folders')
parser.add_argument('--no-forces', dest='forces', action='store_false',
help='does not remove the forces data file')
parser.add_argument('--no-logs', dest='logs', action='store_false',
help='does not remove log files '
'(iterations, run.info)')
parser.set_defaults(images=True, grid=True, solutions=True,
forces=True, logs=True)
return parser.parse_args()
def main():
"""Cleans a cuIBM simulation."""
    # parse the command-line
args = read_inputs()
# get different parts to clean
parts = {}
if args.images:
parts['images'] = '%s/images' % args.case_directory
if args.grid:
parts['grid'] = '%s/grid' % args.case_directory
if args.solutions:
parts['solutions'] = '%s/0*' % args.case_directory
if args.forces:
parts['forces'] = '%s/forces' % args.case_directory
if args.logs:
parts['logs'] = ('%s/iterations %s/run.info'
% (args.case_directory, args.case_directory))
# remove appropriate files/folders
print '[case-directory] %s' % args.case_directory
for key, part in parts.iteritems():
print '\t--> removing %s ...' % key
os.system('rm -rf %s' % part)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to clean a simulation<commit_after>
|
#!/usr/bin/env python
# file: cleanSimulation.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# description: Clean a cuIBM simulation.
import os
import argparse
def read_inputs():
"""Parses the command-line."""
# create parser
    parser = argparse.ArgumentParser(description='Clean cuIBM case',
formatter_class= argparse.ArgumentDefaultsHelpFormatter)
# fill parser with arguments
parser.add_argument('--case', dest='case_directory', type=str,
default=os.getcwd(),
                        help='directory of the cuIBM simulation')
parser.add_argument('--no-images', dest='images', action='store_false',
help='does not remove the images folder')
parser.add_argument('--no-grid', dest='grid', action='store_false',
help='does not remove the grid file')
parser.add_argument('--no-solutions', dest='solutions', action='store_false',
                        help='does not remove the numerical solution folders')
parser.add_argument('--no-forces', dest='forces', action='store_false',
help='does not remove the forces data file')
parser.add_argument('--no-logs', dest='logs', action='store_false',
help='does not remove log files '
'(iterations, run.info)')
parser.set_defaults(images=True, grid=True, solutions=True,
forces=True, logs=True)
return parser.parse_args()
def main():
"""Cleans a cuIBM simulation."""
    # parse the command-line
args = read_inputs()
# get different parts to clean
parts = {}
if args.images:
parts['images'] = '%s/images' % args.case_directory
if args.grid:
parts['grid'] = '%s/grid' % args.case_directory
if args.solutions:
parts['solutions'] = '%s/0*' % args.case_directory
if args.forces:
parts['forces'] = '%s/forces' % args.case_directory
if args.logs:
parts['logs'] = ('%s/iterations %s/run.info'
% (args.case_directory, args.case_directory))
# remove appropriate files/folders
print '[case-directory] %s' % args.case_directory
for key, part in parts.iteritems():
print '\t--> removing %s ...' % key
os.system('rm -rf %s' % part)
if __name__ == '__main__':
main()
|
Add script to clean a simulation#!/usr/bin/env python
# file: cleanSimulation.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# description: Clean a cuIBM simulation.
import os
import argparse
def read_inputs():
"""Parses the command-line."""
# create parser
    parser = argparse.ArgumentParser(description='Clean cuIBM case',
formatter_class= argparse.ArgumentDefaultsHelpFormatter)
# fill parser with arguments
parser.add_argument('--case', dest='case_directory', type=str,
default=os.getcwd(),
                        help='directory of the cuIBM simulation')
parser.add_argument('--no-images', dest='images', action='store_false',
help='does not remove the images folder')
parser.add_argument('--no-grid', dest='grid', action='store_false',
help='does not remove the grid file')
parser.add_argument('--no-solutions', dest='solutions', action='store_false',
                        help='does not remove the numerical solution folders')
parser.add_argument('--no-forces', dest='forces', action='store_false',
help='does not remove the forces data file')
parser.add_argument('--no-logs', dest='logs', action='store_false',
help='does not remove log files '
'(iterations, run.info)')
parser.set_defaults(images=True, grid=True, solutions=True,
forces=True, logs=True)
return parser.parse_args()
def main():
"""Cleans a cuIBM simulation."""
    # parse the command-line
args = read_inputs()
# get different parts to clean
parts = {}
if args.images:
parts['images'] = '%s/images' % args.case_directory
if args.grid:
parts['grid'] = '%s/grid' % args.case_directory
if args.solutions:
parts['solutions'] = '%s/0*' % args.case_directory
if args.forces:
parts['forces'] = '%s/forces' % args.case_directory
if args.logs:
parts['logs'] = ('%s/iterations %s/run.info'
% (args.case_directory, args.case_directory))
# remove appropriate files/folders
print '[case-directory] %s' % args.case_directory
for key, part in parts.iteritems():
print '\t--> removing %s ...' % key
os.system('rm -rf %s' % part)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to clean a simulation<commit_after>#!/usr/bin/env python
# file: cleanSimulation.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# description: Clean a cuIBM simulation.
import os
import argparse
def read_inputs():
"""Parses the command-line."""
# create parser
    parser = argparse.ArgumentParser(description='Clean cuIBM case',
formatter_class= argparse.ArgumentDefaultsHelpFormatter)
# fill parser with arguments
parser.add_argument('--case', dest='case_directory', type=str,
default=os.getcwd(),
                        help='directory of the cuIBM simulation')
parser.add_argument('--no-images', dest='images', action='store_false',
help='does not remove the images folder')
parser.add_argument('--no-grid', dest='grid', action='store_false',
help='does not remove the grid file')
parser.add_argument('--no-solutions', dest='solutions', action='store_false',
                        help='does not remove the numerical solution folders')
parser.add_argument('--no-forces', dest='forces', action='store_false',
help='does not remove the forces data file')
parser.add_argument('--no-logs', dest='logs', action='store_false',
help='does not remove log files '
'(iterations, run.info)')
parser.set_defaults(images=True, grid=True, solutions=True,
forces=True, logs=True)
return parser.parse_args()
def main():
"""Cleans a cuIBM simulation."""
    # parse the command-line
args = read_inputs()
# get different parts to clean
parts = {}
if args.images:
parts['images'] = '%s/images' % args.case_directory
if args.grid:
parts['grid'] = '%s/grid' % args.case_directory
if args.solutions:
parts['solutions'] = '%s/0*' % args.case_directory
if args.forces:
parts['forces'] = '%s/forces' % args.case_directory
if args.logs:
parts['logs'] = ('%s/iterations %s/run.info'
% (args.case_directory, args.case_directory))
# remove appropriate files/folders
print '[case-directory] %s' % args.case_directory
for key, part in parts.iteritems():
print '\t--> removing %s ...' % key
os.system('rm -rf %s' % part)
if __name__ == '__main__':
main()
|
|
048939778e5637eff997d674395d0f6df860a3eb
|
tests/integration/test_debugger.py
|
tests/integration/test_debugger.py
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from future.standard_library import install_aliases
install_aliases()
import sys
import time
import threading
from urllib import parse as urlparse
import pytest
from bugbuzz import BugBuzz
from bugbuzz.packages import requests
# just a dummy unicode string, to see if we can handle unicode correctly
DUMMY_STR = u'除錯'
@pytest.fixture(scope='session')
def bugbuzz_dbg(
base_url='https://bugbuzz-api.herokuapp.com',
dashboard_url='http://dashboard.bugbuzz.io/',
):
return BugBuzz(base_url, dashboard_url)
def test_set_trace(mocker, bugbuzz_dbg):
mocker.patch('webbrowser.open_new_tab')
# post continue command
def post_continue():
time.sleep(3)
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}/actions/continue'.format(
bugbuzz_dbg.client.session_id
),
)
requests.post(url)
thread = threading.Thread(target=post_continue)
thread.daemon = True
thread.start()
# TODO: set a timeout here?
bugbuzz_dbg.set_trace(sys._getframe())
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}'.format(bugbuzz_dbg.client.session_id),
)
resp = requests.get(url)
session = resp.json()['session']
assert len(session['files']) == 1
assert len(session['breaks']) == 1
|
Add test to reproduce bug
|
Add test to reproduce bug
|
Python
|
mit
|
victorlin/bugbuzz-python,victorlin/bugbuzz-python
|
Add test to reproduce bug
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from future.standard_library import install_aliases
install_aliases()
import sys
import time
import threading
from urllib import parse as urlparse
import pytest
from bugbuzz import BugBuzz
from bugbuzz.packages import requests
# just a dummy unicode string, to see if we can handle unicode correctly
DUMMY_STR = u'除錯'
@pytest.fixture(scope='session')
def bugbuzz_dbg(
base_url='https://bugbuzz-api.herokuapp.com',
dashboard_url='http://dashboard.bugbuzz.io/',
):
return BugBuzz(base_url, dashboard_url)
def test_set_trace(mocker, bugbuzz_dbg):
mocker.patch('webbrowser.open_new_tab')
# post continue command
def post_continue():
time.sleep(3)
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}/actions/continue'.format(
bugbuzz_dbg.client.session_id
),
)
requests.post(url)
thread = threading.Thread(target=post_continue)
thread.daemon = True
thread.start()
# TODO: set a timeout here?
bugbuzz_dbg.set_trace(sys._getframe())
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}'.format(bugbuzz_dbg.client.session_id),
)
resp = requests.get(url)
session = resp.json()['session']
assert len(session['files']) == 1
assert len(session['breaks']) == 1
|
<commit_before><commit_msg>Add test to reproduce bug<commit_after>
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from future.standard_library import install_aliases
install_aliases()
import sys
import time
import threading
from urllib import parse as urlparse
import pytest
from bugbuzz import BugBuzz
from bugbuzz.packages import requests
# just a dummy unicode string, to see if we can handle unicode correctly
DUMMY_STR = u'除錯'
@pytest.fixture(scope='session')
def bugbuzz_dbg(
base_url='https://bugbuzz-api.herokuapp.com',
dashboard_url='http://dashboard.bugbuzz.io/',
):
return BugBuzz(base_url, dashboard_url)
def test_set_trace(mocker, bugbuzz_dbg):
mocker.patch('webbrowser.open_new_tab')
# post continue command
def post_continue():
time.sleep(3)
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}/actions/continue'.format(
bugbuzz_dbg.client.session_id
),
)
requests.post(url)
thread = threading.Thread(target=post_continue)
thread.daemon = True
thread.start()
# TODO: set a timeout here?
bugbuzz_dbg.set_trace(sys._getframe())
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}'.format(bugbuzz_dbg.client.session_id),
)
resp = requests.get(url)
session = resp.json()['session']
assert len(session['files']) == 1
assert len(session['breaks']) == 1
|
Add test to reproduce bug# -*- coding: utf8 -*-
from __future__ import unicode_literals
from future.standard_library import install_aliases
install_aliases()
import sys
import time
import threading
from urllib import parse as urlparse
import pytest
from bugbuzz import BugBuzz
from bugbuzz.packages import requests
# just a dummy unicode string, to see if we can handle unicode correctly
DUMMY_STR = u'除錯'
@pytest.fixture(scope='session')
def bugbuzz_dbg(
base_url='https://bugbuzz-api.herokuapp.com',
dashboard_url='http://dashboard.bugbuzz.io/',
):
return BugBuzz(base_url, dashboard_url)
def test_set_trace(mocker, bugbuzz_dbg):
mocker.patch('webbrowser.open_new_tab')
# post continue command
def post_continue():
time.sleep(3)
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}/actions/continue'.format(
bugbuzz_dbg.client.session_id
),
)
requests.post(url)
thread = threading.Thread(target=post_continue)
thread.daemon = True
thread.start()
# TODO: set a timeout here?
bugbuzz_dbg.set_trace(sys._getframe())
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}'.format(bugbuzz_dbg.client.session_id),
)
resp = requests.get(url)
session = resp.json()['session']
assert len(session['files']) == 1
assert len(session['breaks']) == 1
|
<commit_before><commit_msg>Add test to reproduce bug<commit_after># -*- coding: utf8 -*-
from __future__ import unicode_literals
from future.standard_library import install_aliases
install_aliases()
import sys
import time
import threading
from urllib import parse as urlparse
import pytest
from bugbuzz import BugBuzz
from bugbuzz.packages import requests
# just a dummy unicode string, to see if we can handle unicode correctly
DUMMY_STR = u'除錯'
@pytest.fixture(scope='session')
def bugbuzz_dbg(
base_url='https://bugbuzz-api.herokuapp.com',
dashboard_url='http://dashboard.bugbuzz.io/',
):
return BugBuzz(base_url, dashboard_url)
def test_set_trace(mocker, bugbuzz_dbg):
mocker.patch('webbrowser.open_new_tab')
# post continue command
def post_continue():
time.sleep(3)
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}/actions/continue'.format(
bugbuzz_dbg.client.session_id
),
)
requests.post(url)
thread = threading.Thread(target=post_continue)
thread.daemon = True
thread.start()
# TODO: set a timeout here?
bugbuzz_dbg.set_trace(sys._getframe())
url = urlparse.urljoin(
bugbuzz_dbg.base_url,
'/sessions/{}'.format(bugbuzz_dbg.client.session_id),
)
resp = requests.get(url)
session = resp.json()['session']
assert len(session['files']) == 1
assert len(session['breaks']) == 1
|
|
d7ab04186f3b8c7c58b654a7372b1d4f3ffad64e
|
tests/unit/test_domain_commands.py
|
tests/unit/test_domain_commands.py
|
from caspy.domain import command, models
class TestBook:
def test_prepare_new_book(self):
empty_book = models.Book()
result = command.prepare_book(empty_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'now'
def test_prepare_old_book(self):
dated_book = models.Book(created_at='last week')
result = command.prepare_book(dated_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'last week'
|
Add unit tests for prepare_book
|
Add unit tests for prepare_book
|
Python
|
bsd-3-clause
|
altaurog/django-caspy,altaurog/django-caspy,altaurog/django-caspy
|
Add unit tests for prepare_book
|
from caspy.domain import command, models
class TestBook:
def test_prepare_new_book(self):
empty_book = models.Book()
result = command.prepare_book(empty_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'now'
def test_prepare_old_book(self):
dated_book = models.Book(created_at='last week')
result = command.prepare_book(dated_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'last week'
|
<commit_before><commit_msg>Add unit tests for prepare_book<commit_after>
|
from caspy.domain import command, models
class TestBook:
def test_prepare_new_book(self):
empty_book = models.Book()
result = command.prepare_book(empty_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'now'
def test_prepare_old_book(self):
dated_book = models.Book(created_at='last week')
result = command.prepare_book(dated_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'last week'
|
Add unit tests for prepare_bookfrom caspy.domain import command, models
class TestBook:
def test_prepare_new_book(self):
empty_book = models.Book()
result = command.prepare_book(empty_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'now'
def test_prepare_old_book(self):
dated_book = models.Book(created_at='last week')
result = command.prepare_book(dated_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'last week'
|
<commit_before><commit_msg>Add unit tests for prepare_book<commit_after>from caspy.domain import command, models
class TestBook:
def test_prepare_new_book(self):
empty_book = models.Book()
result = command.prepare_book(empty_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'now'
def test_prepare_old_book(self):
dated_book = models.Book(created_at='last week')
result = command.prepare_book(dated_book, 'now')
assert isinstance(result, models.Book)
assert result.created_at == 'last week'
|
|
eac5962530542e1732326c1c0173294682d6256b
|
models/ras_220_genes/pmc_ids_venn.py
|
models/ras_220_genes/pmc_ids_venn.py
|
"""Plot a Venn diagram showing the IDs associated with articles in PMC."""
import matplotlib_venn as mv
import csv
from matplotlib import pyplot as plt
import plot_formatting as pf
pf.set_fig_params()
all_pmcids = set([])
has_doi = set([])
has_pmid = set([])
with open('PMC-ids.csv') as f:
csvreader = csv.reader(f, delimiter=',')
for row in csvreader:
pmcid = row[8].strip()
pmid = row[9].strip()
doi = row[7].strip()
all_pmcids.add(pmcid)
if doi:
has_doi.add(pmcid)
if pmid:
has_pmid.add(pmcid)
print len(all_pmcids)
plt.figure(figsize=(4, 4), dpi=150)
res = mv.venn2([has_doi, has_pmid],
("DOI", "PMID"))
plt.title('IDs for articles in PMC')
num_neither = len(all_pmcids.difference(has_doi).difference(has_pmid))
def commafy(text):
text_with_commas = ''
for ix, char in enumerate(reversed(str(text))):
if ix % 3 == 0 and ix != 0:
text_with_commas += ','
text_with_commas += char
return text_with_commas[::-1]
plt.text(-0.55, -0.8, '(plus %s with no DOI or PMID)' % commafy(num_neither))
# Add commas
for label in res.subset_labels:
text = str(label.get_text())
label.set_text(commafy(text))
plt.show()
plt.savefig('pmc_ids_venn.png', dpi=150)
plt.savefig('pmc_ids_venn.pdf')
|
Make Venn diagram of IDs in PMC
|
Make Venn diagram of IDs in PMC
|
Python
|
bsd-2-clause
|
jmuhlich/indra,johnbachman/belpy,pvtodorov/indra,sorgerlab/indra,pvtodorov/indra,johnbachman/indra,sorgerlab/indra,johnbachman/belpy,pvtodorov/indra,bgyori/indra,pvtodorov/indra,sorgerlab/belpy,johnbachman/belpy,jmuhlich/indra,sorgerlab/belpy,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,bgyori/indra,jmuhlich/indra,bgyori/indra,johnbachman/indra
|
Make Venn diagram of IDs in PMC
|
"""Plot a Venn diagram showing the IDs associated with articles in PMC."""
import matplotlib_venn as mv
import csv
from matplotlib import pyplot as plt
import plot_formatting as pf
pf.set_fig_params()
all_pmcids = set([])
has_doi = set([])
has_pmid = set([])
with open('PMC-ids.csv') as f:
csvreader = csv.reader(f, delimiter=',')
for row in csvreader:
pmcid = row[8].strip()
pmid = row[9].strip()
doi = row[7].strip()
all_pmcids.add(pmcid)
if doi:
has_doi.add(pmcid)
if pmid:
has_pmid.add(pmcid)
print len(all_pmcids)
plt.figure(figsize=(4, 4), dpi=150)
res = mv.venn2([has_doi, has_pmid],
("DOI", "PMID"))
plt.title('IDs for articles in PMC')
num_neither = len(all_pmcids.difference(has_doi).difference(has_pmid))
def commafy(text):
text_with_commas = ''
for ix, char in enumerate(reversed(str(text))):
if ix % 3 == 0 and ix != 0:
text_with_commas += ','
text_with_commas += char
return text_with_commas[::-1]
plt.text(-0.55, -0.8, '(plus %s with no DOI or PMID)' % commafy(num_neither))
# Add commas
for label in res.subset_labels:
text = str(label.get_text())
label.set_text(commafy(text))
plt.show()
plt.savefig('pmc_ids_venn.png', dpi=150)
plt.savefig('pmc_ids_venn.pdf')
|
<commit_before><commit_msg>Make Venn diagram of IDs in PMC<commit_after>
|
"""Plot a Venn diagram showing the IDs associated with articles in PMC."""
import matplotlib_venn as mv
import csv
from matplotlib import pyplot as plt
import plot_formatting as pf
pf.set_fig_params()
all_pmcids = set([])
has_doi = set([])
has_pmid = set([])
with open('PMC-ids.csv') as f:
csvreader = csv.reader(f, delimiter=',')
for row in csvreader:
pmcid = row[8].strip()
pmid = row[9].strip()
doi = row[7].strip()
all_pmcids.add(pmcid)
if doi:
has_doi.add(pmcid)
if pmid:
has_pmid.add(pmcid)
print len(all_pmcids)
plt.figure(figsize=(4, 4), dpi=150)
res = mv.venn2([has_doi, has_pmid],
("DOI", "PMID"))
plt.title('IDs for articles in PMC')
num_neither = len(all_pmcids.difference(has_doi).difference(has_pmid))
def commafy(text):
text_with_commas = ''
for ix, char in enumerate(reversed(str(text))):
if ix % 3 == 0 and ix != 0:
text_with_commas += ','
text_with_commas += char
return text_with_commas[::-1]
plt.text(-0.55, -0.8, '(plus %s with no DOI or PMID)' % commafy(num_neither))
# Add commas
for label in res.subset_labels:
text = str(label.get_text())
label.set_text(commafy(text))
plt.show()
plt.savefig('pmc_ids_venn.png', dpi=150)
plt.savefig('pmc_ids_venn.pdf')
|
Make Venn diagram of IDs in PMC"""Plot a Venn diagram showing the IDs associated with articles in PMC."""
import matplotlib_venn as mv
import csv
from matplotlib import pyplot as plt
import plot_formatting as pf
pf.set_fig_params()
all_pmcids = set([])
has_doi = set([])
has_pmid = set([])
with open('PMC-ids.csv') as f:
csvreader = csv.reader(f, delimiter=',')
for row in csvreader:
pmcid = row[8].strip()
pmid = row[9].strip()
doi = row[7].strip()
all_pmcids.add(pmcid)
if doi:
has_doi.add(pmcid)
if pmid:
has_pmid.add(pmcid)
print len(all_pmcids)
plt.figure(figsize=(4, 4), dpi=150)
res = mv.venn2([has_doi, has_pmid],
("DOI", "PMID"))
plt.title('IDs for articles in PMC')
num_neither = len(all_pmcids.difference(has_doi).difference(has_pmid))
def commafy(text):
text_with_commas = ''
for ix, char in enumerate(reversed(str(text))):
if ix % 3 == 0 and ix != 0:
text_with_commas += ','
text_with_commas += char
return text_with_commas[::-1]
plt.text(-0.55, -0.8, '(plus %s with no DOI or PMID)' % commafy(num_neither))
# Add commas
for label in res.subset_labels:
text = str(label.get_text())
label.set_text(commafy(text))
plt.show()
plt.savefig('pmc_ids_venn.png', dpi=150)
plt.savefig('pmc_ids_venn.pdf')
|
<commit_before><commit_msg>Make Venn diagram of IDs in PMC<commit_after>"""Plot a Venn diagram showing the IDs associated with articles in PMC."""
import matplotlib_venn as mv
import csv
from matplotlib import pyplot as plt
import plot_formatting as pf
pf.set_fig_params()
all_pmcids = set([])
has_doi = set([])
has_pmid = set([])
with open('PMC-ids.csv') as f:
csvreader = csv.reader(f, delimiter=',')
for row in csvreader:
pmcid = row[8].strip()
pmid = row[9].strip()
doi = row[7].strip()
all_pmcids.add(pmcid)
if doi:
has_doi.add(pmcid)
if pmid:
has_pmid.add(pmcid)
print len(all_pmcids)
plt.figure(figsize=(4, 4), dpi=150)
res = mv.venn2([has_doi, has_pmid],
("DOI", "PMID"))
plt.title('IDs for articles in PMC')
num_neither = len(all_pmcids.difference(has_doi).difference(has_pmid))
def commafy(text):
text_with_commas = ''
for ix, char in enumerate(reversed(str(text))):
if ix % 3 == 0 and ix != 0:
text_with_commas += ','
text_with_commas += char
return text_with_commas[::-1]
plt.text(-0.55, -0.8, '(plus %s with no DOI or PMID)' % commafy(num_neither))
# Add commas
for label in res.subset_labels:
text = str(label.get_text())
label.set_text(commafy(text))
plt.show()
plt.savefig('pmc_ids_venn.png', dpi=150)
plt.savefig('pmc_ids_venn.pdf')
|
|
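A side note on the commafy helper in the Venn script above: it hand-rolls thousands separators, which Python's format mini-language can also produce. A minimal sketch, assuming Python 2.7+ where the ',' format specifier exists:

def commafy(text):
    # the ',' specifier inserts thousands separators natively
    return '{:,}'.format(int(text))

assert commafy('1234567') == '1,234,567'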
2ca84d16ff355a3e5a271ba800acb7c3cf4f8441
|
librisxl-tools/scripts/split_auth_source.py
|
librisxl-tools/scripts/split_auth_source.py
|
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import re
from os import makedirs, path as P
find_token = re.compile(r'{"(100|110|111|130|148|150|151|155|162|180|181|182|185)":').findall
def split_auth_source(sourcefile, outdir):
name_parts = P.basename(sourcefile).split('.', 1)
if not P.exists(outdir):
makedirs(outdir)
outfiles = {}
try:
source = open(sourcefile)
for i, l in enumerate(source):
for token in find_token(l):
outfp = outfiles.get(token)
if not outfp:
outfile = P.join(outdir,
'%s-%s.%s' % (name_parts[0], token, name_parts[1]))
print("Opening %s for writing..." % outfile)
outfp = outfiles[token] = open(outfile, 'w')
print(l, end="", file=outfp)
break
finally:
source.close()
for outfp in outfiles.values():
outfp.close()
if __name__ == '__main__':
import sys
args = sys.argv[1:]
sourcefile = args.pop(0)
outdir = args.pop(0)
split_auth_source(sourcefile, outdir)
|
Add script for splitting an auth dump on primary field presence
|
Add script for splitting an auth dump on primary field presence
|
Python
|
apache-2.0
|
libris/librisxl,libris/librisxl,libris/librisxl
|
Add script for splitting an auth dump on primary field presence
|
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import re
from os import makedirs, path as P
find_token = re.compile(r'{"(100|110|111|130|148|150|151|155|162|180|181|182|185)":').findall
def split_auth_source(sourcefile, outdir):
name_parts = P.basename(sourcefile).split('.', 1)
if not P.exists(outdir):
makedirs(outdir)
outfiles = {}
try:
source = open(sourcefile)
for i, l in enumerate(source):
for token in find_token(l):
outfp = outfiles.get(token)
if not outfp:
outfile = P.join(outdir,
'%s-%s.%s' % (name_parts[0], token, name_parts[1]))
print("Opening %s for writing..." % outfile)
outfp = outfiles[token] = open(outfile, 'w')
print(l, end="", file=outfp)
break
finally:
source.close()
for outfp in outfiles.values():
outfp.close()
if __name__ == '__main__':
import sys
args = sys.argv[1:]
sourcefile = args.pop(0)
outdir = args.pop(0)
split_auth_source(sourcefile, outdir)
|
<commit_before><commit_msg>Add script for splitting an auth dump on primary field presence<commit_after>
|
#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import re
from os import makedirs, path as P
find_token = re.compile(r'{"(100|110|111|130|148|150|151|155|162|180|181|182|185)":').findall
def split_auth_source(sourcefile, outdir):
name_parts = P.basename(sourcefile).split('.', 1)
if not P.exists(outdir):
makedirs(outdir)
outfiles = {}
try:
source = open(sourcefile)
for i, l in enumerate(source):
for token in find_token(l):
outfp = outfiles.get(token)
if not outfp:
outfile = P.join(outdir,
'%s-%s.%s' % (name_parts[0], token, name_parts[1]))
print("Opening %s for writing..." % outfile)
outfp = outfiles[token] = open(outfile, 'w')
print(l, end="", file=outfp)
break
finally:
source.close()
for outfp in outfiles.values():
outfp.close()
if __name__ == '__main__':
import sys
args = sys.argv[1:]
sourcefile = args.pop(0)
outdir = args.pop(0)
split_auth_source(sourcefile, outdir)
|
Add script for splitting an auth dump on primary field presence#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import re
from os import makedirs, path as P
find_token = re.compile(r'{"(100|110|111|130|148|150|151|155|162|180|181|182|185)":').findall
def split_auth_source(sourcefile, outdir):
name_parts = P.basename(sourcefile).split('.', 1)
if not P.exists(outdir):
makedirs(outdir)
outfiles = {}
try:
source = open(sourcefile)
for i, l in enumerate(source):
for token in find_token(l):
outfp = outfiles.get(token)
if not outfp:
outfile = P.join(outdir,
'%s-%s.%s' % (name_parts[0], token, name_parts[1]))
print("Opening %s for writing..." % outfile)
outfp = outfiles[token] = open(outfile, 'w')
print(l, end="", file=outfp)
break
finally:
source.close()
for outfp in outfiles.values():
outfp.close()
if __name__ == '__main__':
import sys
args = sys.argv[1:]
sourcefile = args.pop(0)
outdir = args.pop(0)
split_auth_source(sourcefile, outdir)
|
<commit_before><commit_msg>Add script for splitting an auth dump on primary field presence<commit_after>#!/usr/bin/env python
from __future__ import unicode_literals, print_function
import re
from os import makedirs, path as P
find_token = re.compile(r'{"(100|110|111|130|148|150|151|155|162|180|181|182|185)":').findall
def split_auth_source(sourcefile, outdir):
name_parts = P.basename(sourcefile).split('.', 1)
if not P.exists(outdir):
makedirs(outdir)
outfiles = {}
try:
source = open(sourcefile)
for i, l in enumerate(source):
for token in find_token(l):
outfp = outfiles.get(token)
if not outfp:
outfile = P.join(outdir,
'%s-%s.%s' % (name_parts[0], token, name_parts[1]))
print("Opening %s for writing..." % outfile)
outfp = outfiles[token] = open(outfile, 'w')
print(l, end="", file=outfp)
break
finally:
source.close()
for outfp in outfiles.values():
outfp.close()
if __name__ == '__main__':
import sys
args = sys.argv[1:]
sourcefile = args.pop(0)
outdir = args.pop(0)
split_auth_source(sourcefile, outdir)
|
|
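For orientation, a hypothetical driver for the splitter above (both paths are invented for illustration). Since name_parts splits on the first dot, 'auth.revisions.jsonl' yields the prefix 'auth' and the suffix 'revisions.jsonl', and each detected primary field lands in its own file:

# hypothetical input and output paths, for illustration only
split_auth_source('auth.revisions.jsonl', 'out')
# would produce e.g. out/auth-100.revisions.jsonl for records whose first
# matching field is 100, out/auth-150.revisions.jsonl for 150, and so on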
d55b5aa49d2ba1c98d568660b1a91b4b552872f0
|
numpy/core/tests/test_scalarprint.py
|
numpy/core/tests/test_scalarprint.py
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, assert_, run_module_suite
class TestRealScalars(TestCase):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
actual = [str(f(c)) for c in svals for f in styps]
wanted = [
'0.0', '0.0', '0.0', '0.0',
'-0.0', '-0.0', '-0.0', '-0.0',
'1.0', '1.0', '1.0', '1.0',
'-1.0', '-1.0', '-1.0', '-1.0',
'inf', 'inf', 'inf', 'inf',
'-inf', '-inf', '-inf', '-inf',
'nan', 'nan', 'nan', 'nan']
for res, val in zip(actual, wanted):
assert_(res == val)
if __name__ == "__main__":
run_module_suite()
|
Add test for printing of scalar values.
|
TST: Add test for printing of scalar values.
|
Python
|
bsd-3-clause
|
ViralLeadership/numpy,githubmlai/numpy,bringingheavendown/numpy,sigma-random/numpy,groutr/numpy,anntzer/numpy,drasmuss/numpy,chatcannon/numpy,pdebuyl/numpy,ChristopherHogan/numpy,dato-code/numpy,ViralLeadership/numpy,tacaswell/numpy,tdsmith/numpy,rudimeier/numpy,leifdenby/numpy,jakirkham/numpy,solarjoe/numpy,mwiebe/numpy,simongibbons/numpy,bmorris3/numpy,jonathanunderwood/numpy,rudimeier/numpy,Anwesh43/numpy,immerrr/numpy,hainm/numpy,yiakwy/numpy,dwillmer/numpy,has2k1/numpy,Srisai85/numpy,leifdenby/numpy,MSeifert04/numpy,jorisvandenbossche/numpy,joferkington/numpy,immerrr/numpy,mingwpy/numpy,larsmans/numpy,mathdd/numpy,utke1/numpy,dimasad/numpy,dimasad/numpy,shoyer/numpy,ewmoore/numpy,BMJHayward/numpy,mattip/numpy,madphysicist/numpy,rgommers/numpy,SunghanKim/numpy,GrimDerp/numpy,mhvk/numpy,cowlicks/numpy,skwbc/numpy,mathdd/numpy,simongibbons/numpy,jankoslavic/numpy,brandon-rhodes/numpy,njase/numpy,SiccarPoint/numpy,stuarteberg/numpy,andsor/numpy,pdebuyl/numpy,shoyer/numpy,mindw/numpy,pbrod/numpy,naritta/numpy,MSeifert04/numpy,stuarteberg/numpy,rajathkumarmp/numpy,ssanderson/numpy,githubmlai/numpy,sigma-random/numpy,WarrenWeckesser/numpy,maniteja123/numpy,jorisvandenbossche/numpy,ogrisel/numpy,embray/numpy,githubmlai/numpy,ahaldane/numpy,jschueller/numpy,ssanderson/numpy,AustereCuriosity/numpy,dato-code/numpy,ajdawson/numpy,WillieMaddox/numpy,Srisai85/numpy,hainm/numpy,ChristopherHogan/numpy,gmcastil/numpy,skwbc/numpy,embray/numpy,utke1/numpy,mhvk/numpy,cowlicks/numpy,rudimeier/numpy,tynn/numpy,Yusa95/numpy,mwiebe/numpy,Eric89GXL/numpy,maniteja123/numpy,ahaldane/numpy,dato-code/numpy,Anwesh43/numpy,ahaldane/numpy,argriffing/numpy,larsmans/numpy,Linkid/numpy,GaZ3ll3/numpy,rherault-insa/numpy,skymanaditya1/numpy,ContinuumIO/numpy,bringingheavendown/numpy,ssanderson/numpy,sinhrks/numpy,nbeaver/numpy,dwillmer/numpy,solarjoe/numpy,dwillmer/numpy,pbrod/numpy,numpy/numpy,pizzathief/numpy,GrimDerp/numpy,GrimDerp/numpy,rherault-insa/numpy,githubmlai/numpy,dwillmer/numpy,BabeNovelty/numpy,pizzathief/numpy,ekalosak/numpy,ddasilva/numpy,jakirkham/numpy,numpy/numpy,seberg/numpy,bringingheavendown/numpy,ESSS/numpy,jakirkham/numpy,rmcgibbo/numpy,behzadnouri/numpy,tacaswell/numpy,nbeaver/numpy,KaelChen/numpy,charris/numpy,jorisvandenbossche/numpy,ContinuumIO/numpy,ChanderG/numpy,mathdd/numpy,BabeNovelty/numpy,NextThought/pypy-numpy,Dapid/numpy,mortada/numpy,empeeu/numpy,nguyentu1602/numpy,rherault-insa/numpy,shoyer/numpy,tdsmith/numpy,ChanderG/numpy,moreati/numpy,seberg/numpy,mindw/numpy,musically-ut/numpy,SunghanKim/numpy,madphysicist/numpy,pyparallel/numpy,sonnyhu/numpy,MSeifert04/numpy,dch312/numpy,felipebetancur/numpy,rhythmsosad/numpy,skwbc/numpy,njase/numpy,empeeu/numpy,larsmans/numpy,b-carter/numpy,rmcgibbo/numpy,seberg/numpy,MichaelAquilina/numpy,SiccarPoint/numpy,Yusa95/numpy,NextThought/pypy-numpy,Srisai85/numpy,jorisvandenbossche/numpy,ChanderG/numpy,rgommers/numpy,ChanderG/numpy,ESSS/numpy,kiwifb/numpy,felipebetancur/numpy,Eric89GXL/numpy,kirillzhuravlev/numpy,mortada/numpy,mingwpy/numpy,trankmichael/numpy,groutr/numpy,bmorris3/numpy,endolith/numpy,empeeu/numpy,anntzer/numpy,kiwifb/numpy,kirillzhuravlev/numpy,jakirkham/numpy,CMartelLML/numpy,joferkington/numpy,argriffing/numpy,nguyentu1602/numpy,ahaldane/numpy,rajathkumarmp/numpy,ekalosak/numpy,has2k1/numpy,SiccarPoint/numpy,ddasilva/numpy,jakirkham/numpy,immerrr/numpy,pyparallel/numpy,NextThought/pypy-numpy,ChristopherHogan/numpy,jschueller/numpy,mortada/numpy,jorisvandenbossche/numpy,mattip/numpy,shoyer/numpy,BMJHayward/numpy,SiccarPoint/numpy
,numpy/numpy,grlee77/numpy,sigma-random/numpy,NextThought/pypy-numpy,GaZ3ll3/numpy,brandon-rhodes/numpy,ewmoore/numpy,mattip/numpy,tynn/numpy,CMartelLML/numpy,Anwesh43/numpy,brandon-rhodes/numpy,ahaldane/numpy,drasmuss/numpy,GrimDerp/numpy,chiffa/numpy,SunghanKim/numpy,stuarteberg/numpy,grlee77/numpy,behzadnouri/numpy,bertrand-l/numpy,yiakwy/numpy,trankmichael/numpy,b-carter/numpy,pbrod/numpy,MSeifert04/numpy,WarrenWeckesser/numpy,BMJHayward/numpy,rhythmsosad/numpy,tacaswell/numpy,Anwesh43/numpy,charris/numpy,behzadnouri/numpy,WarrenWeckesser/numpy,jankoslavic/numpy,BabeNovelty/numpy,simongibbons/numpy,madphysicist/numpy,mhvk/numpy,anntzer/numpy,sonnyhu/numpy,moreati/numpy,grlee77/numpy,charris/numpy,sigma-random/numpy,chiffa/numpy,rajathkumarmp/numpy,naritta/numpy,sinhrks/numpy,sonnyhu/numpy,BMJHayward/numpy,has2k1/numpy,jschueller/numpy,njase/numpy,mingwpy/numpy,simongibbons/numpy,ajdawson/numpy,yiakwy/numpy,abalkin/numpy,ajdawson/numpy,KaelChen/numpy,felipebetancur/numpy,b-carter/numpy,BabeNovelty/numpy,Yusa95/numpy,ContinuumIO/numpy,andsor/numpy,pizzathief/numpy,ESSS/numpy,joferkington/numpy,rgommers/numpy,naritta/numpy,chatcannon/numpy,jankoslavic/numpy,ogrisel/numpy,grlee77/numpy,tdsmith/numpy,skymanaditya1/numpy,nguyentu1602/numpy,mingwpy/numpy,embray/numpy,rgommers/numpy,simongibbons/numpy,mhvk/numpy,rhythmsosad/numpy,Linkid/numpy,nguyentu1602/numpy,MaPePeR/numpy,MaPePeR/numpy,MichaelAquilina/numpy,mortada/numpy,rmcgibbo/numpy,utke1/numpy,skymanaditya1/numpy,joferkington/numpy,AustereCuriosity/numpy,cjermain/numpy,dch312/numpy,GaZ3ll3/numpy,CMartelLML/numpy,pdebuyl/numpy,gfyoung/numpy,cowlicks/numpy,mindw/numpy,rmcgibbo/numpy,ViralLeadership/numpy,grlee77/numpy,anntzer/numpy,AustereCuriosity/numpy,empeeu/numpy,dimasad/numpy,pbrod/numpy,stuarteberg/numpy,musically-ut/numpy,sonnyhu/numpy,solarjoe/numpy,bertrand-l/numpy,embray/numpy,kirillzhuravlev/numpy,rudimeier/numpy,argriffing/numpy,leifdenby/numpy,jschueller/numpy,rajathkumarmp/numpy,cjermain/numpy,endolith/numpy,gfyoung/numpy,pizzathief/numpy,dch312/numpy,felipebetancur/numpy,KaelChen/numpy,mindw/numpy,numpy/numpy,kirillzhuravlev/numpy,brandon-rhodes/numpy,ekalosak/numpy,nbeaver/numpy,hainm/numpy,maniteja123/numpy,has2k1/numpy,musically-ut/numpy,drasmuss/numpy,bmorris3/numpy,pbrod/numpy,Eric89GXL/numpy,MSeifert04/numpy,KaelChen/numpy,dimasad/numpy,ogrisel/numpy,CMartelLML/numpy,musically-ut/numpy,GaZ3ll3/numpy,andsor/numpy,bertrand-l/numpy,mathdd/numpy,madphysicist/numpy,pdebuyl/numpy,jonathanunderwood/numpy,bmorris3/numpy,ewmoore/numpy,gmcastil/numpy,pyparallel/numpy,ekalosak/numpy,ChristopherHogan/numpy,abalkin/numpy,MichaelAquilina/numpy,WarrenWeckesser/numpy,Srisai85/numpy,ddasilva/numpy,gfyoung/numpy,dch312/numpy,ogrisel/numpy,naritta/numpy,Linkid/numpy,andsor/numpy,shoyer/numpy,mhvk/numpy,cjermain/numpy,ajdawson/numpy,Dapid/numpy,MaPePeR/numpy,sinhrks/numpy,hainm/numpy,moreati/numpy,MaPePeR/numpy,kiwifb/numpy,MichaelAquilina/numpy,Linkid/numpy,trankmichael/numpy,jankoslavic/numpy,ogrisel/numpy,endolith/numpy,immerrr/numpy,rhythmsosad/numpy,tdsmith/numpy,tynn/numpy,chiffa/numpy,madphysicist/numpy,yiakwy/numpy,gmcastil/numpy,mwiebe/numpy,larsmans/numpy,mattip/numpy,WarrenWeckesser/numpy,endolith/numpy,ewmoore/numpy,seberg/numpy,chatcannon/numpy,pizzathief/numpy,jonathanunderwood/numpy,ewmoore/numpy,cjermain/numpy,abalkin/numpy,Eric89GXL/numpy,WillieMaddox/numpy,Yusa95/numpy,charris/numpy,sinhrks/numpy,groutr/numpy,cowlicks/numpy,Dapid/numpy,dato-code/numpy,trankmichael/numpy,skymanaditya1/numpy,SunghanKim/numpy,WillieMaddox/numpy,embray/numpy
|
TST: Add test for printing of scalar values.
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, assert_, run_module_suite
class TestRealScalars(TestCase):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
actual = [str(f(c)) for c in svals for f in styps]
wanted = [
'0.0', '0.0', '0.0', '0.0',
'-0.0', '-0.0', '-0.0', '-0.0',
'1.0', '1.0', '1.0', '1.0',
'-1.0', '-1.0', '-1.0', '-1.0',
'inf', 'inf', 'inf', 'inf',
'-inf', '-inf', '-inf', '-inf',
'nan', 'nan', 'nan', 'nan']
for res, val in zip(actual, wanted):
assert_(res == val)
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>TST: Add test for printing of scalar values.<commit_after>
|
# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, assert_, run_module_suite
class TestRealScalars(TestCase):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
actual = [str(f(c)) for c in svals for f in styps]
wanted = [
'0.0', '0.0', '0.0', '0.0',
'-0.0', '-0.0', '-0.0', '-0.0',
'1.0', '1.0', '1.0', '1.0',
'-1.0', '-1.0', '-1.0', '-1.0',
'inf', 'inf', 'inf', 'inf',
'-inf', '-inf', '-inf', '-inf',
'nan', 'nan', 'nan', 'nan']
for res, val in zip(actual, wanted):
assert_(res == val)
if __name__ == "__main__":
run_module_suite()
|
TST: Add test for printing of scalar values.# -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, assert_, run_module_suite
class TestRealScalars(TestCase):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
actual = [str(f(c)) for c in svals for f in styps]
wanted = [
'0.0', '0.0', '0.0', '0.0',
'-0.0', '-0.0', '-0.0', '-0.0',
'1.0', '1.0', '1.0', '1.0',
'-1.0', '-1.0', '-1.0', '-1.0',
'inf', 'inf', 'inf', 'inf',
'-inf', '-inf', '-inf', '-inf',
'nan', 'nan', 'nan', 'nan']
for res, val in zip(actual, wanted):
assert_(res == val)
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>TST: Add test for printing of scalar values.<commit_after># -*- coding: utf-8 -*-
""" Test printing of scalar types.
"""
import numpy as np
from numpy.testing import TestCase, assert_, run_module_suite
class TestRealScalars(TestCase):
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
actual = [str(f(c)) for c in svals for f in styps]
wanted = [
'0.0', '0.0', '0.0', '0.0',
'-0.0', '-0.0', '-0.0', '-0.0',
'1.0', '1.0', '1.0', '1.0',
'-1.0', '-1.0', '-1.0', '-1.0',
'inf', 'inf', 'inf', 'inf',
'-inf', '-inf', '-inf', '-inf',
'nan', 'nan', 'nan', 'nan']
for res, val in zip(actual, wanted):
assert_(res == val)
if __name__ == "__main__":
run_module_suite()
|
|
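One detail worth spelling out in the scalar-printing test above: the comprehension iterates values in the outer loop and types in the inner loop, so the wanted list is laid out in value-major order, four type entries per value. A tiny sketch of the same nesting:

vals = ['x', 'y']
tags = ['1', '2']
# outer loop varies slowest, mirroring the svals/styps comprehension
print([v + t for v in vals for t in tags])  # ['x1', 'x2', 'y1', 'y2']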
f293d3d6aff79f424ab290347f99b85ca993e196
|
obfsproxy/common/transport_config.py
|
obfsproxy/common/transport_config.py
|
# -*- coding: utf-8 -*-
"""
Provides a class which represents a pluggable transport's configuration.
"""
class TransportConfig( object ):
"""
This class embeds configuration options for pluggable transport modules.
The options are set by obfsproxy and then passed to the transport's class
constructor. The pluggable transport might want to use these options but
does not have to. An example of such an option is the state location which
can be used by the pluggable transport to store persistent information.
"""
def __init__( self ):
"""
Initialise a `TransportConfig' object.
"""
self.stateLocation = None
def setStateLocation( self, stateLocation ):
"""
Set the given `stateLocation'.
"""
self.stateLocation = stateLocation
def getStateLocation( self ):
"""
Return the stored `stateLocation'.
"""
return self.stateLocation
def __str__( self ):
"""
Return a string representation of the `TransportConfig' instance.
"""
return str(vars(self))
|
Add a `TransportConfig' class which should contain configuration options (such as the state location) meant for pluggable transport modules.
|
Add a `TransportConfig' class which should contain configuration options (such
as the state location) meant for pluggable transport modules.
|
Python
|
bsd-3-clause
|
david415/obfsproxy,catinred2/obfsproxy,infinity0/obfsproxy,Yawning/obfsproxy-wfpadtools,sunsong/obfsproxy,isislovecruft/obfsproxy,qdzheng/obfsproxy,masterkorp/obfsproxy,Yawning/obfsproxy,NullHypothesis/obfsproxy
|
Add a `TransportConfig' class which should contain configuration options (such
as the state location) meant for pluggable transport modules.
|
# -*- coding: utf-8 -*-
"""
Provides a class which represents a pluggable transport's configuration.
"""
class TransportConfig( object ):
"""
This class embeds configuration options for pluggable transport modules.
The options are set by obfsproxy and then passed to the transport's class
constructor. The pluggable transport might want to use these options but
does not have to. An example of such an option is the state location which
can be used by the pluggable transport to store persistent information.
"""
def __init__( self ):
"""
Initialise a `TransportConfig' object.
"""
self.stateLocation = None
def setStateLocation( self, stateLocation ):
"""
Set the given `stateLocation'.
"""
self.stateLocation = stateLocation
def getStateLocation( self ):
"""
Return the stored `stateLocation'.
"""
return self.stateLocation
def __str__( self ):
"""
Return a string representation of the `TransportConfig' instance.
"""
return str(vars(self))
|
<commit_before><commit_msg>Add a `TransportConfig' class which should contain configuration options (such
as the state location) meant for pluggable transport modules.<commit_after>
|
# -*- coding: utf-8 -*-
"""
Provides a class which represents a pluggable transport's configuration.
"""
class TransportConfig( object ):
"""
This class embeds configuration options for pluggable transport modules.
The options are set by obfsproxy and then passed to the transport's class
constructor. The pluggable transport might want to use these options but
does not have to. An example of such an option is the state location which
can be used by the pluggable transport to store persistent information.
"""
def __init__( self ):
"""
Initialise a `TransportConfig' object.
"""
self.stateLocation = None
def setStateLocation( self, stateLocation ):
"""
Set the given `stateLocation'.
"""
self.stateLocation = stateLocation
def getStateLocation( self ):
"""
Return the stored `stateLocation'.
"""
return self.stateLocation
def __str__( self ):
"""
Return a string representation of the `TransportConfig' instance.
"""
return str(vars(self))
|
Add a `TransportConfig' class which should contain configuration options (such
as the state location) meant for pluggable transport modules.# -*- coding: utf-8 -*-
"""
Provides a class which represents a pluggable transport's configuration.
"""
class TransportConfig( object ):
"""
This class embeds configuration options for pluggable transport modules.
The options are set by obfsproxy and then passed to the transport's class
constructor. The pluggable transport might want to use these options but
does not have to. An example of such an option is the state location which
can be used by the pluggable transport to store persistent information.
"""
def __init__( self ):
"""
Initialise a `TransportConfig' object.
"""
self.stateLocation = None
def setStateLocation( self, stateLocation ):
"""
Set the given `stateLocation'.
"""
self.stateLocation = stateLocation
def getStateLocation( self ):
"""
Return the stored `stateLocation'.
"""
return self.stateLocation
def __str__( self ):
"""
Return a string representation of the `TransportConfig' instance.
"""
return str(vars(self))
|
<commit_before><commit_msg>Add a `TransportConfig' class which should contain configuration options (such
as the state location) meant for pluggable transport modules.<commit_after># -*- coding: utf-8 -*-
"""
Provides a class which represents a pluggable transport's configuration.
"""
class TransportConfig( object ):
"""
This class embeds configuration options for pluggable transport modules.
The options are set by obfsproxy and then passed to the transport's class
constructor. The pluggable transport might want to use these options but
does not have to. An example of such an option is the state location which
can be used by the pluggable transport to store persistent information.
"""
def __init__( self ):
"""
Initialise a `TransportConfig' object.
"""
self.stateLocation = None
def setStateLocation( self, stateLocation ):
"""
Set the given `stateLocation'.
"""
self.stateLocation = stateLocation
def getStateLocation( self ):
"""
Return the stored `stateLocation'.
"""
return self.stateLocation
def __str__( self ):
"""
Return a string representation of the `TransportConfig' instance.
"""
return str(vars(self))
|
|
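A minimal usage sketch for the TransportConfig class above (the state path is invented for illustration):

config = TransportConfig()
config.setStateLocation('/var/lib/obfsproxy/state')   # hypothetical path
assert config.getStateLocation() == '/var/lib/obfsproxy/state'
print(config)  # {'stateLocation': '/var/lib/obfsproxy/state'}, via str(vars(self))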
1ddae2ddab2f0681d52f46baf2f0ec3926508d1b
|
bin/cwtv_json_to_srt.py
|
bin/cwtv_json_to_srt.py
|
#!/usr/bin/env python3
# encoding: utf-8
'''
This script will convert a closed captioning subtitle .json file as found on the
CWTV streaming website, to a regular .srt for use in common media players.
.json example:
{
"endTime": 10.04,
"guid": "ffffffff-0000-1111-2222-aaaaaaaaaaaa",
"hitType": "tag",
"id": "abc123abc123abc123abc123",
"metadata": {
"ID": "",
"Language": "en",
"Text": "All that glitters"
},
"segmentTypeId": "abc123abc123abc123abc123",
"startTime": 1.002999,
"subTrack": null,
"tags": [],
"track": "Closed Captioning"
}
.srt example:
1
00:00:10,500 --> 00:00:13,000
All that glitters
'''
from __future__ import print_function
import json
import sys
def seconds_to_hms(seconds):
whole_seconds, microseconds = str(seconds).split('.')
whole_seconds = int(whole_seconds)
# The seconds part is just an int, but we need to turn the microseconds part
# into milliseconds. Subtracting seconds from this value will give us a
# float between 0.000000 and 0.999999, and multiplying that by a million is
# the number of microseconds as an int, from which we can have milliseconds.
microseconds = int((float(seconds) - whole_seconds) * 1000000)
milliseconds = int(microseconds / 1000) + whole_seconds * 1000
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return '{:02d}:{:02d}:{:02d},{}'.format(hours, minutes, seconds,
str(milliseconds * 10).ljust(3, '0'))
if len(sys.argv) != 2:
print('You must provide a .json input file.')
sys.exit(1)
with open(sys.argv[1], 'rU') as f:
for index, item in enumerate(json.load(f)):
text = item['metadata']['Text']
start = seconds_to_hms(item['startTime'])
end = seconds_to_hms(item['endTime'])
print('{}\n{} --> {}\n{}\n'.format(index + 1, start, end, text))
|
Add CWTV .json to .srt script.
|
Add CWTV .json to .srt script.
|
Python
|
mit
|
alimony/dotfiles,alimony/dotfiles
|
Add CWTV .json to .srt script.
|
#!/usr/bin/env python3
# encoding: utf-8
'''
This script will convert a closed captioning subtitle .json file as found on the
CWTV streaming website, to a regular .srt for use in common media players.
.json example:
{
"endTime": 10.04,
"guid": "ffffffff-0000-1111-2222-aaaaaaaaaaaa",
"hitType": "tag",
"id": "abc123abc123abc123abc123",
"metadata": {
"ID": "",
"Language": "en",
"Text": "All that glitters"
},
"segmentTypeId": "abc123abc123abc123abc123",
"startTime": 1.002999,
"subTrack": null,
"tags": [],
"track": "Closed Captioning"
}
.srt example:
1
00:00:10,500 --> 00:00:13,000
All that glitters
'''
from __future__ import print_function
import json
import sys
def seconds_to_hms(seconds):
whole_seconds, microseconds = str(seconds).split('.')
whole_seconds = int(whole_seconds)
# The seconds part is just an int, but we need to turn the microseconds part
# into milliseconds. Subtracting seconds from this value will give us a
# float between 0.000000 and 0.999999, and multiplying that by a million is
# the number of microseconds as an int, from which we can have milliseconds.
microseconds = int((float(seconds) - whole_seconds) * 1000000)
milliseconds = int(microseconds / 1000) + whole_seconds * 1000
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return '{:02d}:{:02d}:{:02d},{}'.format(hours, minutes, seconds,
str(milliseconds * 10).ljust(3, '0'))
if len(sys.argv) != 2:
print('You must provide a .json input file.')
sys.exit(1)
with open(sys.argv[1], 'rU') as f:
for index, item in enumerate(json.load(f)):
text = item['metadata']['Text']
start = seconds_to_hms(item['startTime'])
end = seconds_to_hms(item['endTime'])
print('{}\n{} --> {}\n{}\n'.format(index + 1, start, end, text))
|
<commit_before><commit_msg>Add CWTV .json to .srt script.<commit_after>
|
#!/usr/bin/env python3
# encoding: utf-8
'''
This script will convert a closed captioning subtitle .json file as found on the
CWTV streaming website, to a regular .srt for use in common media players.
.json example:
{
"endTime": 10.04,
"guid": "ffffffff-0000-1111-2222-aaaaaaaaaaaa",
"hitType": "tag",
"id": "abc123abc123abc123abc123",
"metadata": {
"ID": "",
"Language": "en",
"Text": "All that glitters"
},
"segmentTypeId": "abc123abc123abc123abc123",
"startTime": 1.002999,
"subTrack": null,
"tags": [],
"track": "Closed Captioning"
}
.srt example:
1
00:00:10,500 --> 00:00:13,000
All that glitters
'''
from __future__ import print_function
import json
import sys
def seconds_to_hms(seconds):
whole_seconds, microseconds = str(seconds).split('.')
whole_seconds = int(whole_seconds)
# The seconds part is just an int, but we need to turn the microseconds part
# into milliseconds. Subtracting seconds from this value will give us a
# float between 0.000000 and 0.999999, and multiplying that by a million is
# the number of microseconds as an int, from which we can have milliseconds.
microseconds = int((float(seconds) - whole_seconds) * 1000000)
milliseconds = int(microseconds / 1000) + whole_seconds * 1000
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return '{:02d}:{:02d}:{:02d},{}'.format(hours, minutes, seconds,
str(milliseconds * 10).ljust(3, '0'))
if len(sys.argv) != 2:
print('You must provide a .json input file.')
sys.exit(1)
with open(sys.argv[1], 'rU') as f:
for index, item in enumerate(json.load(f)):
text = item['metadata']['Text']
start = seconds_to_hms(item['startTime'])
end = seconds_to_hms(item['endTime'])
print('{}\n{} --> {}\n{}\n'.format(index + 1, start, end, text))
|
Add CWTV .json to .srt script.#!/usr/bin/env python3
# encoding: utf-8
'''
This script will convert a closed captioning subtitle .json file as found on the
CWTV streaming website, to a regular .srt for use in common media players.
.json example:
{
"endTime": 10.04,
"guid": "ffffffff-0000-1111-2222-aaaaaaaaaaaa",
"hitType": "tag",
"id": "abc123abc123abc123abc123",
"metadata": {
"ID": "",
"Language": "en",
"Text": "All that glitters"
},
"segmentTypeId": "abc123abc123abc123abc123",
"startTime": 1.002999,
"subTrack": null,
"tags": [],
"track": "Closed Captioning"
}
.srt example:
1
00:00:10,500 --> 00:00:13,000
All that glitters
'''
from __future__ import print_function
import json
import sys
def seconds_to_hms(seconds):
whole_seconds, microseconds = str(seconds).split('.')
whole_seconds = int(whole_seconds)
# The seconds part is just an int, but we need to turn the microseconds part
# into milliseconds. Subtracting seconds from this value will give us a
# float between 0.000000 and 0.999999, and multiplying that by a million is
# the number of microseconds as an int, from which we can have milliseconds.
microseconds = int((float(seconds) - whole_seconds) * 1000000)
milliseconds = int(microseconds / 1000) + whole_seconds * 1000
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return '{:02d}:{:02d}:{:02d},{}'.format(hours, minutes, seconds,
str(milliseconds * 10).ljust(3, '0'))
if len(sys.argv) != 2:
print('You must provide a .json input file.')
sys.exit(1)
with open(sys.argv[1], 'rU') as f:
for index, item in enumerate(json.load(f)):
text = item['metadata']['Text']
start = seconds_to_hms(item['startTime'])
end = seconds_to_hms(item['endTime'])
print('{}\n{} --> {}\n{}\n'.format(index + 1, start, end, text))
|
<commit_before><commit_msg>Add CWTV .json to .srt script.<commit_after>#!/usr/bin/env python3
# encoding: utf-8
'''
This script will convert a closed captioning subtitle .json file as found on the
CWTV streaming website, to a regular .srt for use in common media players.
.json example:
{
"endTime": 10.04,
"guid": "ffffffff-0000-1111-2222-aaaaaaaaaaaa",
"hitType": "tag",
"id": "abc123abc123abc123abc123",
"metadata": {
"ID": "",
"Language": "en",
"Text": "All that glitters"
},
"segmentTypeId": "abc123abc123abc123abc123",
"startTime": 1.002999,
"subTrack": null,
"tags": [],
"track": "Closed Captioning"
}
.srt example:
1
00:00:10,500 --> 00:00:13,000
All that glitters
'''
from __future__ import print_function
import json
import sys
def seconds_to_hms(seconds):
whole_seconds, microseconds = str(seconds).split('.')
whole_seconds = int(whole_seconds)
# The seconds part is just an int, but we need to turn the microseconds part
# into milliseconds. Subtracting seconds from this value will give us a
# float between 0.000000 and 0.999999, and multiplying that by a million is
# the number of microseconds as an int, from which we can have milliseconds.
microseconds = int((float(seconds) - whole_seconds) * 1000000)
milliseconds = int(microseconds / 1000) + whole_seconds * 1000
seconds, milliseconds = divmod(milliseconds, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return '{:02d}:{:02d}:{:02d},{}'.format(hours, minutes, seconds,
str(milliseconds * 10).ljust(3, '0'))
if len(sys.argv) != 2:
print('You must provide a .json input file.')
sys.exit(1)
with open(sys.argv[1], 'rU') as f:
for index, item in enumerate(json.load(f)):
text = item['metadata']['Text']
start = seconds_to_hms(item['startTime'])
end = seconds_to_hms(item['endTime'])
print('{}\n{} --> {}\n{}\n'.format(index + 1, start, end, text))
|
|
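The microsecond-to-millisecond arithmetic explained in the comments of seconds_to_hms above can be cross-checked against a sketch built on datetime.timedelta, assuming the timestamps fit in a float:

from datetime import timedelta

def hms(seconds):
    total_ms = int(timedelta(seconds=float(seconds)).total_seconds() * 1000)
    s, ms = divmod(total_ms, 1000)   # peel off the millisecond remainder
    m, s = divmod(s, 60)
    h, m = divmod(m, 60)
    return '{:02d}:{:02d}:{:02d},{:03d}'.format(h, m, s, ms)

print(hms(1.002999))  # 00:00:01,002
print(hms(10.5))      # 00:00:10,500, matching the .srt example above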
9502c0e816097cf65fa92c6dd255c3356cf20964
|
test/api_class_repr_test.py
|
test/api_class_repr_test.py
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from __future__ import print_function
import pytest
from .. import jenkins_api
from .framework import api_select
from .cfg import ApiType
@pytest.mark.not_apis(ApiType.MOCK, ApiType.SCRIPT)
def test_api_class_repr_job(api_type):
api = api_select.api(__file__, api_type, login=True)
job = jenkins_api.ApiJob(api, {}, 'my-job')
jrd = eval(repr(job))
assert jrd == {'name': 'my-job', 'dct': {}}
invocation = jenkins_api.Invocation(job, "http://dummy", 'hello')
assert repr(invocation) == "Invocation: 'http://dummy' None None"
|
Test jenkins_api ApiJob and Invocation classes __repr__ methods
|
Test jenkins_api ApiJob and Invocation classes __repr__ methods
|
Python
|
bsd-3-clause
|
lhupfeldt/jenkinsflow,lhupfeldt/jenkinsflow,lechat/jenkinsflow,lhupfeldt/jenkinsflow,lechat/jenkinsflow,lechat/jenkinsflow,lechat/jenkinsflow,lhupfeldt/jenkinsflow
|
Test jenkins_api ApiJob and Invocation classes __repr__ methods
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from __future__ import print_function
import pytest
from .. import jenkins_api
from .framework import api_select
from .cfg import ApiType
@pytest.mark.not_apis(ApiType.MOCK, ApiType.SCRIPT)
def test_api_class_repr_job(api_type):
api = api_select.api(__file__, api_type, login=True)
job = jenkins_api.ApiJob(api, {}, 'my-job')
jrd = eval(repr(job))
assert jrd == {'name': 'my-job', 'dct': {}}
invocation = jenkins_api.Invocation(job, "http://dummy", 'hello')
assert repr(invocation) == "Invocation: 'http://dummy' None None"
|
<commit_before><commit_msg>Test jenkins_api ApiJob and Invocation classes __repr__ methods<commit_after>
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from __future__ import print_function
import pytest
from .. import jenkins_api
from .framework import api_select
from .cfg import ApiType
@pytest.mark.not_apis(ApiType.MOCK, ApiType.SCRIPT)
def test_api_class_repr_job(api_type):
api = api_select.api(__file__, api_type, login=True)
job = jenkins_api.ApiJob(api, {}, 'my-job')
jrd = eval(repr(job))
assert jrd == {'name': 'my-job', 'dct': {}}
invocation = jenkins_api.Invocation(job, "http://dummy", 'hello')
assert repr(invocation) == "Invocation: 'http://dummy' None None"
|
Test jenkins_api ApiJob and Invocation classes __repr__ methods# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from __future__ import print_function
import pytest
from .. import jenkins_api
from .framework import api_select
from .cfg import ApiType
@pytest.mark.not_apis(ApiType.MOCK, ApiType.SCRIPT)
def test_api_class_repr_job(api_type):
api = api_select.api(__file__, api_type, login=True)
job = jenkins_api.ApiJob(api, {}, 'my-job')
jrd = eval(repr(job))
assert jrd == {'name': 'my-job', 'dct': {}}
invocation = jenkins_api.Invocation(job, "http://dummy", 'hello')
assert repr(invocation) == "Invocation: 'http://dummy' None None"
|
<commit_before><commit_msg>Test jenkins_api ApiJob and Invocation classes __repr__ methods<commit_after># Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from __future__ import print_function
import pytest
from .. import jenkins_api
from .framework import api_select
from .cfg import ApiType
@pytest.mark.not_apis(ApiType.MOCK, ApiType.SCRIPT)
def test_api_class_repr_job(api_type):
api = api_select.api(__file__, api_type, login=True)
job = jenkins_api.ApiJob(api, {}, 'my-job')
jrd = eval(repr(job))
assert jrd == {'name': 'my-job', 'dct': {}}
invocation = jenkins_api.Invocation(job, "http://dummy", 'hello')
assert repr(invocation) == "Invocation: 'http://dummy' None None"
|
|
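The eval(repr(job)) round-trip in the test above depends on __repr__ emitting a valid Python literal; a self-contained sketch of that contract (the class name here is invented):

class FakeJob(object):
    def __init__(self, name, dct):
        self.name, self.dct = name, dct
    def __repr__(self):
        # emit a dict literal so eval() can reconstruct the data
        return repr({'name': self.name, 'dct': self.dct})

assert eval(repr(FakeJob('my-job', {}))) == {'name': 'my-job', 'dct': {}}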
9655b7349f48fe57a72897800d353aa1df4d5783
|
Problems/fibMemoization.py
|
Problems/fibMemoization.py
|
#!/usr/local/bin/python3
def main():
# Test suite
tests = [
[None, None], # Should throw a TypeError
[4, 3],
[7, 13]
]
for item in tests:
try:
temp_result = fib_memoization(item[0])
if temp_result == item[1]:
print('PASSED: fib_memoization({}) returned {}'.format(item[0], temp_result))
else:
print('FAILED: fib_memoization({}) returned {}, should have returned {}'.format(item[0], temp_result, item[1]))
except TypeError:
print('PASSED TypeError test')
return 0
def fib_memoization(n, d={1:1, 2:1}):
'''
Calculates the nth Fibonacci number
Input: n is integer, d is dictionary of previously calculated Fibonacci numbers
Output: integer, the nth Fibonacci number
'''
# Input checks
if type(n) is not int or type(d) is not dict:
raise TypeError('n must be an integer, d must be a dictionary')
if n in d:
return d[n]
else:
result = fib_memoization(n-1, d) + fib_memoization(n-2, d)
d[n] = result
return result
if __name__ == '__main__':
main()
|
Add memoization version to find nth Fibonacci number
|
Add memoization version to find nth Fibonacci number
|
Python
|
mit
|
HKuz/Test_Code
|
Add memoization version to find nth Fibonacci number
|
#!/usr/local/bin/python3
def main():
# Test suite
tests = [
[None, None], # Should throw a TypeError
[4, 3],
[7, 13]
]
for item in tests:
try:
temp_result = fib_memoization(item[0])
if temp_result == item[1]:
print('PASSED: fib_memoization({}) returned {}'.format(item[0], temp_result))
else:
print('FAILED: fib_memoization({}) returned {}, should have returned {}'.format(item[0], temp_result, item[1]))
except TypeError:
print('PASSED TypeError test')
return 0
def fib_memoization(n, d={1:1, 2:1}):
'''
Calculates the nth Fibonacci number
Input: n is integer, d is dictionary of previously calculated Fibonacci numbers
Output: integer, the nth Fibonacci number
'''
# Input checks
if type(n) is not int or type(d) is not dict:
raise TypeError('n must be an integer, d must be a dictionary')
if n in d:
return d[n]
else:
result = fib_memoization(n-1, d) + fib_memoization(n-2, d)
d[n] = result
return result
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add memoization version to find nth Fibonacci number<commit_after>
|
#!/usr/local/bin/python3
def main():
# Test suite
tests = [
[None, None], # Should throw a TypeError
[4, 3],
[7, 13]
]
for item in tests:
try:
temp_result = fib_memoization(item[0])
if temp_result == item[1]:
print('PASSED: fib_memoization({}) returned {}'.format(item[0], temp_result))
else:
print('FAILED: fib_memoization({}) returned {}, should have returned {}'.format(item[0], temp_result, item[1]))
except TypeError:
print('PASSED TypeError test')
return 0
def fib_memoization(n, d={1:1, 2:1}):
'''
Calculates the nth Fibonacci number
Input: n is integer, d is dictionary of previously calculated Fibonacci numbers
Output: integer, the nth Fibonacci number
'''
# Input checks
if type(n) is not int or type(d) is not dict:
raise TypeError('n must be an integer, d must be a dictionary')
if n in d:
return d[n]
else:
result = fib_memoization(n-1, d) + fib_memoization(n-2, d)
d[n] = result
return result
if __name__ == '__main__':
main()
|
Add memoization version to find nth Fibonacci number#!/usr/local/bin/python3
def main():
# Test suite
tests = [
[None, None], # Should throw a TypeError
[4, 3],
[7, 13]
]
for item in tests:
try:
temp_result = fib_memoization(item[0])
if temp_result == item[1]:
print('PASSED: fib_memoization({}) returned {}'.format(item[0], temp_result))
else:
print('FAILED: fib_memoization({}) returned {}, should have returned {}'.format(item[0], temp_result, item[1]))
except TypeError:
print('PASSED TypeError test')
return 0
def fib_memoization(n, d={1:1, 2:1}):
'''
Calculates the nth Fibonacci number
Input: n is integer, d is dictionary of previously calculated Fibonacci numbers
Output: integer, the nth Fibonacci number
'''
# Input checks
if type(n) is not int or type(d) is not dict:
raise TypeError('n must be an integer, d must be a dictionary')
if n in d:
return d[n]
else:
result = fib_memoization(n-1, d) + fib_memoization(n-2, d)
d[n] = result
return result
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add memoization version to find nth Fibonacci number<commit_after>#!/usr/local/bin/python3
def main():
# Test suite
tests = [
[None, None], # Should throw a TypeError
[4, 3],
[7, 13]
]
for item in tests:
try:
temp_result = fib_memoization(item[0])
if temp_result == item[1]:
print('PASSED: fib_memoization({}) returned {}'.format(item[0], temp_result))
else:
print('FAILED: fib_memoization({}) returned {}, should have returned {}'.format(item[0], temp_result, item[1]))
except TypeError:
print('PASSED TypeError test')
return 0
def fib_memoization(n, d={1:1, 2:1}):
'''
Calculates the nth Fibonacci number
Input: n is integer, d is dictionary of previously calculated Fibonacci numbers
Output: integer, the nth Fibonacci number
'''
# Input checks
if type(n) is not int or type(d) is not dict:
raise TypeError('n must be an integer, d must be a dictionary')
if n in d:
return d[n]
else:
result = fib_memoization(n-1, d) + fib_memoization(n-2, d)
d[n] = result
return result
if __name__ == '__main__':
main()
|
|
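The d={1: 1, 2: 1} default above works because Python evaluates default arguments once at definition time, so the same dict persists across calls and serves as the memo table. A standard-library sketch of the same idea, assuming Python 3.2+:

from functools import lru_cache

@lru_cache(maxsize=None)   # unbounded memoization
def fib(n):
    if n <= 2:
        return 1
    return fib(n - 1) + fib(n - 2)

assert fib(4) == 3 and fib(7) == 13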
8f117d62a60699b3d6b87e15962fca43339616a6
|
new_src/plot_multi_curves.py
|
new_src/plot_multi_curves.py
|
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
parent_dir = os.path.dirname(os.getcwd())
models_dir = os.path.join(parent_dir, "models")
csv_name = "learning_curve.csv"
def plot_multi_curves(dfs, labels,
figure_name="learning_curves",
alphas=[0.6, 0.6, 0.6, 1.0]):
metrics = ["acc", "loss", "val_acc", "val_loss"]
for metric in metrics:
plt.figure(num=figure_name + "_" + metric)
for df, label, alpha in zip(dfs, labels, alphas):
curve = df[metric].values.tolist()
num = len(curve)
x = np.arange(1, num + 1)
if alpha == 1.0:
plt.plot(x, curve, color="k", label=label, alpha=alpha)
else:
plt.plot(x, curve, label=label, alpha=alpha)
if "loss" in metric:
legend_loc = 1
ylim = [0.0, 2.5]
ylabel = "Loss"
else:
legend_loc = 4
ylim = [0.4, 1.0]
ylabel = "Accuracy"
plt.ylim(ylim)
plt.xlim([0, num])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Epochs", fontsize=18)
plt.ylabel(ylabel, fontsize=18)
plt.legend(fontsize=16, loc=legend_loc, ncol=2)
plt.grid("on", linestyle="--", linewidth=0.5, alpha=0.5)
plt.tight_layout()
plt.show()
return
lr_models_names = ["model-test-lr-2", "model-test-lr-3",
"model-test-lr-4", "model-afm-max-adam-5-5"]
lr_labels = ["1e-3", "1e-4", "1e-5", "1e-3~1e-4~1e-5"]
lr_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in lr_models_names]
plot_multi_curves(lr_dfs, lr_labels, "lr")
bs_models_names = ["model-test-bs-1", "model-test-bs-2",
"model-test-bs-3", "model-afm-max-adam-5-5"]
bs_labels = ["4", "8", "12", "16"]
bs_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in bs_models_names]
plot_multi_curves(bs_dfs, bs_labels, "bs")
|
Add script to plot metrics of dataset
|
Add script to plot metrics of dataset
|
Python
|
mit
|
quqixun/BrainTumorClassification,quqixun/BrainTumorClassification
|
Add script to plot metrics of dataset
|
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
parent_dir = os.path.dirname(os.getcwd())
models_dir = os.path.join(parent_dir, "models")
csv_name = "learning_curve.csv"
def plot_multi_curves(dfs, labels,
figure_name="learning_curves",
alphas=[0.6, 0.6, 0.6, 1.0]):
metrics = ["acc", "loss", "val_acc", "val_loss"]
for metric in metrics:
plt.figure(num=figure_name + "_" + metric)
for df, label, alpha in zip(dfs, labels, alphas):
curve = df[metric].values.tolist()
num = len(curve)
x = np.arange(1, num + 1)
if alpha == 1.0:
plt.plot(x, curve, color="k", label=label, alpha=alpha)
else:
plt.plot(x, curve, label=label, alpha=alpha)
if "loss" in metric:
legend_loc = 1
ylim = [0.0, 2.5]
ylabel = "Loss"
else:
legend_loc = 4
ylim = [0.4, 1.0]
ylabel = "Accuracy"
plt.ylim(ylim)
plt.xlim([0, num])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Epochs", fontsize=18)
plt.ylabel(ylabel, fontsize=18)
plt.legend(fontsize=16, loc=legend_loc, ncol=2)
plt.grid("on", linestyle="--", linewidth=0.5, alpha=0.5)
plt.tight_layout()
plt.show()
return
lr_models_names = ["model-test-lr-2", "model-test-lr-3",
"model-test-lr-4", "model-afm-max-adam-5-5"]
lr_labels = ["1e-3", "1e-4", "1e-5", "1e-3~1e-4~1e-5"]
lr_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in lr_models_names]
plot_multi_curves(lr_dfs, lr_labels, "lr")
bs_models_names = ["model-test-bs-1", "model-test-bs-2",
"model-test-bs-3", "model-afm-max-adam-5-5"]
bs_labels = ["4", "8", "12", "16"]
bs_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in bs_models_names]
plot_multi_curves(bs_dfs, bs_labels, "bs")
|
<commit_before><commit_msg>Add script to plot metrics of dataset<commit_after>
|
from __future__ import print_function
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
parent_dir = os.path.dirname(os.getcwd())
models_dir = os.path.join(parent_dir, "models")
csv_name = "learning_curve.csv"
def plot_multi_curves(dfs, labels,
figure_name="learning_curves",
alphas=[0.6, 0.6, 0.6, 1.0]):
metrics = ["acc", "loss", "val_acc", "val_loss"]
for metric in metrics:
plt.figure(num=figure_name + "_" + metric)
for df, label, alpha in zip(dfs, labels, alphas):
curve = df[metric].values.tolist()
num = len(curve)
x = np.arange(1, num + 1)
if alpha == 1.0:
plt.plot(x, curve, color="k", label=label, alpha=alpha)
else:
plt.plot(x, curve, label=label, alpha=alpha)
if "loss" in metric:
legend_loc = 1
ylim = [0.0, 2.5]
ylabel = "Loss"
else:
legend_loc = 4
ylim = [0.4, 1.0]
ylabel = "Accuracy"
plt.ylim(ylim)
plt.xlim([0, num])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Epochs", fontsize=18)
plt.ylabel(ylabel, fontsize=18)
plt.legend(fontsize=16, loc=legend_loc, ncol=2)
plt.grid("on", linestyle="--", linewidth=0.5, alpha=0.5)
plt.tight_layout()
plt.show()
return
lr_models_names = ["model-test-lr-2", "model-test-lr-3",
"model-test-lr-4", "model-afm-max-adam-5-5"]
lr_labels = ["1e-3", "1e-4", "1e-5", "1e-3~1e-4~1e-5"]
lr_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in lr_models_names]
plot_multi_curves(lr_dfs, lr_labels, "lr")
bs_models_names = ["model-test-bs-1", "model-test-bs-2",
"model-test-bs-3", "model-afm-max-adam-5-5"]
bs_labels = ["4", "8", "12", "16"]
bs_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in bs_models_names]
plot_multi_curves(bs_dfs, bs_labels, "bs")
|
Add script to plot metrics of datasetfrom __future__ import print_function
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
parent_dir = os.path.dirname(os.getcwd())
models_dir = os.path.join(parent_dir, "models")
csv_name = "learning_curve.csv"
def plot_multi_curves(dfs, labels,
figure_name="learning_curves",
alphas=[0.6, 0.6, 0.6, 1.0]):
metrics = ["acc", "loss", "val_acc", "val_loss"]
for metric in metrics:
plt.figure(num=figure_name + "_" + metric)
for df, label, alpha in zip(dfs, labels, alphas):
curve = df[metric].values.tolist()
num = len(curve)
x = np.arange(1, num + 1)
if alpha == 1.0:
plt.plot(x, curve, color="k", label=label, alpha=alpha)
else:
plt.plot(x, curve, label=label, alpha=alpha)
if "loss" in metric:
legend_loc = 1
ylim = [0.0, 2.5]
ylabel = "Loss"
else:
legend_loc = 4
ylim = [0.4, 1.0]
ylabel = "Accuracy"
plt.ylim(ylim)
plt.xlim([0, num])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Epochs", fontsize=18)
plt.ylabel(ylabel, fontsize=18)
plt.legend(fontsize=16, loc=legend_loc, ncol=2)
plt.grid("on", linestyle="--", linewidth=0.5, alpha=0.5)
plt.tight_layout()
plt.show()
return
lr_models_names = ["model-test-lr-2", "model-test-lr-3",
"model-test-lr-4", "model-afm-max-adam-5-5"]
lr_labels = ["1e-3", "1e-4", "1e-5", "1e-3~1e-4~1e-5"]
lr_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in lr_models_names]
plot_multi_curves(lr_dfs, lr_labels, "lr")
bs_models_names = ["model-test-bs-1", "model-test-bs-2",
"model-test-bs-3", "model-afm-max-adam-5-5"]
bs_labels = ["4", "8", "12", "16"]
bs_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in bs_models_names]
plot_multi_curves(bs_dfs, bs_labels, "bs")
|
<commit_before><commit_msg>Add script to plot metrics of dataset<commit_after>from __future__ import print_function
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
parent_dir = os.path.dirname(os.getcwd())
models_dir = os.path.join(parent_dir, "models")
csv_name = "learning_curve.csv"
def plot_multi_curves(dfs, labels,
figure_name="learning_curves",
alphas=[0.6, 0.6, 0.6, 1.0]):
metrics = ["acc", "loss", "val_acc", "val_loss"]
for metric in metrics:
plt.figure(num=figure_name + "_" + metric)
for df, label, alpha in zip(dfs, labels, alphas):
curve = df[metric].values.tolist()
num = len(curve)
x = np.arange(1, num + 1)
if alpha == 1.0:
plt.plot(x, curve, color="k", label=label, alpha=alpha)
else:
plt.plot(x, curve, label=label, alpha=alpha)
if "loss" in metric:
legend_loc = 1
ylim = [0.0, 2.5]
ylabel = "Loss"
else:
legend_loc = 4
ylim = [0.4, 1.0]
ylabel = "Accuracy"
plt.ylim(ylim)
plt.xlim([0, num])
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel("Epochs", fontsize=18)
plt.ylabel(ylabel, fontsize=18)
plt.legend(fontsize=16, loc=legend_loc, ncol=2)
plt.grid("on", linestyle="--", linewidth=0.5, alpha=0.5)
plt.tight_layout()
plt.show()
return
lr_models_names = ["model-test-lr-2", "model-test-lr-3",
"model-test-lr-4", "model-afm-max-adam-5-5"]
lr_labels = ["1e-3", "1e-4", "1e-5", "1e-3~1e-4~1e-5"]
lr_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in lr_models_names]
plot_multi_curves(lr_dfs, lr_labels, "lr")
bs_models_names = ["model-test-bs-1", "model-test-bs-2",
"model-test-bs-3", "model-afm-max-adam-5-5"]
bs_labels = ["4", "8", "12", "16"]
bs_dfs = [pd.read_csv(os.path.join(models_dir, model_name, csv_name))
for model_name in bs_models_names]
plot_multi_curves(bs_dfs, bs_labels, "bs")
|
|
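The alphas=[0.6, 0.6, 0.6, 1.0] convention above fades the three comparison runs and draws the final configuration in solid black. A stripped-down sketch of the same highlighting trick, using synthetic curves in place of the CSV data:

import numpy as np
import matplotlib.pyplot as plt

x = np.arange(1, 21)
for k in (0.7, 0.8, 0.9):
    plt.plot(x, 1 - np.exp(-k * x / 10.0), alpha=0.6, label=str(k))  # faded baselines
plt.plot(x, 1 - np.exp(-x / 5.0), color="k", alpha=1.0, label="best")  # solid highlight
plt.legend()
plt.show()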
43bba1633233a03be5d585a2341ba56860b93c6b
|
tests/test_finance_model.py
|
tests/test_finance_model.py
|
# Copyright (c) 2012 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import unittest
from pycroft import model
from pycroft.model import session, user, finance, _all
class Test_010_TransactionSplits(unittest.TestCase):
@classmethod
def setUpClass(cls):
session.reinit_session("sqlite://")
model.drop_db_model()
model.create_db_model()
cls.account = finance.FinanceAccount(name="Testaccount", type="EXPENSE")
cls.s = session.session
cls.s.add(cls.account)
cls.s.commit()
def tearDown(self):
super(Test_010_TransactionSplits, self).tearDown()
print "bla"
self.s.remove()
def test_0010_empty_transaction(self):
tr = finance.Transaction(message="Transaction1")
self.s.add(tr)
self.s.commit()
self.assertEqual(finance.Transaction.q.filter_by(message="Transaction1").count(), 1)
def test_0020_fail_on_unbalanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
self.s.add(sp1)
self.assertRaisesRegexp(Exception, 'Transaction "Transaction2" is not balanced!', self.s.commit)
#self.s.rollback()
def test_0030_insert_balanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
sp2 = finance.Split(amount=-100, account=self.account, transaction=tr)
self.s.add(sp1)
self.s.add(sp2)
self.s.commit()
|
Add a test to assert that transactions are balanced
|
Add a test to assert that transactions are balanced
|
Python
|
apache-2.0
|
lukasjuhrich/pycroft,lukasjuhrich/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft,lukasjuhrich/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
|
Add a test to assert that transactions are balanced
|
# Copyright (c) 2012 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import unittest
from pycroft import model
from pycroft.model import session, user, finance, _all
class Test_010_TransactionSplits(unittest.TestCase):
@classmethod
def setUpClass(cls):
session.reinit_session("sqlite://")
model.drop_db_model()
model.create_db_model()
cls.account = finance.FinanceAccount(name="Testaccount", type="EXPENSE")
cls.s = session.session
cls.s.add(cls.account)
cls.s.commit()
def tearDown(self):
super(Test_010_TransactionSplits, self).tearDown()
print "bla"
self.s.remove()
def test_0010_empty_transaction(self):
tr = finance.Transaction(message="Transaction1")
self.s.add(tr)
self.s.commit()
self.assertEqual(finance.Transaction.q.filter_by(message="Transaction1").count(), 1)
def test_0020_fail_on_unbalanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
self.s.add(sp1)
self.assertRaisesRegexp(Exception, 'Transaction "Transaction2" is not balanced!', self.s.commit)
#self.s.rollback()
def test_0030_insert_balanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
sp2 = finance.Split(amount=-100, account=self.account, transaction=tr)
self.s.add(sp1)
self.s.add(sp2)
self.s.commit()
|
<commit_before><commit_msg>Add a test to assert that transactions are balanced<commit_after>
|
# Copyright (c) 2012 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import unittest
from pycroft import model
from pycroft.model import session, user, finance, _all
class Test_010_TransactionSplits(unittest.TestCase):
@classmethod
def setUpClass(cls):
session.reinit_session("sqlite://")
model.drop_db_model()
model.create_db_model()
cls.account = finance.FinanceAccount(name="Testaccount", type="EXPENSE")
cls.s = session.session
cls.s.add(cls.account)
cls.s.commit()
def tearDown(self):
super(Test_010_TransactionSplits, self).tearDown()
print "bla"
self.s.remove()
def test_0010_empty_transaction(self):
tr = finance.Transaction(message="Transaction1")
self.s.add(tr)
self.s.commit()
self.assertEqual(finance.Transaction.q.filter_by(message="Transaction1").count(), 1)
def test_0020_fail_on_unbalanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
self.s.add(sp1)
self.assertRaisesRegexp(Exception, 'Transaction "Transaction2" is not balanced!', self.s.commit)
#self.s.rollback()
def test_0030_insert_balanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
sp2 = finance.Split(amount=-100, account=self.account, transaction=tr)
self.s.add(sp1)
self.s.add(sp2)
self.s.commit()
|
Add a test to assert that transactions are balanced# Copyright (c) 2012 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import unittest
from pycroft import model
from pycroft.model import session, user, finance, _all
class Test_010_TransactionSplits(unittest.TestCase):
@classmethod
def setUpClass(cls):
session.reinit_session("sqlite://")
model.drop_db_model()
model.create_db_model()
cls.account = finance.FinanceAccount(name="Testaccount", type="EXPENSE")
cls.s = session.session
cls.s.add(cls.account)
cls.s.commit()
def tearDown(self):
super(Test_010_TransactionSplits, self).tearDown()
print "bla"
self.s.remove()
def test_0010_empty_transaction(self):
tr = finance.Transaction(message="Transaction1")
self.s.add(tr)
self.s.commit()
self.assertEqual(finance.Transaction.q.filter_by(message="Transaction1").count(), 1)
def test_0020_fail_on_unbalanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
self.s.add(sp1)
self.assertRaisesRegexp(Exception, 'Transaction "Transaction2" is not balanced!', self.s.commit)
#self.s.rollback()
def test_0030_insert_balanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
sp2 = finance.Split(amount=-100, account=self.account, transaction=tr)
self.s.add(sp1)
self.s.add(sp2)
self.s.commit()
|
<commit_before><commit_msg>Add a test to assert that transactions are balanced<commit_after># Copyright (c) 2012 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import unittest
from pycroft import model
from pycroft.model import session, user, finance, _all
class Test_010_TransactionSplits(unittest.TestCase):
@classmethod
def setUpClass(cls):
session.reinit_session("sqlite://")
model.drop_db_model()
model.create_db_model()
cls.account = finance.FinanceAccount(name="Testaccount", type="EXPENSE")
cls.s = session.session
cls.s.add(cls.account)
cls.s.commit()
def tearDown(self):
super(Test_010_TransactionSplits, self).tearDown()
print "bla"
self.s.remove()
def test_0010_empty_transaction(self):
tr = finance.Transaction(message="Transaction1")
self.s.add(tr)
self.s.commit()
self.assertEqual(finance.Transaction.q.filter_by(message="Transaction1").count(), 1)
def test_0020_fail_on_unbalanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
self.s.add(sp1)
self.assertRaisesRegexp(Exception, 'Transaction "Transaction2" is not balanced!', self.s.commit)
#self.s.rollback()
def test_0030_insert_balanced(self):
tr = finance.Transaction(message="Transaction2")
self.s.add(tr)
self.s.commit()
sp1 = finance.Split(amount=100, account=self.account, transaction=tr)
sp2 = finance.Split(amount=-100, account=self.account, transaction=tr)
self.s.add(sp1)
self.s.add(sp2)
self.s.commit()
|
|
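A note on the invariant the pycroft test above relies on: committing a Transaction whose splits do not sum to zero must raise. A minimal sketch of such a guard, assuming SQLAlchemy's session-event API and the hypothetical Transaction/Split models used above (this is not pycroft's actual implementation):
from sqlalchemy import event
from sqlalchemy.orm import Session
@event.listens_for(Session, "before_commit")
def assert_transactions_balanced(session):
    # Walk every pending object; Transaction is the hypothetical model above.
    for obj in list(session.new) + list(session.dirty):
        if isinstance(obj, Transaction):
            # Splits must sum to zero for the double-entry invariant to hold.
            if sum(split.amount for split in obj.splits) != 0:
                raise Exception('Transaction "%s" is not balanced!' % obj.message)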
0507bf58c7d73f91e645d64c69fcccb35542703a
|
VolumeUtilities.py
|
VolumeUtilities.py
|
#!/usr/bin/python
from snappy import *
from math import log
# Global constants:
PARI_PRECISION = 100
LINDEP_PRECISION = 50
EPSILON = 1e-12
# *** ATTENTION USER ***
# If you want to dynamically change the above constants for all sessions,
# or just want to keep the default values so you can revert,
# do so here! That way the values will both be used and tested in the warning system below
# User definitions:
# Environment setup code:
pari.set_real_precision(PARI_PRECISION)
# Test code; makes sure the constants are sanely set:
# PARI & LINDEP PRECISION
if .9*PARI_PRECISION <= LINDEP_PRECISION or PARI_PRECISION - 3 <= LINDEP_PRECISION:
print 'WARNING: You set PARI to use '+str(PARI_PRECISION)+' places by default, with lindep calls at '+str(LINDEP_PRECISION)+' places;'
print 'This will probably lead to rounding errors messing things up when lindep is used.'
print 'You should probably make sure LINDEP_PRECISION < both .9*PARI_PRECISION and PARI_PRECISION - 3 to avoid this.'
# EPSILON (vs. PARI_PRECISION)
if EPSILON <= 0:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', but it must be positive.'
print 'Try setting EPSILON=abs(EPSILON)'
if EPSILON > .01:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is really big.'
print 'PARI is capable of computing to hundreds of places of PRECISION, currently '+str(PARI_PRECISION)+', and at worst will use over 10.'
print 'You should set EPSILON to something smaller.'
if log(EPSILON) >= .9*log(PARI_PRECISION):
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is small compared to PARI\'s PRECISION limit of '+str(float(10)**(-PARI_PRECISION))+'.'
print 'Maybe you should set EPSILON a little bigger, so rounding errors from PARI don\'t get through.'
|
Add a new file for constants and other utilities to be thrown into.
|
Add a new file for constants and other utilities to be thrown into.
|
Python
|
mit
|
s-gilles/maps-reu-code
|
Add a new file for constants and other utilities to be thrown into.
|
#!/usr/bin/python
from snappy import *
from math import log
# Global constants:
PARI_PRECISION = 100
LINDEP_PRECISION = 50
EPSILON = 1e-12
# *** ATTENTION USER ***
# If you want to dynamically change the above constants for all sessions,
# or just want to keep the default values so you can revert,
# do so here! That way the values will both be used and tested in the warning system below
# User definitions:
# Environment setup code:
pari.set_real_precision(PARI_PRECISION)
# Test code; makes sure the constants are sanely set:
# PARI & LINDEP PRECISION
if .9*PARI_PRECISION <= LINDEP_PRECISION or PARI_PRECISION - 3 <= LINDEP_PRECISION:
print 'WARNING: You set PARI to use '+str(PARI_PRECISION)+' places by default, with lindep calls at '+str(LINDEP_PRECISION)+' places;'
print 'This will probably lead to rounding errors messing things up when lindep is used.'
print 'You should probably make sure LINDEP_PRECISION < both .9*PARI_PRECISION and PARI_PRECISION - 3 to avoid this.'
# EPSILON (vs. PARI_PRECISION)
if EPSILON <= 0:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', but it must be positive.'
print 'Try setting EPSILON=abs(EPSILON)'
if EPSILON > .01:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is really big.'
print 'PARI is capable of computing to hundreds of places of PRECISION, currently '+str(PARI_PRECISION)+', and at worst will use over 10.'
print 'You should set EPSILON to something smaller.'
if log(EPSILON) >= .9*log(PARI_PRECISION):
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is small compared to PARI\'s PRECISION limit of '+str(float(10)**(-PARI_PRECISION))+'.'
print 'Maybe you should set EPSILON a little bigger, so rounding errors from PARI don\'t get through.'
|
<commit_before><commit_msg>Add a new file for constants and other utilities to be thrown into.<commit_after>
|
#!/usr/bin/python
from snappy import *
from math import log
# Global constants:
PARI_PRECISION = 100
LINDEP_PRECISION = 50
EPSILON = 1e-12
# *** ATTENTION USER ***
# If you want to dynamically change the above constants for all sessions,
# or just want to keep the default values so you can revert,
# do so here! That way the values will both be used and tested in the warning system below
# User definitions:
# Environment setup code:
pari.set_real_precision(PARI_PRECISION)
# Test code; makes sure the constants are sanely set:
# PARI & LINDEP PRECISION
if .9*PARI_PRECISION <= LINDEP_PRECISION or PARI_PRECISION - 3 <= LINDEP_PRECISION:
print 'WARNING: You set PARI to use '+str(PARI_PRECISION)+' places by default, with lindep calls at '+str(LINDEP_PRECISION)+' places;'
print 'This will probably lead to rounding errors messing things up when lindep is used.'
print 'You should probably make sure LINDEP_PRECISION < both .9*PARI_PRECISION and PARI_PRECISION - 3 to avoid this.'
# EPSILON (vs. PARI_PRECISION)
if EPSILON <= 0:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', but it must be positive.'
print 'Try setting EPSILON=abs(EPSILON)'
if EPSILON > .01:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is really big.'
print 'PARI is capable of computing to hundreds of places of PRECISION, currently '+str(PARI_PRECISION)+', and at worst will use over 10.'
print 'You should set EPSILON to something smaller.'
if log(EPSILON) >= .9*log(PARI_PRECISION):
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is small compared to PARI\'s PRECISION limit of '+str(float(10)**(-PARI_PRECISION))+'.'
print 'Maybe you should set EPSILON a little bigger, so rounding errors from PARI don\'t get through.'
|
Add a new file for constants and other utilities to be thrown into.#!/usr/bin/python
from snappy import *
from math import log
# Global constants:
PARI_PRECISION = 100
LINDEP_PRECISION = 50
EPSILON = 1e-12
# *** ATTENTION USER ***
# If you want to dynamically change the above constants for all sessions,
# or just want to keep the default values so you can revert,
# do so here! That way the values will both be used and tested in the warning system below
# User definitions:
# Environment setup code:
pari.set_real_precision(PARI_PRECISION)
# Test code; makes sure the constants are sanely set:
# PARI & LINDEP PRECISION
if .9*PARI_PRECISION <= LINDEP_PRECISION or PARI_PRECISION - 3 <= LINDEP_PRECISION:
print 'WARNING: You set PARI to use '+str(PARI_PRECISION)+' places by default, with lindep calls at '+str(LINDEP_PRECISION)+' places;'
print 'This will probably lead to rounding errors messing things up when lindep is used.'
print 'You should probably make sure LINDEP_PRECISION < both .9*PARI_PRECISION and PARI_PRECISION - 3 to avoid this.'
# EPSILON (vs. PARI_PRECISION)
if EPSILON <= 0:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', but it must be positive.'
print 'Try setting EPSILON=abs(EPSILON)'
if EPSILON > .01:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is really big.'
print 'PARI is capable of computing to hundreds of places of PRECISION, currently '+str(PARI_PRECISION)+', and at worst will use over 10.'
print 'You should set EPSILON to something smaller.'
if log(EPSILON) >= .9*log(PARI_PRECISION):
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is small compared to PARI\'s PRECISION limit of '+str(float(10)**(-PARI_PRECISION))+'.'
print 'Maybe you should set EPSILON a little bigger, so rounding errors from PARI don\'t get through.'
|
<commit_before><commit_msg>Add a new file for constants and other utilities to be thrown into.<commit_after>#!/usr/bin/python
from snappy import *
from math import log
# Global constants:
PARI_PRECISION = 100
LINDEP_PRECISION = 50
EPSILON = 1e-12
# *** ATTENTION USER ***
# If you want to dynamically change the above constants for all sessions,
# or just want to keep the default values so you can revert,
# do so here! That way the values will both be used and tested in the warning system below
# User definitions:
# Environment setup code:
pari.set_real_precision(PARI_PRECISION)
# Test code; makes sure the constants are sanely set:
# PARI & LINDEP PRECISION
if .9*PARI_PRECISION <= LINDEP_PRECISION or PARI_PRECISION - 3 <= LINDEP_PRECISION:
print 'WARNING: You set PARI to use '+str(PARI_PRECISION)+' places by default, with lindep calls at '+str(LINDEP_PRECISION)+' places;'
print 'This will probably lead to rounding errors messing things up when lindep is used.'
print 'You should probably make sure LINDEP_PRECISION < both .9*PARI_PRECISION and PARI_PRECISION - 3 to avoid this.'
# EPSILON (vs. PARI_PRECISION)
if EPSILON <= 0:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', but it must be positive.'
print 'Try setting EPSILON=abs(EPSILON)'
if EPSILON > .01:
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is really big.'
print 'PARI is capable of computing to hundreds of places of PRECISION, currently '+str(PARI_PRECISION)+', and at worst will use over 10.'
print 'You should set EPSILON to something smaller.'
if log(EPSILON) >= .9*log(PARI_PRECISION):
print 'WARNING: You set EPSILON to '+str(EPSILON)+', which is small compared to PARI\'s PRECISION limit of '+str(float(10)**(-PARI_PRECISION))+'.'
print 'Maybe you should set EPSILON a little bigger, so rounding errors from PARI don\'t get through.'
|
|
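As a quick arithmetic check of the defaults above: with PARI_PRECISION = 100 and LINDEP_PRECISION = 50, the guard evaluates .9*100 = 90 and 100 - 3 = 97, and 50 sits below both, so the precision warning stays silent. A standalone restatement of that condition:
PARI_PRECISION = 100
LINDEP_PRECISION = 50
# 90 <= 50 is False and 97 <= 50 is False, so no warning is printed
# with the shipped defaults.
assert not (.9 * PARI_PRECISION <= LINDEP_PRECISION
            or PARI_PRECISION - 3 <= LINDEP_PRECISION)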
26a847a9b5f9db3279849c6cc7505d41653887c9
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name='typesystem',
version='0.1',
description="An abstract type system",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
keywords='types rdf',
author='Friedrich Lindenberg',
author_email='friedrich@pudo.org',
url='http://github.com/granoproject/typesystem',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
namespace_packages=[],
package_data={},
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=[
'python-dateutil==1.5',
'normality>=0.2.2'
],
tests_require=[],
entry_points={}
)
|
Make it a python package
|
Make it a python package
|
Python
|
mit
|
pudo/typecast,influencemapping/typesystem
|
Make it a python package
|
from setuptools import setup, find_packages
setup(
name='typesystem',
version='0.1',
description="An abstract type system",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
keywords='types rdf',
author='Friedrich Lindenberg',
author_email='friedrich@pudo.org',
url='http://github.com/granoproject/typesystem',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
namespace_packages=[],
package_data={},
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=[
'python-dateutil==1.5',
'normality>=0.2.2'
],
tests_require=[],
entry_points={}
)
|
<commit_before><commit_msg>Make it a python package<commit_after>
|
from setuptools import setup, find_packages
setup(
name='typesystem',
version='0.1',
description="An abstract type system",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
keywords='types rdf',
author='Friedrich Lindenberg',
author_email='friedrich@pudo.org',
url='http://github.com/granoproject/typesystem',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
namespace_packages=[],
package_data={},
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=[
'python-dateutil==1.5',
'normality>=0.2.2'
],
tests_require=[],
entry_points={}
)
|
Make it a python packagefrom setuptools import setup, find_packages
setup(
name='typesystem',
version='0.1',
description="An abstract type system",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
keywords='types rdf',
author='Friedrich Lindenberg',
author_email='friedrich@pudo.org',
url='http://github.com/granoproject/typesystem',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
namespace_packages=[],
package_data={},
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=[
'python-dateutil==1.5',
'normality>=0.2.2'
],
tests_require=[],
entry_points={}
)
|
<commit_before><commit_msg>Make it a python package<commit_after>from setuptools import setup, find_packages
setup(
name='typesystem',
version='0.1',
description="An abstract type system",
long_description="",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'
],
keywords='types rdf',
author='Friedrich Lindenberg',
author_email='friedrich@pudo.org',
url='http://github.com/granoproject/typesystem',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'test']),
namespace_packages=[],
package_data={},
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=[
'python-dateutil==1.5',
'normality>=0.2.2'
],
tests_require=[],
entry_points={}
)
|
|
f698ca84bf01ea36dafa11a9e4937d733737c08b
|
fmn/lib/tests/test_regexes.py
|
fmn/lib/tests/test_regexes.py
|
import fmn.lib
class MockContext(object):
def __init__(self, name):
self.name = name
email = MockContext('email')
irc = MockContext('irc')
class TestRegexes(fmn.lib.tests.Base):
def test_valid_emails(self):
values = [
'awesome@fedoraproject.org',
'foo+fedora.org@bar.baz',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(email, value)
def test_invalid_emails(self):
values = [
'wat',
'not@anemail.org?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(email, value)
except ValueError:
pass
else:
raise ValueError("Invalid email %r did not fail" % value)
def test_valid_ircnicks(self):
values = [
'threebean',
'awesome|guy',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(irc, value)
def test_invalid_ircnicks(self):
values = [
'?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(irc, value)
except ValueError:
pass
else:
raise ValueError("Invalid ircnick %r did not fail" % value)
|
Add some tests for our detail value validator(s).
|
Add some tests for our detail value validator(s).
|
Python
|
lgpl-2.1
|
jeremycline/fmn,jeremycline/fmn,jeremycline/fmn
|
Add some tests for our detail value validator(s).
|
import fmn.lib
class MockContext(object):
def __init__(self, name):
self.name = name
email = MockContext('email')
irc = MockContext('irc')
class TestRegexes(fmn.lib.tests.Base):
def test_valid_emails(self):
values = [
'awesome@fedoraproject.org',
'foo+fedora.org@bar.baz',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(email, value)
def test_invalid_emails(self):
values = [
'wat',
'not@anemail.org?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(email, value)
except ValueError:
pass
else:
raise ValueError("Invalid email %r did not fail" % value)
def test_valid_ircnicks(self):
values = [
'threebean',
'awesome|guy',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(irc, value)
def test_invalid_ircnicks(self):
values = [
'?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(irc, value)
except ValueError:
pass
else:
raise ValueError("Invalid ircnick %r did not fail" % value)
|
<commit_before><commit_msg>Add some tests for our detail value validator(s).<commit_after>
|
import fmn.lib
class MockContext(object):
def __init__(self, name):
self.name = name
email = MockContext('email')
irc = MockContext('irc')
class TestRegexes(fmn.lib.tests.Base):
def test_valid_emails(self):
values = [
'awesome@fedoraproject.org',
'foo+fedora.org@bar.baz',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(email, value)
def test_invalid_emails(self):
values = [
'wat',
'not@anemail.org?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(email, value)
except ValueError:
pass
else:
raise ValueError("Invalid email %r did not fail" % value)
def test_valid_ircnicks(self):
values = [
'threebean',
'awesome|guy',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(irc, value)
def test_invalid_ircnicks(self):
values = [
'?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(irc, value)
except ValueError:
pass
else:
raise ValueError("Invalid ircnick %r did not fail" % value)
|
Add some tests for our detail value validator(s).import fmn.lib
class MockContext(object):
def __init__(self, name):
self.name = name
email = MockContext('email')
irc = MockContext('irc')
class TestRegexes(fmn.lib.tests.Base):
def test_valid_emails(self):
values = [
'awesome@fedoraproject.org',
'foo+fedora.org@bar.baz',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(email, value)
def test_invalid_emails(self):
values = [
'wat',
'not@anemail.org?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(email, value)
except ValueError:
pass
else:
raise ValueError("Invalid email %r did not fail" % value)
def test_valid_ircnicks(self):
values = [
'threebean',
'awesome|guy',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(irc, value)
def test_invalid_ircnicks(self):
values = [
'?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(irc, value)
except ValueError:
pass
else:
raise ValueError("Invalid ircnick %r did not fail" % value)
|
<commit_before><commit_msg>Add some tests for our detail value validator(s).<commit_after>import fmn.lib
class MockContext(object):
def __init__(self, name):
self.name = name
email = MockContext('email')
irc = MockContext('irc')
class TestRegexes(fmn.lib.tests.Base):
def test_valid_emails(self):
values = [
'awesome@fedoraproject.org',
'foo+fedora.org@bar.baz',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(email, value)
def test_invalid_emails(self):
values = [
'wat',
'not@anemail.org?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(email, value)
except ValueError:
pass
else:
raise ValueError("Invalid email %r did not fail" % value)
def test_valid_ircnicks(self):
values = [
'threebean',
'awesome|guy',
]
for value in values:
# None of these should raise exceptions
fmn.lib.validate_detail_value(irc, value)
def test_invalid_ircnicks(self):
values = [
'?',
]
for value in values:
# All of these should raise exceptions
try:
fmn.lib.validate_detail_value(irc, value)
except ValueError:
pass
else:
raise ValueError("Invalid ircnick %r did not fail" % value)
|
|
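The try/except/else bookkeeping above is nose-era style; the same invalid-input assertion can be written more compactly with pytest.raises. A hedged equivalent for one loop, assuming pytest were added to the project's test dependencies and reusing the module-level email mock above:
import pytest
def test_invalid_emails_pytest_style(self):
    for value in ['wat', 'not@anemail.org?']:
        # pytest.raises fails the test automatically when no ValueError
        # is raised, replacing the manual else branch.
        with pytest.raises(ValueError):
            fmn.lib.validate_detail_value(email, value)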
96c4e54ed7bde9e41c6c235ff1654f47da2e23f3
|
cms_lab_data/cms_app.py
|
cms_lab_data/cms_app.py
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class DataApp(CMSApp):
name = 'Data App'
urls = ['cms_lab_data.urls']
app_name = 'cms_lab_data'
apphook_pool.register(DataApp)
|
Create DataApp app hook for CMS
|
Create DataApp app hook for CMS
|
Python
|
bsd-3-clause
|
mfcovington/djangocms-lab-data,mfcovington/djangocms-lab-data,mfcovington/djangocms-lab-data
|
Create DataApp app hook for CMS
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class DataApp(CMSApp):
name = 'Data App'
urls = ['cms_lab_data.urls']
app_name = 'cms_lab_data'
apphook_pool.register(DataApp)
|
<commit_before><commit_msg>Create DataApp app hook for CMS<commit_after>
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class DataApp(CMSApp):
name = 'Data App'
urls = ['cms_lab_data.urls']
app_name = 'cms_lab_data'
apphook_pool.register(DataApp)
|
Create DataApp app hook for CMSfrom cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class DataApp(CMSApp):
name = 'Data App'
urls = ['cms_lab_data.urls']
app_name = 'cms_lab_data'
apphook_pool.register(DataApp)
|
<commit_before><commit_msg>Create DataApp app hook for CMS<commit_after>from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
class DataApp(CMSApp):
name = 'Data App'
urls = ['cms_lab_data.urls']
app_name = 'cms_lab_data'
apphook_pool.register(DataApp)
|
|
9801764a4b60a4ca6630936cdc1e8f85beb6020b
|
cea/tests/test_inputs_setup_workflow.py
|
cea/tests/test_inputs_setup_workflow.py
|
import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
Add test for input setup workflow
|
Add test for input setup workflow
|
Python
|
mit
|
architecture-building-systems/CEAforArcGIS,architecture-building-systems/CEAforArcGIS
|
Add test for input setup workflow
|
import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for input setup workflow<commit_after>
|
import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
Add test for input setup workflowimport os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add test for input setup workflow<commit_after>import os
import unittest
import cea.config
from cea.utilities import create_polygon
from cea.datamanagement import zone_helper, surroundings_helper, terrain_helper, streets_helper, data_initializer, \
archetypes_mapper
# Zug site coordinates
POLYGON_COORDINATES = [(8.513465734818856, 47.178027239429234), (8.515472027162078, 47.177895971877604),
(8.515214535096632, 47.175496635565885), (8.513139577193424, 47.175600066313542),
(8.513465734818856, 47.178027239429234)]
class TestInputSetupWorkflowCase(unittest.TestCase):
def setUp(self):
self.config = cea.config.Configuration(cea.config.DEFAULT_CONFIG)
self.config.project = os.path.expandvars("${TEMP}/reference-case-open")
def test_input_setup_workflow(self):
self.config.create_polygon.coordinates = POLYGON_COORDINATES
self.config.create_polygon.filename = 'site'
data_initializer.main(self.config)
create_polygon.main(self.config)
# TODO: Mock osmnx.create_footprints_download
zone_helper.main(self.config)
surroundings_helper.main(self.config)
terrain_helper.main(self.config)
streets_helper.main(self.config)
archetypes_mapper.main(self.config)
if __name__ == '__main__':
unittest.main()
|
|
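The TODO above asks for osmnx's footprint download to be mocked so the workflow test stays off the network. One way to sketch that with unittest.mock; the patch target string is an assumption about where zone_helper imports osmnx, not a verified path:
from unittest import mock
with mock.patch('cea.datamanagement.zone_helper.osmnx') as fake_osmnx:
    # Any GeoDataFrame-shaped stand-in would do; a MagicMock is the
    # simplest placeholder for the downloaded footprints.
    fake_osmnx.create_footprints_download.return_value = mock.MagicMock()
    zone_helper.main(self.config)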
4fe20a71ef3a432a8e53bf498d847d9a66b099e9
|
migrations/002_add_month_start.py
|
migrations/002_add_month_start.py
|
"""
Add _month_start_at field to all documents in all collections
"""
from backdrop.core.bucket import utc
from backdrop.core.records import Record
import logging
log = logging.getLogger(__name__)
def up(db):
for name in db.collection_names():
log.info("Migrating collection: {0}".format(name))
collection = db[name]
query = {
"_timestamp": {"$exists": True},
"_month_start_at": {"$exists": False}
}
for document in collection.find(query):
document['_timestamp'] = utc(document['_timestamp'])
record = Record(document)
collection.save(record.to_mongo())
|
Add migration for monthly data
|
Add migration for monthly data
|
Python
|
mit
|
alphagov/backdrop,alphagov/backdrop,alphagov/backdrop
|
Add migration for monthly data
|
"""
Add _month_start_at field to all documents in all collections
"""
from backdrop.core.bucket import utc
from backdrop.core.records import Record
import logging
log = logging.getLogger(__name__)
def up(db):
for name in db.collection_names():
log.info("Migrating collection: {0}".format(name))
collection = db[name]
query = {
"_timestamp": {"$exists": True},
"_month_start_at": {"$exists": False}
}
for document in collection.find(query):
document['_timestamp'] = utc(document['_timestamp'])
record = Record(document)
collection.save(record.to_mongo())
|
<commit_before><commit_msg>Add migration for monthly data<commit_after>
|
"""
Add _month_start_at field to all documents in all collections
"""
from backdrop.core.bucket import utc
from backdrop.core.records import Record
import logging
log = logging.getLogger(__name__)
def up(db):
for name in db.collection_names():
log.info("Migrating collection: {0}".format(name))
collection = db[name]
query = {
"_timestamp": {"$exists": True},
"_month_start_at": {"$exists": False}
}
for document in collection.find(query):
document['_timestamp'] = utc(document['_timestamp'])
record = Record(document)
collection.save(record.to_mongo())
|
Add migration for monthly data"""
Add _month_start_at field to all documents in all collections
"""
from backdrop.core.bucket import utc
from backdrop.core.records import Record
import logging
log = logging.getLogger(__name__)
def up(db):
for name in db.collection_names():
log.info("Migrating collection: {0}".format(name))
collection = db[name]
query = {
"_timestamp": {"$exists": True},
"_month_start_at": {"$exists": False}
}
for document in collection.find(query):
document['_timestamp'] = utc(document['_timestamp'])
record = Record(document)
collection.save(record.to_mongo())
|
<commit_before><commit_msg>Add migration for monthly data<commit_after>"""
Add _month_start_at field to all documents in all collections
"""
from backdrop.core.bucket import utc
from backdrop.core.records import Record
import logging
log = logging.getLogger(__name__)
def up(db):
for name in db.collection_names():
log.info("Migrating collection: {0}".format(name))
collection = db[name]
query = {
"_timestamp": {"$exists": True},
"_month_start_at": {"$exists": False}
}
for document in collection.find(query):
document['_timestamp'] = utc(document['_timestamp'])
record = Record(document)
collection.save(record.to_mongo())
|
|
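For context on what Record.to_mongo() presumably derives here: a month-start timestamp is just the document timestamp truncated to midnight on the first of its month. A standalone sketch of that derivation (hypothetical helper, not backdrop's actual code):
from datetime import datetime
def month_start(timestamp):
    # Truncate a datetime to 00:00 on the first day of its month.
    return timestamp.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
assert month_start(datetime(2013, 4, 17, 9, 30)) == datetime(2013, 4, 1)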
b737cbbd3425d2c661ffa73bff39b18b30d8f914
|
cwappy/libcwap_test.py
|
cwappy/libcwap_test.py
|
#!/usr/bin/env python
import libcwap
def reader(size):
print "Got read"
return 'T' * size
def callback(name):
def actual_callback(*args, **kwargs):
print name, 'got args', args, 'and kwargs', kwargs
return actual_callback
actions = (
callback("time_request_function"),
callback("speaking_clock_request_function"),
callback("noise_request_function"),
callback("alarms_request_function"),
callback("time_set_function"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("delete_data"),
)
if __name__ == '__main__':
libcwap.register(actions)
libcwap.action(reader)
|
Add file for testing libcwap from the python side
|
Add file for testing libcwap from the python side
|
Python
|
mit
|
xim/tsoc,xim/tsoc,xim/tsoc,xim/tsoc
|
Add file for testing libcwap from the python side
|
#!/usr/bin/env python
import libcwap
def reader(size):
print "Got read"
return 'T' * size
def callback(name):
def actual_callback(*args, **kwargs):
print name, 'got args', args, 'and kwargs', kwargs
return actual_callback
actions = (
callback("time_request_function"),
callback("speaking_clock_request_function"),
callback("noise_request_function"),
callback("alarms_request_function"),
callback("time_set_function"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("delete_data"),
)
if __name__ == '__main__':
libcwap.register(actions)
libcwap.action(reader)
|
<commit_before><commit_msg>Add file for testing libcwap from the python side<commit_after>
|
#!/usr/bin/env python
import libcwap
def reader(size):
print "Got read"
return 'T' * size
def callback(name):
def actual_callback(*args, **kwargs):
print name, 'got args', args, 'and kwargs', kwargs
return actual_callback
actions = (
callback("time_request_function"),
callback("speaking_clock_request_function"),
callback("noise_request_function"),
callback("alarms_request_function"),
callback("time_set_function"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("delete_data"),
)
if __name__ == '__main__':
libcwap.register(actions)
libcwap.action(reader)
|
Add file for testing libcwap from the python side#!/usr/bin/env python
import libcwap
def reader(size):
print "Got read"
return 'T' * size
def callback(name):
def actual_callback(*args, **kwargs):
print name, 'got args', args, 'and kwargs', kwargs
return actual_callback
actions = (
callback("time_request_function"),
callback("speaking_clock_request_function"),
callback("noise_request_function"),
callback("alarms_request_function"),
callback("time_set_function"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("delete_data"),
)
if __name__ == '__main__':
libcwap.register(actions)
libcwap.action(reader)
|
<commit_before><commit_msg>Add file for testing libcwap from the python side<commit_after>#!/usr/bin/env python
import libcwap
def reader(size):
print "Got read"
return 'T' * size
def callback(name):
def actual_callback(*args, **kwargs):
print name, 'got args', args, 'and kwargs', kwargs
return actual_callback
actions = (
callback("time_request_function"),
callback("speaking_clock_request_function"),
callback("noise_request_function"),
callback("alarms_request_function"),
callback("time_set_function"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("record_data"),
callback("delete_data"),
)
if __name__ == '__main__':
libcwap.register(actions)
libcwap.action(reader)
|
|
8533068444eacf4a7731d3543b9308cd6d41b51d
|
Core/Color.py
|
Core/Color.py
|
# -*- coding:utf-8 -*-
# ***************************************************************************
# Color.py
# -------------------
# update : 2013-11-21
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#-
#
# External dependencies
#
#-
from numpy import empty
from numpy.linalg import norm
#--
#
# Value2Color
#
#--
#
# Convert a value in range [ 0.0, 1.0 ] to a pseudo-color
#
def Value2Color( value ) :
if( value < 0.0 ) : return [ 0.0, 0.0 , 1.0 ]
if( value < 0.25 ) : return [ 0.0, value * 4.0, 1.0 ]
if( value < 0.50 ) : return [ 0.0, 1.0, 1.0 - (value - 0.25) * 4.0 ]
if( value < 0.75 ) : return [ (value - 0.5) * 4.0, 1.0, 0.0 ]
if( value < 1.0 ) : return [ 1.0, 1.0 - (value - 0.75) * 4.0, 0.0 ]
return [ 1.0, 0.0, 0.0 ]
#--
#
# Array2Color
#
#--
#
# Convert an array to a pseudo-color
#
def Array2Color( values ) :
# Initialize variables
colors = empty( (len(values), 3) )
value_lengths = empty( (len(values), 1) )
# Compute value vector length
for i in range( len(values) ) :
value_lengths[i] = norm( values[i] )
# Compute minimum and maximum value
min_value = value_lengths.min()
max_value = value_lengths.max()
# Compute the range of the values
value_range = max_value - min_value
# Convert each value to a pseudo-color
for i in range( len(values) ) :
colors[i] = Value2Color( (value_lengths[i] - min_value) / value_range )
# Return result
return colors
|
Add some color utility functions.
|
Add some color utility functions.
|
Python
|
mit
|
microy/MeshToolkit,microy/PyMeshToolkit,microy/MeshToolkit,microy/PyMeshToolkit
|
Add some color utility functions.
|
# -*- coding:utf-8 -*-
# ***************************************************************************
# Color.py
# -------------------
# update : 2013-11-21
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#-
#
# External dependencies
#
#-
from numpy import empty
from numpy.linalg import norm
#--
#
# Value2Color
#
#--
#
# Convert a value in range [ 0.0, 1.0 ] to a pseudo-color
#
def Value2Color( value ) :
if( value < 0.0 ) : return [ 0.0, 0.0 , 1.0 ]
if( value < 0.25 ) : return [ 0.0, value * 4.0, 1.0 ]
if( value < 0.50 ) : return [ 0.0, 1.0, 1.0 - (value - 0.25) * 4.0 ]
if( value < 0.75 ) : return [ (value - 0.5) * 4.0, 1.0, 0.0 ]
if( value < 1.0 ) : return [ 1.0, 1.0 - (value - 0.75) * 4.0, 0.0 ]
return [ 1.0, 0.0, 0.0 ]
#--
#
# Array2Color
#
#--
#
# Convert an array to a pseudo-color
#
def Array2Color( values ) :
# Initialize variables
colors = empty( (len(values), 3) )
value_lengths = empty( (len(values), 1) )
# Compute value vector length
for i in range( len(values) ) :
value_lengths[i] = norm( values[i] )
# Compute minimum and maximum value
min_value = value_lengths.min()
max_value = value_lengths.max()
# Compute the range of the values
value_range = max_value - min_value
# Convert each value to a pseudo-color
for i in range( len(values) ) :
colors[i] = Value2Color( (value_lengths[i] - min_value) / value_range )
# Return result
return colors
|
<commit_before><commit_msg>Add some color utility functions.<commit_after>
|
# -*- coding:utf-8 -*-
# ***************************************************************************
# Color.py
# -------------------
# update : 2013-11-21
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#-
#
# External dependencies
#
#-
from numpy import empty
from numpy.linalg import norm
#--
#
# Value2Color
#
#--
#
# Convert a value in range [ 0.0, 1.0 ] to a pseudo-color
#
def Value2Color( value ) :
if( value < 0.0 ) : return [ 0.0, 0.0 , 1.0 ]
if( value < 0.25 ) : return [ 0.0, value * 4.0, 1.0 ]
if( value < 0.50 ) : return [ 0.0, 1.0, 1.0 - (value - 0.25) * 4.0 ]
if( value < 0.75 ) : return [ (value - 0.5) * 4.0, 1.0, 0.0 ]
if( value < 1.0 ) : return [ 1.0, 1.0 - (value - 0.75) * 4.0, 0.0 ]
return [ 1.0, 0.0, 0.0 ]
#--
#
# Array2Color
#
#--
#
# Convert an array to a pseudo-color
#
def Array2Color( values ) :
# Initialize variables
colors = empty( (len(values), 3) )
value_lengths = empty( (len(values), 1) )
# Compute value vector length
for i in range( len(values) ) :
value_lengths[i] = norm( values[i] )
# Compute minimum and maximum value
min_value = value_lengths.min()
max_value = value_lengths.max()
# Compute the range of the values
value_range = max_value - min_value
# Convert each value to a pseudo-color
for i in range( len(values) ) :
colors[i] = Value2Color( (value_lengths[i] - min_value) / value_range )
# Return result
return colors
|
Add some color utility functions.# -*- coding:utf-8 -*-
# ***************************************************************************
# Color.py
# -------------------
# update : 2013-11-21
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#-
#
# External dependencies
#
#-
from numpy import empty
from numpy.linalg import norm
#--
#
# Value2Color
#
#--
#
# Convert a value in range [ 0.0, 1.0 ] to a pseudo-color
#
def Value2Color( value ) :
if( value < 0.0 ) : return [ 0.0, 0.0 , 1.0 ]
if( value < 0.25 ) : return [ 0.0, value * 4.0, 1.0 ]
if( value < 0.50 ) : return [ 0.0, 1.0, 1.0 - (value - 0.25) * 4.0 ]
if( value < 0.75 ) : return [ (value - 0.5) * 4.0, 1.0, 0.0 ]
if( value < 1.0 ) : return [ 1.0, 1.0 - (value - 0.75) * 4.0, 0.0 ]
return [ 1.0, 0.0, 0.0 ]
#--
#
# Array2Color
#
#--
#
# Convert an array to a pseudo-color
#
def Array2Color( values ) :
# Initialize variables
colors = empty( (len(values), 3) )
value_lengths = empty( (len(values), 1) )
# Compute value vector length
for i in range( len(values) ) :
value_lengths[i] = norm( values[i] )
# Compute minimum and maximum value
min_value = value_lengths.min()
max_value = value_lengths.max()
# Compute the range of the values
value_range = max_value - min_value
# Convert each value to a pseudo-color
for i in range( len(values) ) :
colors[i] = Value2Color( (value_lengths[i] - min_value) / value_range )
# Return result
return colors
|
<commit_before><commit_msg>Add some color utility functions.<commit_after># -*- coding:utf-8 -*-
# ***************************************************************************
# Color.py
# -------------------
# update : 2013-11-21
# copyright : (C) 2013 by Michaël Roy
# email : microygh@gmail.com
# ***************************************************************************
# ***************************************************************************
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation; either version 2 of the License, or *
# * (at your option) any later version. *
# * *
# ***************************************************************************
#-
#
# External dependencies
#
#-
from numpy import empty
from numpy.linalg import norm
#--
#
# Value2Color
#
#--
#
# Convert a value in range [ 0.0, 1.0 ] to a pseudo-color
#
def Value2Color( value ) :
if( value < 0.0 ) : return [ 0.0, 0.0 , 1.0 ]
if( value < 0.25 ) : return [ 0.0, value * 4.0, 1.0 ]
if( value < 0.50 ) : return [ 0.0, 1.0, 1.0 - (value - 0.25) * 4.0 ]
if( value < 0.75 ) : return [ (value - 0.5) * 4.0, 1.0, 0.0 ]
if( value < 1.0 ) : return [ 1.0, 1.0 - (value - 0.75) * 4.0, 0.0 ]
return [ 1.0, 0.0, 0.0 ]
#--
#
# Array2Color
#
#--
#
# Convert an array to a pseudo-color
#
def Array2Color( values ) :
# Initialize variables
colors = empty( (len(values), 3) )
value_lengths = empty( (len(values), 1) )
# Compute value vector length
for i in range( len(values) ) :
value_lengths[i] = norm( values[i] )
# Compute minimum and maximum value
min_value = value_lengths.min()
max_value = value_lengths.max()
# Compute the range of the values
value_range = max_value - min_value
# Convert each value to a pseudo-color
for i in range( len(values) ) :
colors[i] = Value2Color( (value_lengths[i] - min_value) / value_range )
# Return result
return colors
|
|
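A hedged usage example for the helpers above. Note that Array2Color divides by value_range, so an input whose vectors all have the same magnitude (range zero) would divide by zero; callers are presumably expected to avoid that case:
from numpy import array
values = array([[0.0, 0.0, 1.0], [0.0, 0.0, 2.0], [0.0, 0.0, 3.0]])
colors = Array2Color(values)
# The smallest magnitude maps to blue [0, 0, 1] and the largest to
# red [1, 0, 0], with the pseudo-color gradient in between.
print(colors)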
70294b332920d23f97c93c720e6078e6358b7272
|
prjxray/xyaml.py
|
prjxray/xyaml.py
|
#!/usr/bin/env python3
import io
import re
import yaml
import json
import unittest
def load(f):
data = f.read()
# Strip out of !<tags>
data = re.sub("!<[^>]*>", "", data)
return yaml.load(io.StringIO(data))
def tojson(f):
d = load(f)
return json.dumps(d, sort_keys=True, indent=4)
class XYamlTest(unittest.TestCase):
def test(self):
s = io.StringIO("""\
!<xilinx/xc7series/part>
idcode: 0x362d093
global_clock_regions:
top: !<xilinx/xc7series/global_clock_region>
rows:
0: !<xilinx/xc7series/row>
configuration_buses:
CLB_IO_CLK: !<xilinx/xc7series/configuration_bus>
configuration_columns:
0: !<xilinx/xc7series/configuration_column>
frame_count: 42
""")
djson = tojson(s)
self.assertMultiLineEqual(djson, """\
{
"global_clock_regions": {
"top": {
"rows": {
"0": {
"configuration_buses": {
"CLB_IO_CLK": {
"configuration_columns": {
"0": {
"frame_count": 42
}
}
}
}
}
}
}
},
"idcode": 56807571
}""")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
unittest.main()
else:
assert len(sys.argv) == 2
print(tojson(open(sys.argv[1])))
|
Add wrapper to allow easy loading of yaml files.
|
Add wrapper to allow easy loading of yaml files.
Fixes #327.
Signed-off-by: Tim 'mithro' Ansell <b1c1d8736f20db3fb6c1c66bb1455ed43909f0d8@mith.ro>
|
Python
|
isc
|
SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray,SymbiFlow/prjxray
|
Add wrapper to allow easy loading of yaml files.
Fixes #327.
Signed-off-by: Tim 'mithro' Ansell <b1c1d8736f20db3fb6c1c66bb1455ed43909f0d8@mith.ro>
|
#!/usr/bin/env python3
import io
import re
import yaml
import json
import unittest
def load(f):
data = f.read()
# Strip out of !<tags>
data = re.sub("!<[^>]*>", "", data)
return yaml.load(io.StringIO(data))
def tojson(f):
d = load(f)
return json.dumps(d, sort_keys=True, indent=4)
class XYamlTest(unittest.TestCase):
def test(self):
s = io.StringIO("""\
!<xilinx/xc7series/part>
idcode: 0x362d093
global_clock_regions:
top: !<xilinx/xc7series/global_clock_region>
rows:
0: !<xilinx/xc7series/row>
configuration_buses:
CLB_IO_CLK: !<xilinx/xc7series/configuration_bus>
configuration_columns:
0: !<xilinx/xc7series/configuration_column>
frame_count: 42
""")
djson = tojson(s)
self.assertMultiLineEqual(djson, """\
{
"global_clock_regions": {
"top": {
"rows": {
"0": {
"configuration_buses": {
"CLB_IO_CLK": {
"configuration_columns": {
"0": {
"frame_count": 42
}
}
}
}
}
}
}
},
"idcode": 56807571
}""")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
unittest.main()
else:
assert len(sys.argv) == 2
print(tojson(open(sys.argv[1])))
|
<commit_before><commit_msg>Add wrapper to allow easy loading of yaml files.
Fixes #327.
Signed-off-by: Tim 'mithro' Ansell <b1c1d8736f20db3fb6c1c66bb1455ed43909f0d8@mith.ro><commit_after>
|
#!/usr/bin/env python3
import io
import re
import yaml
import json
import unittest
def load(f):
data = f.read()
# Strip out of !<tags>
data = re.sub("!<[^>]*>", "", data)
return yaml.load(io.StringIO(data))
def tojson(f):
d = load(f)
return json.dumps(d, sort_keys=True, indent=4)
class XYamlTest(unittest.TestCase):
def test(self):
s = io.StringIO("""\
!<xilinx/xc7series/part>
idcode: 0x362d093
global_clock_regions:
top: !<xilinx/xc7series/global_clock_region>
rows:
0: !<xilinx/xc7series/row>
configuration_buses:
CLB_IO_CLK: !<xilinx/xc7series/configuration_bus>
configuration_columns:
0: !<xilinx/xc7series/configuration_column>
frame_count: 42
""")
djson = tojson(s)
self.assertMultiLineEqual(djson, """\
{
"global_clock_regions": {
"top": {
"rows": {
"0": {
"configuration_buses": {
"CLB_IO_CLK": {
"configuration_columns": {
"0": {
"frame_count": 42
}
}
}
}
}
}
}
},
"idcode": 56807571
}""")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
unittest.main()
else:
assert len(sys.argv) == 2
print(tojson(open(sys.argv[1])))
|
Add wrapper to allow easy loading of yaml files.
Fixes #327.
Signed-off-by: Tim 'mithro' Ansell <b1c1d8736f20db3fb6c1c66bb1455ed43909f0d8@mith.ro>#!/usr/bin/env python3
import io
import re
import yaml
import json
import unittest
def load(f):
data = f.read()
# Strip out of !<tags>
data = re.sub("!<[^>]*>", "", data)
return yaml.load(io.StringIO(data))
def tojson(f):
d = load(f)
return json.dumps(d, sort_keys=True, indent=4)
class XYamlTest(unittest.TestCase):
def test(self):
s = io.StringIO("""\
!<xilinx/xc7series/part>
idcode: 0x362d093
global_clock_regions:
top: !<xilinx/xc7series/global_clock_region>
rows:
0: !<xilinx/xc7series/row>
configuration_buses:
CLB_IO_CLK: !<xilinx/xc7series/configuration_bus>
configuration_columns:
0: !<xilinx/xc7series/configuration_column>
frame_count: 42
""")
djson = tojson(s)
self.assertMultiLineEqual(djson, """\
{
"global_clock_regions": {
"top": {
"rows": {
"0": {
"configuration_buses": {
"CLB_IO_CLK": {
"configuration_columns": {
"0": {
"frame_count": 42
}
}
}
}
}
}
}
},
"idcode": 56807571
}""")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
unittest.main()
else:
assert len(sys.argv) == 2
print(tojson(open(sys.argv[1])))
|
<commit_before><commit_msg>Add wrapper to allow easy loading of yaml files.
Fixes #327.
Signed-off-by: Tim 'mithro' Ansell <b1c1d8736f20db3fb6c1c66bb1455ed43909f0d8@mith.ro><commit_after>#!/usr/bin/env python3
import io
import re
import yaml
import json
import unittest
def load(f):
data = f.read()
# Strip out of !<tags>
data = re.sub("!<[^>]*>", "", data)
return yaml.load(io.StringIO(data))
def tojson(f):
d = load(f)
return json.dumps(d, sort_keys=True, indent=4)
class XYamlTest(unittest.TestCase):
def test(self):
s = io.StringIO("""\
!<xilinx/xc7series/part>
idcode: 0x362d093
global_clock_regions:
top: !<xilinx/xc7series/global_clock_region>
rows:
0: !<xilinx/xc7series/row>
configuration_buses:
CLB_IO_CLK: !<xilinx/xc7series/configuration_bus>
configuration_columns:
0: !<xilinx/xc7series/configuration_column>
frame_count: 42
""")
djson = tojson(s)
self.assertMultiLineEqual(djson, """\
{
"global_clock_regions": {
"top": {
"rows": {
"0": {
"configuration_buses": {
"CLB_IO_CLK": {
"configuration_columns": {
"0": {
"frame_count": 42
}
}
}
}
}
}
}
},
"idcode": 56807571
}""")
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
unittest.main()
else:
assert len(sys.argv) == 2
print(tojson(open(sys.argv[1])))
|
|
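A quick illustration of the tag-stripping step above, runnable on its own: the regex deletes any !<...> tag before PyYAML ever sees the stream, which is the whole trick of load():
import re
s = "top: !<xilinx/xc7series/global_clock_region>\n  rows: {}"
print(re.sub("!<[^>]*>", "", s))
# prints:
# top: 
#   rows: {}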
8479da007990d561147951440f1f520ebbcfdadc
|
spyder_unittest/tests/test_unittestplugin.py
|
spyder_unittest/tests/test_unittestplugin.py
|
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""Tests for unittestplugin.py"""
# Third party imports
from qtpy.QtWidgets import QWidget
import pytest
# Local imports
from spyder_unittest.unittestplugin import UnitTestPlugin
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
@pytest.fixture
def plugin(qtbot):
"""Set up the unittest plugin."""
res = UnitTestPlugin(None)
qtbot.addWidget(res)
res.main = Mock()
res.main.run_menu_actions = [42]
res.main.editor.pythonfile_dependent_actions = [42]
res.register_plugin()
return res
def test_initialization(plugin):
"""Check that plugin initialization does not yield an error."""
plugin.show()
|
Add baseline test for initializing plugin
|
Add baseline test for initializing plugin
|
Python
|
mit
|
jitseniesen/spyder-unittest
|
Add baseline test for initializing plugin
|
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""Tests for unittestplugin.py"""
# Third party imports
from qtpy.QtWidgets import QWidget
import pytest
# Local imports
from spyder_unittest.unittestplugin import UnitTestPlugin
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
@pytest.fixture
def plugin(qtbot):
"""Set up the unittest plugin."""
res = UnitTestPlugin(None)
qtbot.addWidget(res)
res.main = Mock()
res.main.run_menu_actions = [42]
res.main.editor.pythonfile_dependent_actions = [42]
res.register_plugin()
return res
def test_initialization(plugin):
"""Check that plugin initialization does not yield an error."""
plugin.show()
|
<commit_before><commit_msg>Add baseline test for initializing plugin<commit_after>
|
# -*- coding: utf-8 -*-
#
# Copyright © 2017 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""Tests for unittestplugin.py"""
# Third party imports
from qtpy.QtWidgets import QWidget
import pytest
# Local imports
from spyder_unittest.unittestplugin import UnitTestPlugin
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
@pytest.fixture
def plugin(qtbot):
"""Set up the unittest plugin."""
res = UnitTestPlugin(None)
qtbot.addWidget(res)
res.main = Mock()
res.main.run_menu_actions = [42]
res.main.editor.pythonfile_dependent_actions = [42]
res.register_plugin()
return res
def test_initialization(plugin):
"""Check that plugin initialization does not yield an error."""
plugin.show()
|
Add baseline test for initializing plugin# -*- coding: utf-8 -*-
#
# Copyright © 2017 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""Tests for unittestplugin.py"""
# Third party imports
from qtpy.QtWidgets import QWidget
import pytest
# Local imports
from spyder_unittest.unittestplugin import UnitTestPlugin
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
@pytest.fixture
def plugin(qtbot):
"""Set up the unittest plugin."""
res = UnitTestPlugin(None)
qtbot.addWidget(res)
res.main = Mock()
res.main.run_menu_actions = [42]
res.main.editor.pythonfile_dependent_actions = [42]
res.register_plugin()
return res
def test_initialization(plugin):
"""Check that plugin initialization does not yield an error."""
plugin.show()
|
<commit_before><commit_msg>Add baseline test for initializing plugin<commit_after># -*- coding: utf-8 -*-
#
# Copyright © 2017 Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""Tests for unittestplugin.py"""
# Third party imports
from qtpy.QtWidgets import QWidget
import pytest
# Local imports
from spyder_unittest.unittestplugin import UnitTestPlugin
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock # Python 2
@pytest.fixture
def plugin(qtbot):
"""Set up the unittest plugin."""
res = UnitTestPlugin(None)
qtbot.addWidget(res)
res.main = Mock()
res.main.run_menu_actions = [42]
res.main.editor.pythonfile_dependent_actions = [42]
res.register_plugin()
return res
def test_initialization(plugin):
"""Check that plugin initialization does not yield an error."""
plugin.show()
|
|
fbb07f1448d7759c13db735f34d30a86938b8bf4
|
tests/test_postgresql.py
|
tests/test_postgresql.py
|
import pytest
from mock import MagicMock
import zmon_aws_agent.postgresql as postgresql
def test_get_databases_from_clusters():
pgclusters = [
{
'id': 'test-1',
'dnsname': 'test-1.db.zalan.do'
}
]
acc = '1234567890'
region = 'eu-xxx-1'
postgresql.list_postgres_databases = MagicMock()
postgresql.list_postgres_databases.return_value = ['db1', 'db2']
databases = postgresql.get_databases_from_clusters(pgclusters, acc, region,
'pguser', 'pgpass')
assert databases == [
{
'id': 'db1-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db1',
'shards': {
'db1': 'test-1.db.zalan.do:5432/db1'
}
},
{
'id': 'db2-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db2',
'shards': {
'db2': 'test-1.db.zalan.do:5432/db2'
}
}
]
|
Add minimal test for new postgresql.py module
|
Add minimal test for new postgresql.py module
|
Python
|
apache-2.0
|
zalando/zmon-aws-agent,zalando/zmon-aws-agent
|
Add minimal test for new postgresql.py module
|
import pytest
from mock import MagicMock
import zmon_aws_agent.postgresql as postgresql
def test_get_databases_from_clusters():
pgclusters = [
{
'id': 'test-1',
'dnsname': 'test-1.db.zalan.do'
}
]
acc = '1234567890'
region = 'eu-xxx-1'
postgresql.list_postgres_databases = MagicMock()
postgresql.list_postgres_databases.return_value = ['db1', 'db2']
databases = postgresql.get_databases_from_clusters(pgclusters, acc, region,
'pguser', 'pgpass')
assert databases == [
{
'id': 'db1-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db1',
'shards': {
'db1': 'test-1.db.zalan.do:5432/db1'
}
},
{
'id': 'db2-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db2',
'shards': {
'db2': 'test-1.db.zalan.do:5432/db2'
}
}
]
|
<commit_before><commit_msg>Add minimal test for new postgresql.py module<commit_after>
|
import pytest
from mock import MagicMock
import zmon_aws_agent.postgresql as postgresql
def test_get_databases_from_clusters():
pgclusters = [
{
'id': 'test-1',
'dnsname': 'test-1.db.zalan.do'
}
]
acc = '1234567890'
region = 'eu-xxx-1'
postgresql.list_postgres_databases = MagicMock()
postgresql.list_postgres_databases.return_value = ['db1', 'db2']
databases = postgresql.get_databases_from_clusters(pgclusters, acc, region,
'pguser', 'pgpass')
assert databases == [
{
'id': 'db1-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db1',
'shards': {
'db1': 'test-1.db.zalan.do:5432/db1'
}
},
{
'id': 'db2-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db2',
'shards': {
'db2': 'test-1.db.zalan.do:5432/db2'
}
}
]
|
Add minimal test for new postgresql.py moduleimport pytest
from mock import MagicMock
import zmon_aws_agent.postgresql as postgresql
def test_get_databases_from_clusters():
pgclusters = [
{
'id': 'test-1',
'dnsname': 'test-1.db.zalan.do'
}
]
acc = '1234567890'
region = 'eu-xxx-1'
postgresql.list_postgres_databases = MagicMock()
postgresql.list_postgres_databases.return_value = ['db1', 'db2']
databases = postgresql.get_databases_from_clusters(pgclusters, acc, region,
'pguser', 'pgpass')
assert databases == [
{
'id': 'db1-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db1',
'shards': {
'db1': 'test-1.db.zalan.do:5432/db1'
}
},
{
'id': 'db2-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db2',
'shards': {
'db2': 'test-1.db.zalan.do:5432/db2'
}
}
]
|
<commit_before><commit_msg>Add minimal test for new postgresql.py module<commit_after>import pytest
from mock import MagicMock
import zmon_aws_agent.postgresql as postgresql
def test_get_databases_from_clusters():
pgclusters = [
{
'id': 'test-1',
'dnsname': 'test-1.db.zalan.do'
}
]
acc = '1234567890'
region = 'eu-xxx-1'
postgresql.list_postgres_databases = MagicMock()
postgresql.list_postgres_databases.return_value = ['db1', 'db2']
databases = postgresql.get_databases_from_clusters(pgclusters, acc, region,
'pguser', 'pgpass')
assert databases == [
{
'id': 'db1-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db1',
'shards': {
'db1': 'test-1.db.zalan.do:5432/db1'
}
},
{
'id': 'db2-test-1.db.zalan.do[aws:1234567890:eu-xxx-1]',
'type': 'postgresql_database',
'created_by': 'agent',
'infrastructure_account': acc,
'region': region,
'postgresql_cluster': 'test-1',
'database_name': 'db2',
'shards': {
'db2': 'test-1.db.zalan.do:5432/db2'
}
}
]
|
|
2bbffd5eec2e42897969e504551c16d4abbf5ba9
|
tests/test_test_utils.py
|
tests/test_test_utils.py
|
from django.test import TestCase
from constance import config
from constance.test import override_config
class OverrideConfigFunctionDecoratorTestCase(TestCase):
"""Test that the override_config decorator works correctly.
Test usage of override_config on test method and as context manager.
"""
def test_default_value_is_true(self):
"""Assert that the default value of config.BOOL_VALUE is True."""
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
def test_override_config_on_method_changes_config_value(self):
"""Assert that the method decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
def test_override_config_as_context_manager_changes_config_value(self):
"""Assert that the context manager changes config.BOOL_VALUE."""
with override_config(BOOL_VALUE=False):
self.assertFalse(config.BOOL_VALUE)
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
class OverrideConfigClassDecoratorTestCase(TestCase):
"""Test that the override_config decorator works on classes."""
def test_override_config_on_class_changes_config_value(self):
"""Asser that the class decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
|
Add test cases for override_config
|
Add test cases for override_config
Test usage of override_config in different forms
Ensure flexibility between decorator and context manager
|
Python
|
bsd-3-clause
|
pombredanne/django-constance,winzard/django-constance,jonzlin95/django-constance,pombredanne/django-constance,jonzlin95/django-constance,jazzband/django-constance,dmugtasimov/django-constance,jezdez/django-constance,APSL/django-constance,jazzband/django-constance,dmugtasimov/django-constance,winzard/django-constance,thorgate/django-constance,jazzband/django-constance,jezdez/django-constance,APSL/django-constance,thorgate/django-constance
|
Add test cases for override_config
Test usage of override_config in different forms
Ensure flexibility between decorator and context manager
|
from django.test import TestCase
from constance import config
from constance.test import override_config
class OverrideConfigFunctionDecoratorTestCase(TestCase):
"""Test that the override_config decorator works correctly.
Test usage of override_config on test method and as context manager.
"""
def test_default_value_is_true(self):
"""Assert that the default value of config.BOOL_VALUE is True."""
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
def test_override_config_on_method_changes_config_value(self):
"""Assert that the method decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
def test_override_config_as_context_manager_changes_config_value(self):
"""Assert that the context manager changes config.BOOL_VALUE."""
with override_config(BOOL_VALUE=False):
self.assertFalse(config.BOOL_VALUE)
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
class OverrideConfigClassDecoratorTestCase(TestCase):
"""Test that the override_config decorator works on classes."""
def test_override_config_on_class_changes_config_value(self):
"""Asser that the class decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
|
<commit_before><commit_msg>Add test cases for override_config
Test usage of override_config in different forms
Ensure flexibility between decorator and context manager<commit_after>
|
from django.test import TestCase
from constance import config
from constance.test import override_config
class OverrideConfigFunctionDecoratorTestCase(TestCase):
"""Test that the override_config decorator works correctly.
Test usage of override_config on test method and as context manager.
"""
def test_default_value_is_true(self):
"""Assert that the default value of config.BOOL_VALUE is True."""
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
def test_override_config_on_method_changes_config_value(self):
"""Assert that the method decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
def test_override_config_as_context_manager_changes_config_value(self):
"""Assert that the context manager changes config.BOOL_VALUE."""
with override_config(BOOL_VALUE=False):
self.assertFalse(config.BOOL_VALUE)
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
class OverrideConfigClassDecoratorTestCase(TestCase):
"""Test that the override_config decorator works on classes."""
def test_override_config_on_class_changes_config_value(self):
"""Asser that the class decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
|
Add test cases for override_config
Test usage of override_config in different forms
Ensure flexibility between decorator and context managerfrom django.test import TestCase
from constance import config
from constance.test import override_config
class OverrideConfigFunctionDecoratorTestCase(TestCase):
"""Test that the override_config decorator works correctly.
Test usage of override_config on test method and as context manager.
"""
def test_default_value_is_true(self):
"""Assert that the default value of config.BOOL_VALUE is True."""
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
def test_override_config_on_method_changes_config_value(self):
"""Assert that the method decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
def test_override_config_as_context_manager_changes_config_value(self):
"""Assert that the context manager changes config.BOOL_VALUE."""
with override_config(BOOL_VALUE=False):
self.assertFalse(config.BOOL_VALUE)
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
class OverrideConfigClassDecoratorTestCase(TestCase):
"""Test that the override_config decorator works on classes."""
def test_override_config_on_class_changes_config_value(self):
"""Asser that the class decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
|
<commit_before><commit_msg>Add test cases for override_config
Test usage of override_config in different forms
Ensure flexibility between decorator and context manager<commit_after>from django.test import TestCase
from constance import config
from constance.test import override_config
class OverrideConfigFunctionDecoratorTestCase(TestCase):
"""Test that the override_config decorator works correctly.
Test usage of override_config on test method and as context manager.
"""
def test_default_value_is_true(self):
"""Assert that the default value of config.BOOL_VALUE is True."""
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
def test_override_config_on_method_changes_config_value(self):
"""Assert that the method decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
def test_override_config_as_context_manager_changes_config_value(self):
"""Assert that the context manager changes config.BOOL_VALUE."""
with override_config(BOOL_VALUE=False):
self.assertFalse(config.BOOL_VALUE)
self.assertTrue(config.BOOL_VALUE)
@override_config(BOOL_VALUE=False)
class OverrideConfigClassDecoratorTestCase(TestCase):
"""Test that the override_config decorator works on classes."""
def test_override_config_on_class_changes_config_value(self):
"""Asser that the class decorator changes config.BOOL_VALUE."""
self.assertFalse(config.BOOL_VALUE)
|
|
bc69916b71a04a4f54ef7c8ca2fb7142260634f2
|
tests/test_validators.py
|
tests/test_validators.py
|
"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
raises(ValidationError, email, form, DummyField('foo'))
raises(ValidationError, email, form, DummyField('bar.dk'))
raises(ValidationError, email, form, DummyField('foo@'))
raises(ValidationError, email, form, DummyField('@bar.dk'))
raises(ValidationError, email, form, DummyField('foo@bar'))
raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
Add first basic unittests using py.test
|
Add first basic unittests using py.test
|
Python
|
bsd-3-clause
|
jmagnusson/wtforms,cklein/wtforms,pawl/wtforms,pawl/wtforms,subyraman/wtforms,crast/wtforms,Aaron1992/wtforms,Xender/wtforms,wtforms/wtforms,hsum/wtforms,Aaron1992/wtforms,skytreader/wtforms
|
Add first basic unittests using py.test
|
"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
raises(ValidationError, email, form, DummyField('foo'))
raises(ValidationError, email, form, DummyField('bar.dk'))
raises(ValidationError, email, form, DummyField('foo@'))
raises(ValidationError, email, form, DummyField('@bar.dk'))
raises(ValidationError, email, form, DummyField('foo@bar'))
raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
<commit_before><commit_msg>Add first basic unittests using py.test<commit_after>
|
"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
raises(ValidationError, email, form, DummyField('foo'))
raises(ValidationError, email, form, DummyField('bar.dk'))
raises(ValidationError, email, form, DummyField('foo@'))
raises(ValidationError, email, form, DummyField('@bar.dk'))
raises(ValidationError, email, form, DummyField('foo@bar'))
raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
Add first basic unittests using py.test"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
raises(ValidationError, email, form, DummyField('foo'))
raises(ValidationError, email, form, DummyField('bar.dk'))
raises(ValidationError, email, form, DummyField('foo@'))
raises(ValidationError, email, form, DummyField('@bar.dk'))
raises(ValidationError, email, form, DummyField('foo@bar'))
raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
<commit_before><commit_msg>Add first basic unittests using py.test<commit_after>"""
test_validators
~~~~~~~~~~~~~~
Unittests for bundled validators.
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
from py.test import raises
from wtforms.validators import ValidationError, length, url, not_empty, email, ip_address
class DummyForm(object):
pass
class DummyField(object):
def __init__(self, data):
self.data = data
form = DummyForm()
def test_email():
assert email(form, DummyField('foo@bar.dk')) == None
assert email(form, DummyField('123@bar.dk')) == None
assert email(form, DummyField('foo@456.dk')) == None
assert email(form, DummyField('foo@bar456.info')) == None
raises(ValidationError, email, form, DummyField('foo'))
raises(ValidationError, email, form, DummyField('bar.dk'))
raises(ValidationError, email, form, DummyField('foo@'))
raises(ValidationError, email, form, DummyField('@bar.dk'))
raises(ValidationError, email, form, DummyField('foo@bar'))
raises(ValidationError, email, form, DummyField('foo@bar.ab12'))
raises(ValidationError, email, form, DummyField('foo@bar.abcde'))
def test_length():
field = DummyField('foobar')
assert length(min=2, max=6)(form, field) == None
raises(ValidationError, length(min=7), form, field)
raises(ValidationError, length(max=5), form, field)
def test_url():
assert url()(form, DummyField('http://foobar.dk')) == None
assert url()(form, DummyField('http://foobar.dk/')) == None
assert url()(form, DummyField('http://foobar.dk/foobar')) == None
raises(ValidationError, url(), form, DummyField('http://foobar'))
raises(ValidationError, url(), form, DummyField('foobar.dk'))
raises(ValidationError, url(), form, DummyField('http://foobar.12'))
def test_not_empty():
assert not_empty()(form, DummyField('foobar')) == None
raises(ValidationError, not_empty(), form, DummyField(''))
raises(ValidationError, not_empty(), form, DummyField(' '))
def test_ip_address():
assert ip_address(form, DummyField('127.0.0.1')) == None
raises(ValidationError, ip_address, form, DummyField('abc.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('1278.0.0.1'))
raises(ValidationError, ip_address, form, DummyField('127.0.0.abc'))
|
|
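The validators exercised above all share one calling convention: a callable (or a factory returning one) that takes (form, field), returns None on success, and raises ValidationError on bad input. A minimal sketch of a custom validator in that style, using only the ValidationError import shown above; the validator name and message are illustrative, not part of wtforms:

from wtforms.validators import ValidationError

def no_digits(form, field):
    # Follows the (form, field) convention of the bundled validators:
    # silently return on success, raise ValidationError on bad data.
    if any(ch.isdigit() for ch in field.data):
        raise ValidationError('Field must not contain digits.')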
b189e7043115f788093b129815ff0bde5895ee0b
|
glanguage/__init__.py
|
glanguage/__init__.py
|
import httplib2
try:
import simplejson as json
except:
import json
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
OAUTH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
class LanguageClient(object):
def __init__(self):
credentials = GoogleCredentials.get_application_default().create_scoped(OAUTH_SCOPE)
http = httplib2.Http()
credentials.authorize(http)
self.service = discovery.build('language','v1beta1',http=http,discoveryServiceUrl=DISCOVERY_URL)
def get_entities(self,document):
"""Get named entites in document."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')},'encodingType':'UTF8'}
service_request = self.service.documents().analyzeEntities(body=request_body)
response_body = service_request.execute()
return response_body['entities']
def get_sentiment(self,document):
"""Get sentiment in document as polarity and magnitude."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')}}
service_request = self.service.documents().analyzeSentiment(body=request_body)
response_body = service_request.execute()
return response_body['documentSentiment']
|
Add Natural Language API wrapper.
|
Add Natural Language API wrapper.
|
Python
|
apache-2.0
|
alexcchan/google
|
Add Natural Language API wrapper.
|
import httplib2
try:
import simplejson as json
except:
import json
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
OAUTH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
class LanguageClient(object):
def __init__(self):
credentials = GoogleCredentials.get_application_default().create_scoped(OAUTH_SCOPE)
http = httplib2.Http()
credentials.authorize(http)
self.service = discovery.build('language','v1beta1',http=http,discoveryServiceUrl=DISCOVERY_URL)
def get_entities(self,document):
"""Get named entites in document."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')},'encodingType':'UTF8'}
service_request = self.service.documents().analyzeEntities(body=request_body)
response_body = service_request.execute()
return response_body['entities']
def get_sentiment(self,document):
"""Get sentiment in document as polarity and magnitude."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')}}
service_request = self.service.documents().analyzeSentiment(body=request_body)
response_body = service_request.execute()
return response_body['documentSentiment']
|
<commit_before><commit_msg>Add Natural Language API wrapper.<commit_after>
|
import httplib2
try:
import simplejson as json
except:
import json
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
OAUTH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
class LanguageClient(object):
def __init__(self):
credentials = GoogleCredentials.get_application_default().create_scoped(OAUTH_SCOPE)
http = httplib2.Http()
credentials.authorize(http)
self.service = discovery.build('language','v1beta1',http=http,discoveryServiceUrl=DISCOVERY_URL)
def get_entities(self,document):
"""Get named entites in document."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')},'encodingType':'UTF8'}
service_request = self.service.documents().analyzeEntities(body=request_body)
response_body = service_request.execute()
return response_body['entities']
def get_sentiment(self,document):
"""Get sentiment in document as polarity and magnitude."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')}}
service_request = self.service.documents().analyzeSentiment(body=request_body)
response_body = service_request.execute()
return response_body['documentSentiment']
|
Add Natural Language API wrapper.import httplib2
try:
import simplejson as json
except:
import json
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
OAUTH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
class LanguageClient(object):
def __init__(self):
credentials = GoogleCredentials.get_application_default().create_scoped(OAUTH_SCOPE)
http = httplib2.Http()
credentials.authorize(http)
self.service = discovery.build('language','v1beta1',http=http,discoveryServiceUrl=DISCOVERY_URL)
def get_entities(self,document):
"""Get named entites in document."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')},'encodingType':'UTF8'}
service_request = self.service.documents().analyzeEntities(body=request_body)
response_body = service_request.execute()
return response_body['entities']
def get_sentiment(self,document):
"""Get sentiment in document as polarity and magnitude."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')}}
service_request = self.service.documents().analyzeSentiment(body=request_body)
response_body = service_request.execute()
return response_body['documentSentiment']
|
<commit_before><commit_msg>Add Natural Language API wrapper.<commit_after>import httplib2
try:
import simplejson as json
except:
import json
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
OAUTH_SCOPE = 'https://www.googleapis.com/auth/cloud-platform'
DISCOVERY_URL = 'https://{api}.googleapis.com/$discovery/rest?version={apiVersion}'
class LanguageClient(object):
def __init__(self):
credentials = GoogleCredentials.get_application_default().create_scoped(OAUTH_SCOPE)
http = httplib2.Http()
credentials.authorize(http)
self.service = discovery.build('language','v1beta1',http=http,discoveryServiceUrl=DISCOVERY_URL)
def get_entities(self,document):
"""Get named entites in document."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')},'encodingType':'UTF8'}
service_request = self.service.documents().analyzeEntities(body=request_body)
response_body = service_request.execute()
return response_body['entities']
def get_sentiment(self,document):
"""Get sentiment in document as polarity and magnitude."""
request_body={'document':{'type':'PLAIN_TEXT','content':document.encode('utf-8')}}
service_request = self.service.documents().analyzeSentiment(body=request_body)
response_body = service_request.execute()
return response_body['documentSentiment']
|
|
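For orientation, a minimal usage sketch of the LanguageClient wrapper above. It assumes Google application-default credentials are available in the environment (e.g. via GOOGLE_APPLICATION_CREDENTIALS); the sample sentences are placeholders. Per the docstrings, get_sentiment returns the API's documentSentiment object (polarity and magnitude) and get_entities returns the entity list:

from glanguage import LanguageClient

client = LanguageClient()

# Sentiment comes back as the v1beta1 documentSentiment object.
sentiment = client.get_sentiment(u"The new release is wonderful.")
print(sentiment.get('polarity'), sentiment.get('magnitude'))

# Entities come back as the API's entity list.
for entity in client.get_entities(u"Ada Lovelace lived in London."):
    print(entity.get('name'), entity.get('type'))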
809781cc832d79e1f746df385317ce4dae6223b3
|
tests/test_GetCalls.py
|
tests/test_GetCalls.py
|
#!/usr/bin/env python3
import codecs, json
from PokeFacts import RedditBot
from PokeFacts import DataPulls
def getDataPullsObject():
store = DataPulls.ItemStore({'term_property': 'term'})
with codecs.open('tests/test_data.json', "r", "utf-8") as data_file:
store.addItems(json.load(data_file))
return DataPulls.DataPulls(store=store)
def test_GetCalls():
main = RedditBot.CallResponse(reddit=False, data=getDataPullsObject())
calls = main.get_calls("{charizard} {charzard} { charizard }")
assert len(calls) == 1
assert calls[0].term == 'charizard'
|
Add unit test for get_calls
|
Add unit test for get_calls
|
Python
|
mit
|
rpokemon/PokeFacts
|
Add unit test for get_calls
|
#!/usr/bin/env python3
import codecs, json
from PokeFacts import RedditBot
from PokeFacts import DataPulls
def getDataPullsObject():
store = DataPulls.ItemStore({'term_property': 'term'})
with codecs.open('tests/test_data.json', "r", "utf-8") as data_file:
store.addItems(json.load(data_file))
return DataPulls.DataPulls(store=store)
def test_GetCalls():
main = RedditBot.CallResponse(reddit=False, data=getDataPullsObject())
calls = main.get_calls("{charizard} {charzard} { charizard }")
assert len(calls) == 1
assert calls[0].term == 'charizard'
|
<commit_before><commit_msg>Add unit test for get_calls<commit_after>
|
#!/usr/bin/env python3
import codecs, json
from PokeFacts import RedditBot
from PokeFacts import DataPulls
def getDataPullsObject():
store = DataPulls.ItemStore({'term_property': 'term'})
with codecs.open('tests/test_data.json', "r", "utf-8") as data_file:
store.addItems(json.load(data_file))
return DataPulls.DataPulls(store=store)
def test_GetCalls():
main = RedditBot.CallResponse(reddit=False, data=getDataPullsObject())
calls = main.get_calls("{charizard} {charzard} { charizard }")
assert len(calls) == 1
assert calls[0].term == 'charizard'
|
Add unit test for get_calls#!/usr/bin/env python3
import codecs, json
from PokeFacts import RedditBot
from PokeFacts import DataPulls
def getDataPullsObject():
store = DataPulls.ItemStore({'term_property': 'term'})
with codecs.open('tests/test_data.json', "r", "utf-8") as data_file:
store.addItems(json.load(data_file))
return DataPulls.DataPulls(store=store)
def test_GetCalls():
main = RedditBot.CallResponse(reddit=False, data=getDataPullsObject())
calls = main.get_calls("{charizard} {charzard} { charizard }")
assert len(calls) == 1
assert calls[0].term == 'charizard'
|
<commit_before><commit_msg>Add unit test for get_calls<commit_after>#!/usr/bin/env python3
import codecs, json
from PokeFacts import RedditBot
from PokeFacts import DataPulls
def getDataPullsObject():
store = DataPulls.ItemStore({'term_property': 'term'})
with codecs.open('tests/test_data.json', "r", "utf-8") as data_file:
store.addItems(json.load(data_file))
return DataPulls.DataPulls(store=store)
def test_GetCalls():
main = RedditBot.CallResponse(reddit=False, data=getDataPullsObject())
calls = main.get_calls("{charizard} {charzard} { charizard }")
assert len(calls) == 1
assert calls[0].term == 'charizard'
|
|
47f4e738cc11ec40d3410332106163b0235f5da4
|
tests/python/tests/test_result.py
|
tests/python/tests/test_result.py
|
import unittest
import librepo
from librepo import LibrepoException
class TestCaseResult(unittest.TestCase):
def test_result_getinfo(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(ValueError, r.getinfo, 99999999)
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPO))
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPOMD))
self.assertRaises(LibrepoException, r.getinfo, librepo.LRR_YUM_TIMESTAMP)
def test_result_attrs(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(AttributeError, getattr, r, 'foobar_attr')
# Attrs should not be filled (that's why None or
# LibrepoException is expected), but they definitely
# should exist (no AttributeError should be raised)
self.assertFalse(r.yum_repo)
self.assertFalse(r.yum_repomd)
self.assertRaises(LibrepoException, getattr, r, 'yum_timestamp')
|
Add tests for Result object
|
Tests: Add tests for Result object
|
Python
|
lgpl-2.1
|
Conan-Kudo/librepo,bgamari/librepo,Tojaj/librepo,rholy/librepo,rholy/librepo,cgwalters/librepo,rpm-software-management/librepo,Tojaj/librepo,Conan-Kudo/librepo,rholy/librepo,cgwalters/librepo,cgwalters/librepo,rholy/librepo,Conan-Kudo/librepo,cgwalters/librepo,rpm-software-management/librepo,Tojaj/librepo,rpm-software-management/librepo,bgamari/librepo,bgamari/librepo
|
Tests: Add tests for Result object
|
import unittest
import librepo
from librepo import LibrepoException
class TestCaseResult(unittest.TestCase):
def test_result_getinfo(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(ValueError, r.getinfo, 99999999)
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPO))
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPOMD))
self.assertRaises(LibrepoException, r.getinfo, librepo.LRR_YUM_TIMESTAMP)
def test_result_attrs(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(AttributeError, getattr, r, 'foobar_attr')
# Attrs should not be filled (that's why None or
# LibrepoException is expected), but they definitely
# should exist (no AttributeError should be raised)
self.assertFalse(r.yum_repo)
self.assertFalse(r.yum_repomd)
self.assertRaises(LibrepoException, getattr, r, 'yum_timestamp')
|
<commit_before><commit_msg>Tests: Add tests for Result object<commit_after>
|
import unittest
import librepo
from librepo import LibrepoException
class TestCaseResult(unittest.TestCase):
def test_result_getinfo(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(ValueError, r.getinfo, 99999999)
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPO))
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPOMD))
self.assertRaises(LibrepoException, r.getinfo, librepo.LRR_YUM_TIMESTAMP)
def test_result_attrs(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(AttributeError, getattr, r, 'foobar_attr')
# Attrs should not be filled (that's why None or
# LibrepoException is expected), but they definitely
# should exist (no AttributeError should be raised)
self.assertFalse(r.yum_repo)
self.assertFalse(r.yum_repomd)
self.assertRaises(LibrepoException, getattr, r, 'yum_timestamp')
|
Tests: Add tests for Result objectimport unittest
import librepo
from librepo import LibrepoException
class TestCaseResult(unittest.TestCase):
def test_result_getinfo(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(ValueError, r.getinfo, 99999999)
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPO))
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPOMD))
self.assertRaises(LibrepoException, r.getinfo, librepo.LRR_YUM_TIMESTAMP)
def test_result_attrs(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(AttributeError, getattr, r, 'foobar_attr')
# Attrs should not be filled (that's why None or
# LibrepoException is expected), but they definitely
# should exist (no AttributeError should be raised)
self.assertFalse(r.yum_repo)
self.assertFalse(r.yum_repomd)
self.assertRaises(LibrepoException, getattr, r, 'yum_timestamp')
|
<commit_before><commit_msg>Tests: Add tests for Result object<commit_after>import unittest
import librepo
from librepo import LibrepoException
class TestCaseResult(unittest.TestCase):
def test_result_getinfo(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(ValueError, r.getinfo, 99999999)
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPO))
self.assertFalse(r.getinfo(librepo.LRR_YUM_REPOMD))
self.assertRaises(LibrepoException, r.getinfo, librepo.LRR_YUM_TIMESTAMP)
def test_result_attrs(self):
r = librepo.Result()
self.assertTrue(r)
self.assertRaises(AttributeError, getattr, r, 'foobar_attr')
# Attrs should not be filled (that's why None or
# LibrepoException is expected), but they definitely
# should exist (no AttributeError should be raised)
self.assertFalse(r.yum_repo)
self.assertFalse(r.yum_repomd)
self.assertRaises(LibrepoException, getattr, r, 'yum_timestamp')
|
|
4736ed07ea8b83ca8c32c2d675f67883050b8c26
|
tests/test_provider_lawrenceks.py
|
tests/test_provider_lawrenceks.py
|
import busbus
from busbus.provider.lawrenceks import LawrenceTransitProvider
import arrow
import pytest
@pytest.fixture(scope='module')
def lawrenceks_provider():
return LawrenceTransitProvider()
def test_43_to_eaton_hall(lawrenceks_provider):
stop = lawrenceks_provider.get(busbus.Stop, u'15TH_SPAHR_WB')
route = lawrenceks_provider.get(busbus.Route, u'RT_43')
assert len(list(lawrenceks_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-05:00'),
end_time=arrow.get('2015-03-10T16:00:00-05:00')))) == 13
|
Add a simple test case for LawrenceTransitProvider
|
Add a simple test case for LawrenceTransitProvider
|
Python
|
mit
|
spaceboats/busbus
|
Add a simple test case for LawrenceTransitProvider
|
import busbus
from busbus.provider.lawrenceks import LawrenceTransitProvider
import arrow
import pytest
@pytest.fixture(scope='module')
def lawrenceks_provider():
return LawrenceTransitProvider()
def test_43_to_eaton_hall(lawrenceks_provider):
stop = lawrenceks_provider.get(busbus.Stop, u'15TH_SPAHR_WB')
route = lawrenceks_provider.get(busbus.Route, u'RT_43')
assert len(list(lawrenceks_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-05:00'),
end_time=arrow.get('2015-03-10T16:00:00-05:00')))) == 13
|
<commit_before><commit_msg>Add a simple test case for LawrenceTransitProvider<commit_after>
|
import busbus
from busbus.provider.lawrenceks import LawrenceTransitProvider
import arrow
import pytest
@pytest.fixture(scope='module')
def lawrenceks_provider():
return LawrenceTransitProvider()
def test_43_to_eaton_hall(lawrenceks_provider):
stop = lawrenceks_provider.get(busbus.Stop, u'15TH_SPAHR_WB')
route = lawrenceks_provider.get(busbus.Route, u'RT_43')
assert len(list(lawrenceks_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-05:00'),
end_time=arrow.get('2015-03-10T16:00:00-05:00')))) == 13
|
Add a simple test case for LawrenceTransitProviderimport busbus
from busbus.provider.lawrenceks import LawrenceTransitProvider
import arrow
import pytest
@pytest.fixture(scope='module')
def lawrenceks_provider():
return LawrenceTransitProvider()
def test_43_to_eaton_hall(lawrenceks_provider):
stop = lawrenceks_provider.get(busbus.Stop, u'15TH_SPAHR_WB')
route = lawrenceks_provider.get(busbus.Route, u'RT_43')
assert len(list(lawrenceks_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-05:00'),
end_time=arrow.get('2015-03-10T16:00:00-05:00')))) == 13
|
<commit_before><commit_msg>Add a simple test case for LawrenceTransitProvider<commit_after>import busbus
from busbus.provider.lawrenceks import LawrenceTransitProvider
import arrow
import pytest
@pytest.fixture(scope='module')
def lawrenceks_provider():
return LawrenceTransitProvider()
def test_43_to_eaton_hall(lawrenceks_provider):
stop = lawrenceks_provider.get(busbus.Stop, u'15TH_SPAHR_WB')
route = lawrenceks_provider.get(busbus.Route, u'RT_43')
assert len(list(lawrenceks_provider.arrivals.where(
stop=stop, route=route,
start_time=arrow.get('2015-03-10T14:00:00-05:00'),
end_time=arrow.get('2015-03-10T16:00:00-05:00')))) == 13
|
|
15d782aaddf1e8a4215df2fa3ef60b8801fe382a
|
tests_tf/test_utils.py
|
tests_tf/test_utils.py
|
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from cleverhans.utils_tf import kl_with_logits, l2_batch_normalize
def numpy_kl_with_logits(q_logits, p_logits):
def numpy_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
q = numpy_softmax(q_logits)
p = numpy_softmax(p_logits)
return (q * (np.log(q) - np.log(p))).sum(axis=1).mean()
class TestUtils(unittest.TestCase):
def test_l2_batch_normalize(self):
import tensorflow as tf
with tf.Session() as sess:
x = tf.random_normal((100, 1000))
x_norm = sess.run(l2_batch_normalize(x))
self.assertTrue(
np.allclose(np.sum(x_norm**2, axis=1), 1, atol=1e-6))
def test_kl_with_logits(self):
import tensorflow as tf
q_logits = tf.placeholder(tf.float32, shape=(100, 20))
p_logits = tf.placeholder(tf.float32, shape=(100, 20))
q_logits_np = np.random.normal(0, 10, size=(100, 20))
p_logits_np = np.random.normal(0, 10, size=(100, 20))
with tf.Session() as sess:
kl_div_tf = sess.run(kl_with_logits(q_logits, p_logits),
feed_dict={q_logits: q_logits_np,
p_logits: p_logits_np})
kl_div_ref = numpy_kl_with_logits(q_logits_np, p_logits_np)
self.assertTrue(np.allclose(kl_div_ref, kl_div_tf))
if __name__ == '__main__':
unittest.main()
|
Add tests for vat utils functions
|
Add tests for vat utils functions
|
Python
|
mit
|
fartashf/cleverhans,carlini/cleverhans,openai/cleverhans,cleverhans-lab/cleverhans,cleverhans-lab/cleverhans,cihangxie/cleverhans,carlini/cleverhans,cleverhans-lab/cleverhans
|
Add tests for vat utils functions
|
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from cleverhans.utils_tf import kl_with_logits, l2_batch_normalize
def numpy_kl_with_logits(q_logits, p_logits):
def numpy_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
q = numpy_softmax(q_logits)
p = numpy_softmax(p_logits)
return (q * (np.log(q) - np.log(p))).sum(axis=1).mean()
class TestUtils(unittest.TestCase):
def test_l2_batch_normalize(self):
import tensorflow as tf
with tf.Session() as sess:
x = tf.random_normal((100, 1000))
x_norm = sess.run(l2_batch_normalize(x))
self.assertTrue(
np.allclose(np.sum(x_norm**2, axis=1), 1, atol=1e-6))
def test_kl_with_logits(self):
import tensorflow as tf
q_logits = tf.placeholder(tf.float32, shape=(100, 20))
p_logits = tf.placeholder(tf.float32, shape=(100, 20))
q_logits_np = np.random.normal(0, 10, size=(100, 20))
p_logits_np = np.random.normal(0, 10, size=(100, 20))
with tf.Session() as sess:
kl_div_tf = sess.run(kl_with_logits(q_logits, p_logits),
feed_dict={q_logits: q_logits_np,
p_logits: p_logits_np})
kl_div_ref = numpy_kl_with_logits(q_logits_np, p_logits_np)
self.assertTrue(np.allclose(kl_div_ref, kl_div_tf))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for vat utils functions<commit_after>
|
from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from cleverhans.utils_tf import kl_with_logits, l2_batch_normalize
def numpy_kl_with_logits(q_logits, p_logits):
def numpy_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
q = numpy_softmax(q_logits)
p = numpy_softmax(p_logits)
return (q * (np.log(q) - np.log(p))).sum(axis=1).mean()
class TestUtils(unittest.TestCase):
def test_l2_batch_normalize(self):
import tensorflow as tf
with tf.Session() as sess:
x = tf.random_normal((100, 1000))
x_norm = sess.run(l2_batch_normalize(x))
self.assertTrue(
np.allclose(np.sum(x_norm**2, axis=1), 1, atol=1e-6))
def test_kl_with_logits(self):
import tensorflow as tf
q_logits = tf.placeholder(tf.float32, shape=(100, 20))
p_logits = tf.placeholder(tf.float32, shape=(100, 20))
q_logits_np = np.random.normal(0, 10, size=(100, 20))
p_logits_np = np.random.normal(0, 10, size=(100, 20))
with tf.Session() as sess:
kl_div_tf = sess.run(kl_with_logits(q_logits, p_logits),
feed_dict={q_logits: q_logits_np,
p_logits: p_logits_np})
kl_div_ref = numpy_kl_with_logits(q_logits_np, p_logits_np)
self.assertTrue(np.allclose(kl_div_ref, kl_div_tf))
if __name__ == '__main__':
unittest.main()
|
Add tests for vat utils functionsfrom __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from cleverhans.utils_tf import kl_with_logits, l2_batch_normalize
def numpy_kl_with_logits(q_logits, p_logits):
def numpy_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
q = numpy_softmax(q_logits)
p = numpy_softmax(p_logits)
return (q * (np.log(q) - np.log(p))).sum(axis=1).mean()
class TestUtils(unittest.TestCase):
def test_l2_batch_normalize(self):
import tensorflow as tf
with tf.Session() as sess:
x = tf.random_normal((100, 1000))
x_norm = sess.run(l2_batch_normalize(x))
self.assertTrue(
np.allclose(np.sum(x_norm**2, axis=1), 1, atol=1e-6))
def test_kl_with_logits(self):
import tensorflow as tf
q_logits = tf.placeholder(tf.float32, shape=(100, 20))
p_logits = tf.placeholder(tf.float32, shape=(100, 20))
q_logits_np = np.random.normal(0, 10, size=(100, 20))
p_logits_np = np.random.normal(0, 10, size=(100, 20))
with tf.Session() as sess:
kl_div_tf = sess.run(kl_with_logits(q_logits, p_logits),
feed_dict={q_logits: q_logits_np,
p_logits: p_logits_np})
kl_div_ref = numpy_kl_with_logits(q_logits_np, p_logits_np)
self.assertTrue(np.allclose(kl_div_ref, kl_div_tf))
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for vat utils functions<commit_after>from __future__ import absolute_import, division, print_function
import unittest
import numpy as np
from cleverhans.utils_tf import kl_with_logits, l2_batch_normalize
def numpy_kl_with_logits(q_logits, p_logits):
def numpy_softmax(logits):
exp_logits = np.exp(logits)
return exp_logits / np.sum(exp_logits, axis=1, keepdims=True)
q = numpy_softmax(q_logits)
p = numpy_softmax(p_logits)
return (q * (np.log(q) - np.log(p))).sum(axis=1).mean()
class TestUtils(unittest.TestCase):
def test_l2_batch_normalize(self):
import tensorflow as tf
with tf.Session() as sess:
x = tf.random_normal((100, 1000))
x_norm = sess.run(l2_batch_normalize(x))
self.assertTrue(
np.allclose(np.sum(x_norm**2, axis=1), 1, atol=1e-6))
def test_kl_with_logits(self):
import tensorflow as tf
q_logits = tf.placeholder(tf.float32, shape=(100, 20))
p_logits = tf.placeholder(tf.float32, shape=(100, 20))
q_logits_np = np.random.normal(0, 10, size=(100, 20))
p_logits_np = np.random.normal(0, 10, size=(100, 20))
with tf.Session() as sess:
kl_div_tf = sess.run(kl_with_logits(q_logits, p_logits),
feed_dict={q_logits: q_logits_np,
p_logits: p_logits_np})
kl_div_ref = numpy_kl_with_logits(q_logits_np, p_logits_np)
self.assertTrue(np.allclose(kl_div_ref, kl_div_tf))
if __name__ == '__main__':
unittest.main()
|
|
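For reference, the quantity numpy_kl_with_logits computes — and that kl_with_logits is checked against — is the batch-mean KL divergence between the softmax distributions of the two logit arrays, written here in LaTeX for clarity:

\mathrm{KL}(q \,\|\, p) = \frac{1}{N} \sum_{n=1}^{N} \sum_{i} q_{n,i} \bigl( \log q_{n,i} - \log p_{n,i} \bigr),
\qquad q_n = \operatorname{softmax}\bigl(q_n^{\text{logits}}\bigr), \quad p_n = \operatorname{softmax}\bigl(p_n^{\text{logits}}\bigr)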
810eeddaff32f9b608b0b61cfcb48826ec1b15bf
|
various/Crop_Big_ROIs.py
|
various/Crop_Big_ROIs.py
|
# @DatasetService datasetservice
# @ImageDisplayService displayservice
# @ImageJ ij
# @AbstractLogService log
# @DefaultLegacyService legacyservice
from ij import IJ
from ij import Macro
from ij.plugin.frame import RoiManager
from io.scif.img import ImgSaver
from net.imagej import DefaultDataset
from loci.plugins import BF
from loci.plugins import LociExporter
from loci.plugins.out import Exporter
from loci.plugins.in import ImporterOptions
from loci.common import Region
import os
import sys
import glob
sys.path.append(os.path.join(IJ.getDirectory('plugins'), "Scripts", "Plugins"))
from libtools import crop
from libtools.utils import get_dt
def main():
# Get image path
fname = "/home/hadim/local/data/microscopy_data/zurick/movies/metC.ome.tif"
basename = os.path.basename(fname)
dir_path = os.path.dirname(fname)
if not fname:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Open ROIs
roi_path = os.path.join(dir_path, "RoiSet.zip")
if not os.path.isfile(roi_path):
try:
roi_path = glob.glob(os.path.join(dir_path, "*.roi"))[0]
except:
roi_path = None
if not roi_path:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
rois = RoiManager(True)
rois.reset()
rois.runCommand("Open", roi_path)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i + 1
IJ.log("Open cropped region %i / %i" % (crop_id, len(rois_array)))
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, basename)
crop_fname = os.path.join(os.path.dirname(fname), crop_basename)
# Get bounds and crop
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
options = ImporterOptions()
options.setCrop(True)
options.setCropRegion(0, Region(x, y, w, h))
options.setId(fname)
#options.setVirtual(True)
imps = BF.openImagePlus(options)
imp = imps[0]
# Save cropped image
bfExporter = LociExporter()
macroOpts = "save=[" + crop_fname + "]"
bfExporter.setup(None, imp)
Macro.setOptions(macroOpts)
bfExporter.run(None)
imp.close()
IJ.log('Done')
main()
|
Add crop multi roi alternative script
|
Add crop multi roi alternative script
|
Python
|
bsd-3-clause
|
hadim/fiji_tools,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_scripts,hadim/fiji_tools
|
Add crop multi roi alternative script
|
# @DatasetService datasetservice
# @ImageDisplayService displayservice
# @ImageJ ij
# @AbstractLogService log
# @DefaultLegacyService legacyservice
from ij import IJ
from ij import Macro
from ij.plugin.frame import RoiManager
from io.scif.img import ImgSaver
from net.imagej import DefaultDataset
from loci.plugins import BF
from loci.plugins import LociExporter
from loci.plugins.out import Exporter
from loci.plugins.in import ImporterOptions
from loci.common import Region
import os
import sys
import glob
sys.path.append(os.path.join(IJ.getDirectory('plugins'), "Scripts", "Plugins"))
from libtools import crop
from libtools.utils import get_dt
def main():
# Get image path
fname = "/home/hadim/local/data/microscopy_data/zurick/movies/metC.ome.tif"
basename = os.path.basename(fname)
dir_path = os.path.dirname(fname)
if not fname:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Open ROIs
roi_path = os.path.join(dir_path, "RoiSet.zip")
if not os.path.isfile(roi_path):
try:
roi_path = glob.glob(os.path.join(dir_path, "*.roi"))[0]
except:
roi_path = None
if not roi_path:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
rois = RoiManager(True)
rois.reset()
rois.runCommand("Open", roi_path)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i + 1
IJ.log("Open cropped region %i / %i" % (crop_id, len(rois_array)))
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, basename)
crop_fname = os.path.join(os.path.dirname(fname), crop_basename)
# Get bounds and crop
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
options = ImporterOptions()
options.setCrop(True)
options.setCropRegion(0, Region(x, y, w, h))
options.setId(fname)
#options.setVirtual(True)
imps = BF.openImagePlus(options)
imp = imps[0]
# Save cropped image
bfExporter = LociExporter()
macroOpts = "save=[" + crop_fname + "]"
bfExporter.setup(None, imp)
Macro.setOptions(macroOpts)
bfExporter.run(None)
imp.close()
IJ.log('Done')
main()
|
<commit_before><commit_msg>Add crop multi roi alternative script<commit_after>
|
# @DatasetService datasetservice
# @ImageDisplayService displayservice
# @ImageJ ij
# @AbstractLogService log
# @DefaultLegacyService legacyservice
from ij import IJ
from ij import Macro
from ij.plugin.frame import RoiManager
from io.scif.img import ImgSaver
from net.imagej import DefaultDataset
from loci.plugins import BF
from loci.plugins import LociExporter
from loci.plugins.out import Exporter
from loci.plugins.in import ImporterOptions
from loci.common import Region
import os
import sys
import glob
sys.path.append(os.path.join(IJ.getDirectory('plugins'), "Scripts", "Plugins"))
from libtools import crop
from libtools.utils import get_dt
def main():
# Get image path
fname = "/home/hadim/local/data/microscopy_data/zurick/movies/metC.ome.tif"
basename = os.path.basename(fname)
dir_path = os.path.dirname(fname)
if not fname:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Open ROIs
roi_path = os.path.join(dir_path, "RoiSet.zip")
if not os.path.isfile(roi_path):
try:
roi_path = glob.glob(os.path.join(dir_path, "*.roi"))[0]
except:
roi_path = None
if not roi_path:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
rois = RoiManager(True)
rois.reset()
rois.runCommand("Open", roi_path)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i + 1
IJ.log("Open cropped region %i / %i" % (crop_id, len(rois_array)))
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, basename)
crop_fname = os.path.join(os.path.dirname(fname), crop_basename)
# Get bounds and crop
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
options = ImporterOptions()
options.setCrop(True)
options.setCropRegion(0, Region(x, y, w, h))
options.setId(fname)
#options.setVirtual(True)
imps = BF.openImagePlus(options)
imp = imps[0]
# Save cropped image
bfExporter = LociExporter()
macroOpts = "save=[" + crop_fname + "]"
bfExporter.setup(None, imp)
Macro.setOptions(macroOpts)
bfExporter.run(None)
imp.close()
IJ.log('Done')
main()
|
Add crop multi roi alternative script# @DatasetService datasetservice
# @ImageDisplayService displayservice
# @ImageJ ij
# @AbstractLogService log
# @DefaultLegacyService legacyservice
from ij import IJ
from ij import Macro
from ij.plugin.frame import RoiManager
from io.scif.img import ImgSaver
from net.imagej import DefaultDataset
from loci.plugins import BF
from loci.plugins import LociExporter
from loci.plugins.out import Exporter
from loci.plugins.in import ImporterOptions
from loci.common import Region
import os
import sys
import glob
sys.path.append(os.path.join(IJ.getDirectory('plugins'), "Scripts", "Plugins"))
from libtools import crop
from libtools.utils import get_dt
def main():
# Get image path
fname = "/home/hadim/local/data/microscopy_data/zurick/movies/metC.ome.tif"
basename = os.path.basename(fname)
dir_path = os.path.dirname(fname)
if not fname:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Open ROIs
roi_path = os.path.join(dir_path, "RoiSet.zip")
if not os.path.isfile(roi_path):
try:
roi_path = glob.glob(os.path.join(dir_path, "*.roi"))[0]
except IndexError:
roi_path = None
if not roi_path:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
rois = RoiManager(True)
rois.reset()
rois.runCommand("Open", roi_path)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i + 1
IJ.log("Open cropped region %i / %i" % (crop_id, len(rois_array)))
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, basename)
crop_fname = os.path.join(os.path.dirname(fname), crop_basename)
# Get bounds and crop
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
options = ImporterOptions()
options.setCrop(True)
options.setCropRegion(0, Region(x, y, w, h))
options.setId(fname)
#options.setVirtual(True)
imps = BF.openImagePlus(options)
imp = imps[0]
# Save cropped image
bfExporter = LociExporter()
macroOpts = "save=[" + crop_fname + "]"
bfExporter.setup(None, imp)
Macro.setOptions(macroOpts)
bfExporter.run(None)
imp.close()
IJ.log('Done')
main()
|
<commit_before><commit_msg>Add crop multi roi alternative script<commit_after># @DatasetService datasetservice
# @ImageDisplayService displayservice
# @ImageJ ij
# @AbstractLogService log
# @DefaultLegacyService legacyservice
from ij import IJ
from ij import Macro
from ij.plugin.frame import RoiManager
from io.scif.img import ImgSaver
from net.imagej import DefaultDataset
from loci.plugins import BF
from loci.plugins import LociExporter
from loci.plugins.out import Exporter
from loci.plugins.in import ImporterOptions
from loci.common import Region
import os
import sys
import glob
sys.path.append(os.path.join(IJ.getDirectory('plugins'), "Scripts", "Plugins"))
from libtools import crop
from libtools.utils import get_dt
def main():
# Get image path
fname = "/home/hadim/local/data/microscopy_data/zurick/movies/metC.ome.tif"
basename = os.path.basename(fname)
dir_path = os.path.dirname(fname)
if not fname:
IJ.showMessage('Source image needs to match a file on the system.')
return
# Open ROIs
roi_path = os.path.join(dir_path, "RoiSet.zip")
if not os.path.isfile(roi_path):
try:
roi_path = glob.glob(os.path.join(dir_path, "*.roi"))[0]
except IndexError:
roi_path = None
if not roi_path:
IJ.showMessage('No ROIs. Please use Analyze > Tools > ROI Manager...')
return
rois = RoiManager(True)
rois.reset()
rois.runCommand("Open", roi_path)
IJ.log('Image filename is %s' % fname)
rois_array = rois.getRoisAsArray()
for i, roi in enumerate(rois_array):
crop_id = i + 1
IJ.log("Open cropped region %i / %i" % (crop_id, len(rois_array)))
# Get filename and basename of the current cropped image
crop_basename = "crop%i_%s" % (crop_id, basename)
crop_fname = os.path.join(os.path.dirname(fname), crop_basename)
# Get bounds and crop
bounds = roi.getBounds()
x = bounds.x
y = bounds.y
w = bounds.width
h = bounds.height
# Import only cropped region of the image
options = ImporterOptions()
options.setCrop(True)
options.setCropRegion(0, Region(x, y, w, h))
options.setId(fname)
#options.setVirtual(True)
imps = BF.openImagePlus(options)
imp = imps[0]
# Save cropped image
bfExporter = LociExporter()
macroOpts = "save=[" + crop_fname + "]"
bfExporter.setup(None, imp)
Macro.setOptions(macroOpts)
bfExporter.run(None)
imp.close()
IJ.log('Done')
main()
|
|
8f1e94f79ddd398112ed33485bfc3d735e1edda2
|
maint/scripts/download_wheels.py
|
maint/scripts/download_wheels.py
|
#!/usr/bin/env python3
import asyncio
import json
import pathlib
import sys
from tornado.httpclient import AsyncHTTPClient
BASE_URL = "https://ci.appveyor.com/api"
async def fetch_job(directory, job):
http = AsyncHTTPClient()
artifacts = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts")
paths = [pathlib.PurePosixPath(a["fileName"]) for a in json.loads(artifacts.body)]
for path in paths:
artifact = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts/{path}")
with open(directory.joinpath(path.name), "wb") as f:
f.write(artifact.body)
async def main():
http = AsyncHTTPClient()
try:
_, version = sys.argv
except ValueError:
print("usage: maint/scripts/download_wheels.py v6.0.1", file=sys.stderr)
sys.exit(1)
directory = pathlib.Path(f"downloads-{version}")
directory.mkdir(exist_ok=True)
build = await http.fetch(f"{BASE_URL}/projects/bdarnell/tornado/branch/{version}")
jobs = [job["jobId"] for job in json.loads(build.body)["build"]["jobs"]]
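# Download artifacts from every build job concurrently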
await asyncio.gather(*(fetch_job(directory, job) for job in jobs))
if __name__ == "__main__":
asyncio.run(main())
|
Add script to download wheels from appveyor
|
Add script to download wheels from appveyor
|
Python
|
apache-2.0
|
mivade/tornado,tornadoweb/tornado,bdarnell/tornado,mivade/tornado,dongpinglai/my_tornado,mivade/tornado,dongpinglai/my_tornado,tornadoweb/tornado,NoyaInRain/tornado,NoyaInRain/tornado,NoyaInRain/tornado,allenl203/tornado,bdarnell/tornado,dongpinglai/my_tornado,lilydjwg/tornado,tornadoweb/tornado,dongpinglai/my_tornado,lilydjwg/tornado,allenl203/tornado,wujuguang/tornado,allenl203/tornado,lilydjwg/tornado,lilydjwg/tornado,wujuguang/tornado,bdarnell/tornado,bdarnell/tornado,wujuguang/tornado,mivade/tornado,mivade/tornado,NoyaInRain/tornado,wujuguang/tornado,NoyaInRain/tornado,bdarnell/tornado,allenl203/tornado,dongpinglai/my_tornado,dongpinglai/my_tornado,NoyaInRain/tornado,allenl203/tornado,tornadoweb/tornado,wujuguang/tornado
|
Add script to download wheels from appveyor
|
#!/usr/bin/env python3
import asyncio
import json
import pathlib
import sys
from tornado.httpclient import AsyncHTTPClient
BASE_URL = "https://ci.appveyor.com/api"
async def fetch_job(directory, job):
http = AsyncHTTPClient()
artifacts = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts")
paths = [pathlib.PurePosixPath(a["fileName"]) for a in json.loads(artifacts.body)]
for path in paths:
artifact = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts/{path}")
with open(directory.joinpath(path.name), "wb") as f:
f.write(artifact.body)
async def main():
http = AsyncHTTPClient()
try:
_, version = sys.argv
except ValueError:
print("usage: maint/scripts/download_wheels.py v6.0.1", file=sys.stderr)
sys.exit(1)
directory = pathlib.Path(f"downloads-{version}")
directory.mkdir(exist_ok=True)
build = await http.fetch(f"{BASE_URL}/projects/bdarnell/tornado/branch/{version}")
jobs = [job["jobId"] for job in json.loads(build.body)["build"]["jobs"]]
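# Download artifacts from every build job concurrently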
await asyncio.gather(*(fetch_job(directory, job) for job in jobs))
if __name__ == "__main__":
asyncio.run(main())
|
<commit_before><commit_msg>Add script to download wheels from appveyor<commit_after>
|
#!/usr/bin/env python3
import asyncio
import json
import pathlib
import sys
from tornado.httpclient import AsyncHTTPClient
BASE_URL = "https://ci.appveyor.com/api"
async def fetch_job(directory, job):
http = AsyncHTTPClient()
artifacts = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts")
paths = [pathlib.PurePosixPath(a["fileName"]) for a in json.loads(artifacts.body)]
for path in paths:
artifact = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts/{path}")
with open(directory.joinpath(path.name), "wb") as f:
f.write(artifact.body)
async def main():
http = AsyncHTTPClient()
try:
_, version = sys.argv
except ValueError:
print("usage: maint/scripts/download_wheels.py v6.0.1", file=sys.stderr)
sys.exit(1)
directory = pathlib.Path(f"downloads-{version}")
directory.mkdir(exist_ok=True)
build = await http.fetch(f"{BASE_URL}/projects/bdarnell/tornado/branch/{version}")
jobs = [job["jobId"] for job in json.loads(build.body)["build"]["jobs"]]
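# Download artifacts from every build job concurrently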
await asyncio.gather(*(fetch_job(directory, job) for job in jobs))
if __name__ == "__main__":
asyncio.run(main())
|
Add script to download wheels from appveyor#!/usr/bin/env python3
import asyncio
import json
import pathlib
import sys
from tornado.httpclient import AsyncHTTPClient
BASE_URL = "https://ci.appveyor.com/api"
async def fetch_job(directory, job):
http = AsyncHTTPClient()
artifacts = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts")
paths = [pathlib.PurePosixPath(a["fileName"]) for a in json.loads(artifacts.body)]
for path in paths:
artifact = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts/{path}")
with open(directory.joinpath(path.name), "wb") as f:
f.write(artifact.body)
async def main():
http = AsyncHTTPClient()
try:
_, version = sys.argv
except ValueError:
print("usage: maint/scripts/download_wheels.py v6.0.1", file=sys.stderr)
sys.exit(1)
directory = pathlib.Path(f"downloads-{version}")
directory.mkdir(exist_ok=True)
build = await http.fetch(f"{BASE_URL}/projects/bdarnell/tornado/branch/{version}")
jobs = [job["jobId"] for job in json.loads(build.body)["build"]["jobs"]]
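# Download artifacts from every build job concurrently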
await asyncio.gather(*(fetch_job(directory, job) for job in jobs))
if __name__ == "__main__":
asyncio.run(main())
|
<commit_before><commit_msg>Add script to download wheels from appveyor<commit_after>#!/usr/bin/env python3
import asyncio
import json
import pathlib
import sys
from tornado.httpclient import AsyncHTTPClient
BASE_URL = "https://ci.appveyor.com/api"
async def fetch_job(directory, job):
http = AsyncHTTPClient()
artifacts = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts")
paths = [pathlib.PurePosixPath(a["fileName"]) for a in json.loads(artifacts.body)]
for path in paths:
artifact = await http.fetch(f"{BASE_URL}/buildjobs/{job}/artifacts/{path}")
with open(directory.joinpath(path.name), "wb") as f:
f.write(artifact.body)
async def main():
http = AsyncHTTPClient()
try:
_, version = sys.argv
except ValueError:
print("usage: maint/scripts/download_wheels.py v6.0.1", file=sys.stderr)
sys.exit(1)
directory = pathlib.Path(f"downloads-{version}")
directory.mkdir(exist_ok=True)
build = await http.fetch(f"{BASE_URL}/projects/bdarnell/tornado/branch/{version}")
jobs = [job["jobId"] for job in json.loads(build.body)["build"]["jobs"]]
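# Download artifacts from every build job concurrently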
await asyncio.gather(*(fetch_job(directory, job) for job in jobs))
if __name__ == "__main__":
asyncio.run(main())
|
|
1846e6fe7f6b6a31f7921303556393e7f6fd9845
|
dev_tools/src/d1_dev/src-print-redbaron-tree.py
|
dev_tools/src/d1_dev/src-print-redbaron-tree.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import d1_dev.util
def main():
"""Print the RedBaron syntax tree for a Python module
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", help="Python module path")
args = parser.parse_args()
r = d1_dev.util.redbaron_module_path_to_tree(args.path)
print(r.help(True))
if __name__ == "__main__":
sys.exit(main())
|
Add command to print the syntax tree for a script
|
Add command to print the syntax tree for a script
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add command to print the syntax tree for a script
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import d1_dev.util
def main():
"""Print the RedBaron syntax tree for a Python module
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", help="Python module path")
args = parser.parse_args()
r = d1_dev.util.redbaron_module_path_to_tree(args.path)
print(r.help(True))
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add command to print the syntax tree for a script<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import d1_dev.util
def main():
"""Print the RedBaron syntax tree for a Python module
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", help="Python module path")
args = parser.parse_args()
r = d1_dev.util.redbaron_module_path_to_tree(args.path)
print(r.help(True))
if __name__ == "__main__":
sys.exit(main())
|
Add command to print the syntax tree for a script#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import d1_dev.util
def main():
"""Print the RedBaron syntax tree for a Python module
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", help="Python module path")
args = parser.parse_args()
r = d1_dev.util.redbaron_module_path_to_tree(args.path)
print(r.help(True))
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add command to print the syntax tree for a script<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import d1_dev.util
def main():
"""Print the RedBaron syntax tree for a Python module
"""
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument("path", help="Python module path")
args = parser.parse_args()
r = d1_dev.util.redbaron_module_path_to_tree(args.path)
print(r.help(True))
if __name__ == "__main__":
sys.exit(main())
|
|
e9f30ec92520f8caa4f5d08fdf43b08ced84fd6b
|
CodeFights/leastFactorial.py
|
CodeFights/leastFactorial.py
|
#!/usr/local/bin/python
# Code Fights Least Factorial (Core) Problem
def leastFactorial(n):
def factGen():
m, res = 1, 1
while True:
res *= m
yield res
m += 1
for f in factGen():
if f >= n:
return f
def main():
tests = [
[17, 24],
[1, 1],
[5, 6]
]
for t in tests:
res = leastFactorial(t[0])
if t[1] == res:
print("PASSED: leastFactorial({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: leastFactorial({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights least factorial problem
|
Solve Code Fights least factorial problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights least factorial problem
|
#!/usr/local/bin/python
# Code Fights Least Factorial (Core) Problem
def leastFactorial(n):
def factGen():
m, res = 1, 1
while True:
res *= m
yield res
m += 1
for f in factGen():
if f >= n:
return f
def main():
tests = [
[17, 24],
[1, 1],
[5, 6]
]
for t in tests:
res = leastFactorial(t[0])
if t[1] == res:
print("PASSED: leastFactorial({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: leastFactorial({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights least factorial problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Least Factorial (Core) Problem
def leastFactorial(n):
def factGen():
m, res = 1, 1
while True:
res *= m
yield res
m += 1
for f in factGen():
if f >= n:
return f
def main():
tests = [
[17, 24],
[1, 1],
[5, 6]
]
for t in tests:
res = leastFactorial(t[0])
if t[1] == res:
print("PASSED: leastFactorial({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: leastFactorial({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights least factorial problem#!/usr/local/bin/python
# Code Fights Least Factorial (Core) Problem
def leastFactorial(n):
def factGen():
m, res = 1, 1
while True:
res *= m
yield res
m += 1
for f in factGen():
if f >= n:
return f
def main():
tests = [
[17, 24],
[1, 1],
[5, 6]
]
for t in tests:
res = leastFactorial(t[0])
if t[1] == res:
print("PASSED: leastFactorial({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: leastFactorial({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights least factorial problem<commit_after>#!/usr/local/bin/python
# Code Fights Least Factorial (Core) Problem
def leastFactorial(n):
def factGen():
m, res = 1, 1
while True:
res *= m
yield res
m += 1
for f in factGen():
if f >= n:
return f
def main():
tests = [
[17, 24],
[1, 1],
[5, 6]
]
for t in tests:
res = leastFactorial(t[0])
if t[1] == res:
print("PASSED: leastFactorial({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: leastFactorial({}) returned {},"
"answer: {}").format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
|
77b6e4995743bca4036e5b5dc498cfcdd4e2908e
|
jarviscli/tests/test_wifi_password_getter.py
|
jarviscli/tests/test_wifi_password_getter.py
|
import unittest
from tests import PluginTest
from plugins import wifi_password_getter
from colorama import Fore
class TestWifiPasswordGetter(PluginTest):
"""
A test class that contains test cases for the methods of
the wifi_password_getter plugin for Windows.
"""
def setUp(self):
self.test = self.load_plugin(
wifi_password_getter.WifiPasswordGetterWINDOWS)
def test_show_options_last_text(self):
self.queue_input("2")
profiles = ["profile_1", "profile_2", "profile_3"]
self.test.show_options(self.jarvis_api, profiles)
self.assertEqual(self.history_say().last_text(), "4: Exit")
def test_get_choice_valid(self):
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api,
input_text,
max_valid_value),
2)
def test_get_choice_terminator(self):
self.queue_input("3")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api, input_text, max_valid_value), -1)
def test_get_choice_invalid(self):
self.queue_input("7")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
def test_get_choice_exception(self):
self.queue_input("wrong_input")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
if __name__ == '__main__':
unittest.main()
|
Create test cases for the wifi_password_getter plugin
|
Create test cases for the wifi_password_getter plugin
|
Python
|
mit
|
sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis,sukeesh/Jarvis
|
Create test cases for the wifi_password_getter plugin
|
import unittest
from tests import PluginTest
from plugins import wifi_password_getter
from colorama import Fore
class TestWifiPasswordGetter(PluginTest):
"""
A test class that contains test cases for the methods of
the wifi_password_getter plugin for Windows.
"""
def setUp(self):
self.test = self.load_plugin(
wifi_password_getter.WifiPasswordGetterWINDOWS)
def test_show_options_last_text(self):
self.queue_input("2")
profiles = ["profile_1", "profile_2", "profile_3"]
self.test.show_options(self.jarvis_api, profiles)
self.assertEqual(self.history_say().last_text(), "4: Exit")
def test_get_choice_valid(self):
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api,
input_text,
max_valid_value),
2)
def test_get_choice_terminator(self):
self.queue_input("3")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api, input_text, max_valid_value), -1)
def test_get_choice_invalid(self):
self.queue_input("7")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
def test_get_choice_exception(self):
self.queue_input("wrong_input")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test cases for the wifi_password_getter plugin<commit_after>
|
import unittest
from tests import PluginTest
from plugins import wifi_password_getter
from colorama import Fore
class TestWifiPasswordGetter(PluginTest):
"""
A test class that contains test cases for the methods of
the wifi_password_getter plugin for Windows.
"""
def setUp(self):
self.test = self.load_plugin(
wifi_password_getter.WifiPasswordGetterWINDOWS)
def test_show_options_last_text(self):
self.queue_input("2")
profiles = ["profile_1", "profile_2", "profile_3"]
self.test.show_options(self.jarvis_api, profiles)
self.assertEqual(self.history_say().last_text(), "4: Exit")
def test_get_choice_valid(self):
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api,
input_text,
max_valid_value),
2)
def test_get_choice_terminator(self):
self.queue_input("3")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api, input_text, max_valid_value), -1)
def test_get_choice_invalid(self):
self.queue_input("7")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
def test_get_choice_exception(self):
self.queue_input("wrong_input")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
if __name__ == '__main__':
unittest.main()
|
Create test cases for the wifi_password_getter pluginimport unittest
from tests import PluginTest
from plugins import wifi_password_getter
from colorama import Fore
class TestWifiPasswordGetter(PluginTest):
"""
A test class that contains test cases for the methods of
the wifi_password_getter plugin for Windows.
"""
def setUp(self):
self.test = self.load_plugin(
wifi_password_getter.WifiPasswordGetterWINDOWS)
def test_show_options_last_text(self):
self.queue_input("2")
profiles = ["profile_1", "profile_2", "profile_3"]
self.test.show_options(self.jarvis_api, profiles)
self.assertEqual(self.history_say().last_text(), "4: Exit")
def test_get_choice_valid(self):
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api,
input_text,
max_valid_value),
2)
def test_get_choice_terminator(self):
self.queue_input("3")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api, input_text, max_valid_value), -1)
def test_get_choice_invalid(self):
self.queue_input("7")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
def test_get_choice_exception(self):
self.queue_input("wrong_input")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create test cases for the wifi_password_getter plugin<commit_after>import unittest
from tests import PluginTest
from plugins import wifi_password_getter
from colorama import Fore
class TestWifiPasswordGetter(PluginTest):
"""
A test class that contains test cases for the methods of
the wifi_password_getter plugin for Windows.
"""
def setUp(self):
self.test = self.load_plugin(
wifi_password_getter.WifiPasswordGetterWINDOWS)
def test_show_options_last_text(self):
self.queue_input("2")
profiles = ["profile_1", "profile_2", "profile_3"]
self.test.show_options(self.jarvis_api, profiles)
self.assertEqual(self.history_say().last_text(), "4: Exit")
def test_get_choice_valid(self):
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api,
input_text,
max_valid_value),
2)
def test_get_choice_terminator(self):
self.queue_input("3")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.assertEqual(
self.test.get_choice(
self.jarvis_api, input_text, max_valid_value), -1)
def test_get_choice_invalid(self):
self.queue_input("7")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
def test_get_choice_exception(self):
self.queue_input("wrong_input")
self.queue_input("2")
input_text = "Please select a number or Exit: "
max_valid_value = 3
self.test.get_choice(self.jarvis_api, input_text, max_valid_value)
self.assertEqual(
self.history_say().last_text(),
"Invalid input! Enter a number from the choices provided.")
self.assertEqual(self.history_say().last_color(), Fore.YELLOW)
if __name__ == '__main__':
unittest.main()
|
|
374f32a1d5feaf2e912d901b9398f50f00e7d481
|
scripts/most_recent.py
|
scripts/most_recent.py
|
from datetime import datetime
from optparse import OptionParser
from urllib2 import urlopen
from BeautifulSoup import BeautifulSoup
if __name__ == '__main__':
usage = "%prog <USERNAME> <WEB SERVER>"
parser = OptionParser(usage=usage)
opts, args = parser.parse_args()
if len(args) != 2:
parser.error("All arguments are required.")
username, server = args
if server[:7] != "http://":
server = "http://%s" % (server,)
soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks'
% (server, username)))
gobbles_list = soup.find('ul', {'class': 'gobbles'})
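# The li's 'about' attribute carries the Unix timestamp after the '#'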
timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0]
print datetime.fromtimestamp(float(timestamp))
|
Add script to print time of most recent gobble.
|
Add script to print time of most recent gobble.
|
Python
|
agpl-3.0
|
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
|
Add script to print time of most recent gobble.
|
from datetime import datetime
from optparse import OptionParser
from urllib2 import urlopen
from BeautifulSoup import BeautifulSoup
if __name__ == '__main__':
usage = "%prog <USERNAME> <WEB SERVER>"
parser = OptionParser(usage=usage)
opts, args = parser.parse_args()
if len(args) != 2:
parser.error("All arguments are required.")
username, server = args
if server[:7] != "http://":
server = "http://%s" % (server,)
soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks'
% (server, username)))
gobbles_list = soup.find('ul', {'class': 'gobbles'})
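# The li's 'about' attribute carries the Unix timestamp after the '#'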
timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0]
print datetime.fromtimestamp(float(timestamp))
|
<commit_before><commit_msg>Add script to print time of most recent gobble.<commit_after>
|
from datetime import datetime
from optparse import OptionParser
from urllib2 import urlopen
from BeautifulSoup import BeautifulSoup
if __name__ == '__main__':
usage = "%prog <USERNAME> <WEB SERVER>"
parser = OptionParser(usage=usage)
opts, args = parser.parse_args()
if len(args) != 2:
parser.error("All arguments are required.")
username, server = args
if server[:7] != "http://":
server = "http://%s" % (server,)
soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks'
% (server, username)))
gobbles_list = soup.find('ul', {'class': 'gobbles'})
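# The li's 'about' attribute carries the Unix timestamp after the '#'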
timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0]
print datetime.fromtimestamp(float(timestamp))
|
Add script to print time of most recent gobble.from datetime import datetime
from optparse import OptionParser
from urllib2 import urlopen
from BeautifulSoup import BeautifulSoup
if __name__ == '__main__':
usage = "%prog <USERNAME> <WEB SERVER>"
parser = OptionParser(usage=usage)
opts, args = parser.parse_args()
if len(args) != 2:
parser.error("All arguments are required.")
username, server = args
if server[:7] != "http://":
server = "http://%s" % (server,)
soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks'
% (server, username)))
gobbles_list = soup.find('ul', {'class': 'gobbles'})
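# The li's 'about' attribute carries the Unix timestamp after the '#'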
timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0]
print datetime.fromtimestamp(float(timestamp))
|
<commit_before><commit_msg>Add script to print time of most recent gobble.<commit_after>from datetime import datetime
from optparse import OptionParser
from urllib2 import urlopen
from BeautifulSoup import BeautifulSoup
if __name__ == '__main__':
usage = "%prog <USERNAME> <WEB SERVER>"
parser = OptionParser(usage=usage)
opts, args = parser.parse_args()
if len(args) != 2:
parser.error("All arguments are required.")
username, server = args
if server[:7] != "http://":
server = "http://%s" % (server,)
soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks'
% (server, username)))
gobbles_list = soup.find('ul', {'class': 'gobbles'})
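# The li's 'about' attribute carries the Unix timestamp after the '#'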
timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0]
print datetime.fromtimestamp(float(timestamp))
|
|
b47dbd6b6f2e19632e90036f14cd85bbf3f8cbd1
|
utils/get_collection_object_count.py
|
utils/get_collection_object_count.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys, os
import argparse
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
def main(argv=None):
parser = argparse.ArgumentParser(description='Print count of objects for a given collection.')
parser.add_argument('path', help="Nuxeo path to collection")
parser.add_argument('--pynuxrc', default='~/.pynuxrc-prod', help="rcfile for use with pynux utils")
if argv is None:
argv = parser.parse_args()
dh = DeepHarvestNuxeo(argv.path, 'barbarahui_test_bucket', argv.pynuxrc)
print "about to fetch objects for path {}".format(dh.path)
objects = dh.fetch_objects()
print "finished"
print "len(objects): {}".format(len(objects))
if __name__ == "__main__":
sys.exit(main())
|
Add script to get count of objects in a collection.
|
Add script to get count of objects in a collection.
|
Python
|
bsd-3-clause
|
barbarahui/nuxeo-calisphere,barbarahui/nuxeo-calisphere
|
Add script to get count of objects in a collection.
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys, os
import argparse
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
def main(argv=None):
parser = argparse.ArgumentParser(description='Print count of objects for a given collection.')
parser.add_argument('path', help="Nuxeo path to collection")
parser.add_argument('--pynuxrc', default='~/.pynuxrc-prod', help="rcfile for use with pynux utils")
if argv is None:
argv = parser.parse_args()
dh = DeepHarvestNuxeo(argv.path, 'barbarahui_test_bucket', argv.pynuxrc)
print "about to fetch objects for path {}".format(dh.path)
objects = dh.fetch_objects()
print "finished"
print "len(objects): {}".format(len(objects))
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to get count of objects in a collection.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys, os
import argparse
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
def main(argv=None):
parser = argparse.ArgumentParser(description='Print count of objects for a given collection.')
parser.add_argument('path', help="Nuxeo path to collection")
parser.add_argument('--pynuxrc', default='~/.pynuxrc-prod', help="rcfile for use with pynux utils")
if argv is None:
argv = parser.parse_args()
dh = DeepHarvestNuxeo(argv.path, 'barbarahui_test_bucket', argv.pynuxrc)
print "about to fetch objects for path {}".format(dh.path)
objects = dh.fetch_objects()
print "finished"
print "len(objects): {}".format(len(objects))
if __name__ == "__main__":
sys.exit(main())
|
Add script to get count of objects in a collection.#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys, os
import argparse
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
def main(argv=None):
parser = argparse.ArgumentParser(description='Print count of objects for a given collection.')
parser.add_argument('path', help="Nuxeo path to collection")
parser.add_argument('--pynuxrc', default='~/.pynuxrc-prod', help="rcfile for use with pynux utils")
if argv is None:
argv = parser.parse_args()
dh = DeepHarvestNuxeo(argv.path, 'barbarahui_test_bucket', argv.pynuxrc)
print "about to fetch objects for path {}".format(dh.path)
objects = dh.fetch_objects()
print "finished"
print "len(objects): {}".format(len(objects))
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to get count of objects in a collection.<commit_after>#!/usr/bin/env python
# -*- coding: utf8 -*-
import sys, os
import argparse
from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo
def main(argv=None):
parser = argparse.ArgumentParser(description='Print count of objects for a given collection.')
parser.add_argument('path', help="Nuxeo path to collection")
parser.add_argument('--pynuxrc', default='~/.pynuxrc-prod', help="rcfile for use with pynux utils")
if argv is None:
argv = parser.parse_args()
dh = DeepHarvestNuxeo(argv.path, 'barbarahui_test_bucket', argv.pynuxrc)
print "about to fetch objects for path {}".format(dh.path)
objects = dh.fetch_objects()
print "finished"
print "len(objects): {}".format(len(objects))
if __name__ == "__main__":
sys.exit(main())
|
|
0798e457957b3db8f5de1891900d639961d78a0f
|
emgapimetadata/management/commands/test-data.py
|
emgapimetadata/management/commands/test-data.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: if given a file, use its directory instead
elif os.path.isfile(_path):
raise NotImplementedError("Give path to directory.")
else:
raise NotImplementedError("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
except Exception:
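# Annotation already exists; fetch the stored document instead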
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
Add command line tool to import metadata
|
Add command line tool to import metadata
|
Python
|
apache-2.0
|
EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi,EBI-Metagenomics/emgapi
|
Add command line tool to import metadata
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: if given a file, use its directory instead
elif os.path.isfile(_path):
raise NotImplementedError("Give path to directory.")
else:
raise NotImplementedError("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
except Exception:
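# Annotation already exists; fetch the stored document instead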
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
<commit_before><commit_msg>Add command line tool to import metadata<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: if given a file, use its directory instead
elif os.path.isfile(_path):
raise NotImplementedError("Give path to directory.")
else:
raise NotImplementedError("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
except Exception:
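# Annotation already exists; fetch the stored document instead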
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
Add command line tool to import metadata#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: if given a file, use its directory instead
elif os.path.isfile(_path):
raise NotImplementedError("Give path to directory.")
else:
raise NotImplementedError("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
except Exception:
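# Annotation already exists; fetch the stored document instead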
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
<commit_before><commit_msg>Add command line tool to import metadata<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import csv
from django.core.management.base import BaseCommand
from emgapimetadata import models as m_models
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('importpath', type=str)
def handle(self, *args, **options):
self.populate(options)
def populate(self, options):
# check if path is valid
_path = options.get('importpath', None)
if os.path.exists(_path):
if os.path.isdir(_path):
for root, dirs, files in os.walk(_path, topdown=False):
for name in files:
accession = name.split("_")[0]
f = os.path.join(root, name)
if name.endswith("go"):
self.import_go(f, accession)
# TODO: if given a file, use its directory instead
elif os.path.isfile(_path):
raise NotImplementedError("Give path to directory.")
else:
raise NotImplementedError("Path doesn't exist.")
def import_go(self, f, accession):
with open(f, newline='') as fcsv:
reader = csv.reader(fcsv)
run = m_models.Run()
run.accession = "ERR700147"
run.pipeline_version = "1.0"
for row in reader:
try:
ann = m_models.Annotation(
accession=row[0],
description=row[1],
lineage=row[2],
).save()
except Exception:
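# Annotation already exists; fetch the stored document instead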
ann = m_models.Annotation.objects.get(accession=row[0])
rann = m_models.RunAnnotation()
rann.count = row[3]
rann.annotation = ann
run.annotations.append(rann)
# ranns = m_models.RunAnnotation.objects.insert(ranns)
run.save()
|
|
b3f185033ee758e9407240243e263e07c8a28e35
|
services/imu-logger.py
|
services/imu-logger.py
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
DELAY = 1 # in seconds
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
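# Poll the Sense HAT once per DELAY and persist each reading to MongoDB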
while True:
orientation = sense.get_orientation_degrees()
print(orientation)
acceleration = sense.get_accelerometer()
compass = sense.get_compass()
temperature_from_humidity = sense.get_temperature()
temperature_from_pressure = sense.get_temperature_from_pressure()
db.gyroscope.insert_one({
"pitch": orientation["pitch"],
"roll": orientation["roll"],
"yaw": orientation["yaw"]
})
db.accelerometer.insert_one({
"pitch": acceleration["pitch"],
"roll": acceleration["roll"],
"yaw": acceleration["yaw"]
})
db.compass.insert_one({"angle": compass})
db.temperature.insert_one({
"from_humidity": temperature_from_humidity,
"from_pressure": temperature_from_pressure
})
time.sleep(DELAY)
|
Create script to log imu values
|
Create script to log imu values
|
Python
|
bsd-3-clause
|
gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2,gizmo-cda/g2x-submarine-v2
|
Create script to log imu values
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
DELAY = 1 # in seconds
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
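# Poll the Sense HAT once per DELAY and persist each reading to MongoDB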
while True:
orientation = sense.get_orientation_degrees()
print(orientation)
acceleration = sense.get_accelerometer()
compass = sense.get_compass()
temperature_from_humidity = sense.get_temperature()
temperature_from_pressure = sense.get_temperature_from_pressure()
db.gyroscope.insert_one({
"pitch": orientation["pitch"],
"roll": orientation["roll"],
"yaw": orientation["yaw"]
})
db.accelerometer.insert_one({
"pitch": acceleration["pitch"],
"roll": acceleration["roll"],
"yaw": acceleration["yaw"]
})
db.compass.insert_one({"angle": compass})
db.temperature.insert_one({
"from_humidity": temperature_from_humidity,
"from_pressure": temperature_from_pressure
})
time.sleep(DELAY)
|
<commit_before><commit_msg>Create script to log imu values<commit_after>
|
#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
DELAY = 1 # in seconds
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
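# Poll the Sense HAT once per DELAY and persist each reading to MongoDB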
while True:
orientation = sense.get_orientation_degrees()
print(orientation)
acceleration = sense.get_accelerometer()
compass = sense.get_compass()
temperature_from_humidity = sense.get_temperature()
temperature_from_pressure = sense.get_temperature_from_pressure()
db.gyroscope.insert_one({
"pitch": orientation["pitch"],
"roll": orientation["roll"],
"yaw": orientation["yaw"]
})
db.accelerometer.insert_one({
"pitch": acceleration["pitch"],
"roll": acceleration["roll"],
"yaw": acceleration["yaw"]
})
db.compass.insert_one({"angle": compass})
db.temperature.insert_one({
"from_humidity": temperature_from_humidity,
"from_pressure": temperature_from_pressure
})
time.sleep(DELAY)
|
Create script to log imu values#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
DELAY = 1 # in seconds
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
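# Poll the Sense HAT once per DELAY and persist each reading to MongoDB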
while True:
orientation = sense.get_orientation_degrees()
print(orientation)
acceleration = sense.get_accelerometer()
compass = sense.get_compass()
temperature_from_humidity = sense.get_temperature()
temperature_from_pressure = sense.get_temperature_from_pressure()
db.gyroscope.insert_one({
"pitch": orientation["pitch"],
"roll": orientation["roll"],
"yaw": orientation["yaw"]
})
db.accelerometer.insert_one({
"pitch": acceleration["pitch"],
"roll": acceleration["roll"],
"yaw": acceleration["yaw"]
})
db.compass.insert_one({"angle": compass})
db.temperature.insert_one({
"from_humidity": temperature_from_humidity,
"from_pressure": temperature_from_pressure
})
time.sleep(DELAY)
|
<commit_before><commit_msg>Create script to log imu values<commit_after>#!/usr/bin/env python3
from sense_hat import SenseHat
from pymongo import MongoClient
import time
DELAY = 1 # in seconds
sense = SenseHat()
client = MongoClient("mongodb://192.168.0.128:27017")
db = client.g2x
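# Poll the Sense HAT once per DELAY and persist each reading to MongoDB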
while True:
orientation = sense.get_orientation_degrees()
print(orientation)
acceleration = sense.get_accelerometer()
compass = sense.get_compass()
temperature_from_humidity = sense.get_temperature()
temperature_from_pressure = sense.get_temperature_from_pressure()
db.gyroscope.insert_one({
"pitch": orientation["pitch"],
"roll": orientation["roll"],
"yaw": orientation["yaw"]
})
db.accelerometer.insert_one({
"pitch": acceleration["pitch"],
"roll": acceleration["roll"],
"yaw": acceleration["yaw"]
})
db.compass.insert_one({"angle": compass})
db.temperature.insert_one({
"from_humidity": temperature_from_humidity,
"from_pressure": temperature_from_pressure
})
time.sleep(DELAY)
|
|
a24b3122176b7435469b5275264dd6f53ff78165
|
demo_a3c_continuous.py
|
demo_a3c_continuous.py
|
import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
from train_a3c_continuous import phi, A3CLSTMGaussian
import env_modifiers
def eval_single_run(env, model, phi):
model.reset_state()
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
pout = model.pi_and_v(s)[0]
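# Detach the recurrent state so the computation graph does not grow across timesteps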
model.unchain_backward()
a = pout.sampled_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
model = A3CLSTMGaussian(obs_size, action_size)
serializers.load_hdf5(args.model, model)
scores = []
for i in range(args.n_runs):
score = eval_single_run(env, model, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
Add a demo script for gym continous tasks
|
Add a demo script for gym continous tasks
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
Add a demo script for gym continous tasks
|
import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
from train_a3c_continuous import phi, A3CLSTMGaussian
import env_modifiers
def eval_single_run(env, model, phi):
model.reset_state()
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
pout = model.pi_and_v(s)[0]
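# Detach the recurrent state so the computation graph does not grow across timesteps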
model.unchain_backward()
a = pout.sampled_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
model = A3CLSTMGaussian(obs_size, action_size)
serializers.load_hdf5(args.model, model)
scores = []
for i in range(args.n_runs):
score = eval_single_run(env, model, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a demo script for gym continous tasks<commit_after>
|
import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
from train_a3c_continuous import phi, A3CLSTMGaussian
import env_modifiers
def eval_single_run(env, model, phi):
model.reset_state()
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
pout = model.pi_and_v(s)[0]
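# Detach the recurrent state so the computation graph does not grow across timesteps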
model.unchain_backward()
a = pout.sampled_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
model = A3CLSTMGaussian(obs_size, action_size)
serializers.load_hdf5(args.model, model)
scores = []
for i in range(args.n_runs):
score = eval_single_run(env, model, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
Add a demo script for gym continous tasksimport argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
from train_a3c_continuous import phi, A3CLSTMGaussian
import env_modifiers
def eval_single_run(env, model, phi):
model.reset_state()
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
pout = model.pi_and_v(s)[0]
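# Detach the recurrent state so the computation graph does not grow across timesteps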
model.unchain_backward()
a = pout.sampled_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
model = A3CLSTMGaussian(obs_size, action_size)
serializers.load_hdf5(args.model, model)
scores = []
for i in range(args.n_runs):
score = eval_single_run(env, model, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a demo script for gym continuous tasks<commit_after>import argparse
import chainer
from chainer import serializers
import gym
import numpy as np
import random_seed
from train_a3c_continuous import phi, A3CLSTMGaussian
import env_modifiers
def eval_single_run(env, model, phi):
model.reset_state()
test_r = 0
obs = env.reset()
done = False
while not done:
s = chainer.Variable(np.expand_dims(phi(obs), 0))
pout = model.pi_and_v(s)[0]
model.unchain_backward()
a = pout.sampled_actions.data[0]
obs, r, done, info = env.step(a)
test_r += r
return test_r
def main():
import logging
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('model', type=str)
parser.add_argument('--env', type=str, default='Pendulum-v0')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--n-runs', type=int, default=10)
parser.add_argument('--window-visible', action='store_true')
parser.add_argument('--render', action='store_true')
parser.set_defaults(render=False)
args = parser.parse_args()
random_seed.set_random_seed(args.seed)
env = gym.make(args.env)
timestep_limit = env.spec.timestep_limit
env_modifiers.make_timestep_limited(env, timestep_limit)
if args.render:
env_modifiers.make_rendered(env)
obs_size = np.asarray(env.observation_space.shape).prod()
action_size = np.asarray(env.action_space.shape).prod()
model = A3CLSTMGaussian(obs_size, action_size)
serializers.load_hdf5(args.model, model)
scores = []
for i in range(args.n_runs):
score = eval_single_run(env, model, phi)
print('Run {}: {}'.format(i, score))
scores.append(score)
print('Average: {}'.format(sum(scores) / args.n_runs))
if __name__ == '__main__':
main()
|
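The evaluation loop above generalizes beyond the Chainer A3C model: any callable mapping observations to actions can be scored the same way. A minimal sketch, assuming the classic 4-tuple gym step API of that era, with a random policy standing in for a trained model:
import gym

def eval_policy_once(env, policy):
    """One episode's undiscounted return for an arbitrary policy callable."""
    obs = env.reset()
    done = False
    total_r = 0.0
    while not done:
        obs, r, done, _ = env.step(policy(obs))
        total_r += r
    return total_r

if __name__ == '__main__':
    env = gym.make('Pendulum-v0')
    policy = lambda obs: env.action_space.sample()  # stand-in for a trained model
    scores = [eval_policy_once(env, policy) for _ in range(10)]
    print('Average: {}'.format(sum(scores) / len(scores)))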
|
50e16f8212c87ceb1f3fd1c896149b626e6e4178
|
indra/util/__init__.py
|
indra/util/__init__.py
|
def has_str(obj):
if type(obj) == str:
return True
# Check for an iterable
if hasattr(obj, '__iter__'):
for item in obj:
item_has_str = has_str(item)
if item_has_str:
return True
if hasattr(obj, '__dict__'):
for item in obj.__dict__.values():
item_has_str = has_str(item)
if item_has_str:
return True
return False
|
Test for objs with strs rather than unicode
|
Test for objs with strs rather than unicode
|
Python
|
bsd-2-clause
|
jmuhlich/indra,sorgerlab/indra,johnbachman/indra,pvtodorov/indra,sorgerlab/belpy,pvtodorov/indra,bgyori/indra,pvtodorov/indra,sorgerlab/indra,jmuhlich/indra,johnbachman/belpy,johnbachman/indra,jmuhlich/indra,johnbachman/belpy,sorgerlab/belpy,johnbachman/indra,pvtodorov/indra,sorgerlab/belpy,bgyori/indra,bgyori/indra,johnbachman/belpy,sorgerlab/indra
|
Test for objs with strs rather than unicode
|
def has_str(obj):
if type(obj) == str:
return True
# Check for an iterable
if hasattr(obj, '__iter__'):
for item in obj:
item_has_str = has_str(item)
if item_has_str:
return True
if hasattr(obj, '__dict__'):
for item in obj.__dict__.values():
item_has_str = has_str(item)
if item_has_str:
return True
return False
|
<commit_before><commit_msg>Test for objs with strs rather than unicode<commit_after>
|
def has_str(obj):
if type(obj) == str:
return True
# Check for an iterable
if hasattr(obj, '__iter__'):
for item in obj:
item_has_str = has_str(item)
if item_has_str:
return True
if hasattr(obj, '__dict__'):
for item in obj.__dict__.values():
item_has_str = has_str(item)
if item_has_str:
return True
return False
|
Test for objs with strs rather than unicode
def has_str(obj):
if type(obj) == str:
return True
# Check for an iterable
if hasattr(obj, '__iter__'):
for item in obj:
item_has_str = has_str(item)
if item_has_str:
return True
if hasattr(obj, '__dict__'):
for item in obj.__dict__.values():
item_has_str = has_str(item)
if item_has_str:
return True
return False
|
<commit_before><commit_msg>Test for objs with strs rather than unicode<commit_after>def has_str(obj):
if type(obj) == str:
return True
# Check for an iterable
if hasattr(obj, '__iter__'):
for item in obj:
item_has_str = has_str(item)
if item_has_str:
return True
if hasattr(obj, '__dict__'):
for item in obj.__dict__.values():
item_has_str = has_str(item)
if item_has_str:
return True
return False
|
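To see what the recursive check above catches, a short Python 2 sketch (assumes has_str from above is in scope; the Agent class is hypothetical, the point being that byte strings are found inside containers and in object attributes via __dict__):
class Agent(object):
    def __init__(self, name):
        self.name = name

print(has_str([u'MAPK1', 1.5]))    # False: unicode and float only
print(has_str([u'MAPK1', 'ERK']))  # True: 'ERK' is a byte string on Python 2
print(has_str([Agent('ERK')]))     # True: found through the object's __dict__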
|
9bce480b245e20f6c6ef93d34e92c27cea9f6d77
|
tests/test_settings.py
|
tests/test_settings.py
|
import pytest
from isort import exceptions
from isort.settings import Config
class TestConfig:
def test_init(self):
assert Config()
def test_invalid_pyversion(self):
with pytest.raises(ValueError):
Config(py_version=10)
def test_invalid_profile(self):
with pytest.raises(exceptions.ProfileDoesNotExist):
Config(profile="blackandwhitestylemixedwithpep8")
|
Add initial settings test file
|
Add initial settings test file
|
Python
|
mit
|
PyCQA/isort,PyCQA/isort
|
Add initial settings test file
|
import pytest
from isort import exceptions
from isort.settings import Config
class TestConfig:
def test_init(self):
assert Config()
def test_invalid_pyversion(self):
with pytest.raises(ValueError):
Config(py_version=10)
def test_invalid_profile(self):
with pytest.raises(exceptions.ProfileDoesNotExist):
Config(profile="blackandwhitestylemixedwithpep8")
|
<commit_before><commit_msg>Add initial settings test file<commit_after>
|
import pytest
from isort import exceptions
from isort.settings import Config
class TestConfig:
def test_init(self):
assert Config()
def test_invalid_pyversion(self):
with pytest.raises(ValueError):
Config(py_version=10)
def test_invalid_profile(self):
with pytest.raises(exceptions.ProfileDoesNotExist):
Config(profile="blackandwhitestylemixedwithpep8")
|
Add initial settings test file
import pytest
from isort import exceptions
from isort.settings import Config
class TestConfig:
def test_init(self):
assert Config()
def test_invalid_pyversion(self):
with pytest.raises(ValueError):
Config(py_version=10)
def test_invalid_profile(self):
with pytest.raises(exceptions.ProfileDoesNotExist):
Config(profile="blackandwhitestylemixedwithpep8")
|
<commit_before><commit_msg>Add initial settings test file<commit_after>import pytest
from isort import exceptions
from isort.settings import Config
class TestConfig:
def test_init(self):
assert Config()
def test_invalid_pyversion(self):
with pytest.raises(ValueError):
Config(py_version=10)
def test_invalid_profile(self):
with pytest.raises(exceptions.ProfileDoesNotExist):
Config(profile="blackandwhitestylemixedwithpep8")
|
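The pytest.raises context manager used in these tests is the usual way to assert that invalid input is rejected. A self-contained sketch with a hypothetical validator, not isort's actual Config:
import pytest

def make_config(py_version):
    """Hypothetical stand-in for a validating constructor."""
    if py_version not in (2, 3):
        raise ValueError('unsupported py_version: {!r}'.format(py_version))
    return {'py_version': py_version}

def test_rejects_unknown_version():
    with pytest.raises(ValueError):
        make_config(10)

def test_accepts_known_version():
    assert make_config(3)['py_version'] == 3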
|
be70b1528f51385c8221b7337cdc8669f53fa1c6
|
textblob/decorators.py
|
textblob/decorators.py
|
# -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
|
# -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from functools import wraps
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
@wraps(func)
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
|
Use wraps decorator for requires_nltk_corpus
|
Use wraps decorator for requires_nltk_corpus
|
Python
|
mit
|
jcalbert/TextBlob,freakynit/TextBlob,nvoron23/TextBlob,IrisSteenhout/TextBlob,adelq/TextBlob,beni55/TextBlob,jonmcoe/TextBlob,dipeshtech/TextBlob,sargam111/python,sloria/TextBlob,Windy-Ground/TextBlob,laugustyniak/TextBlob
|
# -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
Use wraps decorator for requires_nltk_corpus
|
# -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from functools import wraps
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
@wraps(func)
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
|
<commit_before># -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
<commit_msg>Use wraps decorator for requires_nltk_corpus<commit_after>
|
# -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from functools import wraps
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
@wraps(func)
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
|
# -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
Use wraps decorator for requires_nltk_corpus
# -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from functools import wraps
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
@wraps(func)
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
|
<commit_before># -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
<commit_msg>Use wraps decorator for requires_nltk_corpus<commit_after># -*- coding: utf-8 -*-
'''Custom decorators.'''
from __future__ import absolute_import
from functools import wraps
from textblob.exceptions import MissingCorpusException
class cached_property(object):
'''A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Credit to Marcel Hellkamp, author of bottle.py.
'''
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def requires_nltk_corpus(func):
'''Wraps a function that requires an NLTK corpus. If the corpus isn't found,
raise a MissingCorpusException.
'''
@wraps(func)
def decorated(*args, **kwargs):
try:
return func(*args, **kwargs)
except LookupError as err:
print(err)
raise MissingCorpusException()
return decorated
|
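The point of the change above is that without functools.wraps a decorator discards the wrapped function's metadata. A quick demonstration:
from functools import wraps

def passthrough(func):
    @wraps(func)  # copies __name__, __doc__, __module__, etc. onto the wrapper
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

@passthrough
def tag(text):
    """Tag the given text."""
    return text

print(tag.__name__)  # 'tag' with @wraps; would be 'wrapper' without it
print(tag.__doc__)   # 'Tag the given text.'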
a86525047658bf5adcd2133f71fe392a11883916
|
tools/unicode_tests.py
|
tools/unicode_tests.py
|
# coding: utf-8
"""These tests have to be run separately from the main test suite (iptest),
because that sets the default encoding to utf-8, and it cannot be changed after
the interpreter is up and running. The default encoding in a Python 2.x
environment is ASCII."""
import unittest, sys
from IPython.core import compilerop
assert sys.getdefaultencoding() == "ascii"
class CompileropTest(unittest.TestCase):
def test_accept_unicode(self):
cp = compilerop.CachingCompiler()
cp(u"t = 'žćčšđ'", "single")
if __name__ == "__main__":
unittest.main()
|
Test case for the failure to compile code including unicode characters.
|
Test case for the failure to compile code including unicode characters.
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Test case for the failure to compile code including unicode characters.
|
# coding: utf-8
"""These tests have to be run separately from the main test suite (iptest),
because that sets the default encoding to utf-8, and it cannot be changed after
the interpreter is up and running. The default encoding in a Python 2.x
environment is ASCII."""
import unittest, sys
from IPython.core import compilerop
assert sys.getdefaultencoding() == "ascii"
class CompileropTest(unittest.TestCase):
def test_accept_unicode(self):
cp = compilerop.CachingCompiler()
cp(u"t = 'žćčšđ'", "single")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test case for the failure to compile code including unicode characters.<commit_after>
|
# coding: utf-8
"""These tests have to be run separately from the main test suite (iptest),
because that sets the default encoding to utf-8, and it cannot be changed after
the interpreter is up and running. The default encoding in a Python 2.x
environment is ASCII."""
import unittest, sys
from IPython.core import compilerop
assert sys.getdefaultencoding() == "ascii"
class CompileropTest(unittest.TestCase):
def test_accept_unicode(self):
cp = compilerop.CachingCompiler()
cp(u"t = 'žćčšđ'", "single")
if __name__ == "__main__":
unittest.main()
|
Test case for the failure to compile code including unicode characters.
# coding: utf-8
"""These tests have to be run separately from the main test suite (iptest),
because that sets the default encoding to utf-8, and it cannot be changed after
the interpreter is up and running. The default encoding in a Python 2.x
environment is ASCII."""
import unittest, sys
from IPython.core import compilerop
assert sys.getdefaultencoding() == "ascii"
class CompileropTest(unittest.TestCase):
def test_accept_unicode(self):
cp = compilerop.CachingCompiler()
cp(u"t = 'žćčšđ'", "single")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test case for the failure to compile code including unicode characters.<commit_after># coding: utf-8
"""These tests have to be run separately from the main test suite (iptest),
because that sets the default encoding to utf-8, and it cannot be changed after
the interpreter is up and running. The default encoding in a Python 2.x
environment is ASCII."""
import unittest, sys
from IPython.core import compilerop
assert sys.getdefaultencoding() == "ascii"
class CompileropTest(unittest.TestCase):
def test_accept_unicode(self):
cp = compilerop.CachingCompiler()
cp(u"t = 'žćčšđ'", "single")
if __name__ == "__main__":
unittest.main()
|
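Background for the assertion above: on Python 2, implicit str/unicode coercion uses sys.getdefaultencoding(), so the same expression can pass under a utf-8 default yet fail under the stock 'ascii' one. A Python 2 sketch of that failure mode:
# -*- coding: utf-8 -*-
import sys

assert sys.getdefaultencoding() == "ascii"
try:
    combined = 'žćčšđ' + u''  # implicit decode of the byte string via 'ascii'
except UnicodeDecodeError as err:
    print("implicit coercion failed as expected: %s" % err)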
|
eb1daa3edfaa72cad2cb39507b2db0bf95204561
|
markitup/renderers.py
|
markitup/renderers.py
|
from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
parts = publish_parts(source=markup, writer_name="html4css1", settings_overrides=docutils_settings)
return parts["html_body"]
except ImportError:
pass
|
from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
docutils_settings.update({
'raw_enabled': False,
'file_insertion_enabled': False,
})
parts = publish_parts(
source=markup,
writer_name="html4css1",
settings_overrides=docutils_settings,
)
return parts["html_body"]
except ImportError:
pass
|
Enforce better security in sample ReST renderer.
|
Enforce better security in sample ReST renderer.
|
Python
|
bsd-3-clause
|
WimpyAnalytics/django-markitup,carljm/django-markitup,WimpyAnalytics/django-markitup,zsiciarz/django-markitup,zsiciarz/django-markitup,carljm/django-markitup,WimpyAnalytics/django-markitup,carljm/django-markitup,zsiciarz/django-markitup
|
from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
parts = publish_parts(source=markup, writer_name="html4css1", settings_overrides=docutils_settings)
return parts["html_body"]
except ImportError:
pass
Enforce better security in sample ReST renderer.
|
from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
docutils_settings.update({
'raw_enabled': False,
'file_insertion_enabled': False,
})
parts = publish_parts(
source=markup,
writer_name="html4css1",
settings_overrides=docutils_settings,
)
return parts["html_body"]
except ImportError:
pass
|
<commit_before>from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
parts = publish_parts(source=markup, writer_name="html4css1", settings_overrides=docutils_settings)
return parts["html_body"]
except ImportError:
pass
<commit_msg>Enforce better security in sample ReST renderer.<commit_after>
|
from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
docutils_settings.update({
'raw_enabled': False,
'file_insertion_enabled': False,
})
parts = publish_parts(
source=markup,
writer_name="html4css1",
settings_overrides=docutils_settings,
)
return parts["html_body"]
except ImportError:
pass
|
from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
parts = publish_parts(source=markup, writer_name="html4css1", settings_overrides=docutils_settings)
return parts["html_body"]
except ImportError:
pass
Enforce better security in sample ReST renderer.
from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
docutils_settings.update({
'raw_enabled': False,
'file_insertion_enabled': False,
})
parts = publish_parts(
source=markup,
writer_name="html4css1",
settings_overrides=docutils_settings,
)
return parts["html_body"]
except ImportError:
pass
|
<commit_before>from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
parts = publish_parts(source=markup, writer_name="html4css1", settings_overrides=docutils_settings)
return parts["html_body"]
except ImportError:
pass
<commit_msg>Enforce better security in sample ReST renderer.<commit_after>from __future__ import unicode_literals
try:
from docutils.core import publish_parts
def render_rest(markup, **docutils_settings):
docutils_settings.update({
'raw_enabled': False,
'file_insertion_enabled': False,
})
parts = publish_parts(
source=markup,
writer_name="html4css1",
settings_overrides=docutils_settings,
)
return parts["html_body"]
except ImportError:
pass
|
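To see what the two overrides buy, a hedged probe (requires docutils; the exact error markup in the output varies by version, but the raw HTML should not pass through):
from docutils.core import publish_parts

untrusted = """\
.. raw:: html

   <script>alert('xss')</script>
"""

html = publish_parts(
    source=untrusted,
    writer_name="html4css1",
    settings_overrides={'raw_enabled': False, 'file_insertion_enabled': False},
)["html_body"]
print('<script>' in html)  # expected False: the disabled raw directive is refused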
c332b83cc543f8e43405cf72e6e7c80b0cafba80
|
datasets/online_products_dataset.py
|
datasets/online_products_dataset.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 17:30:49 2017
@author: sakurai
"""
from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
class OnlineProductsDataset(H5PYDataset):
_filename = 'online_products/online_products.hdf5'
def __init__(self, which_sets, **kwargs):
super(OnlineProductsDataset, self).__init__(
file_or_path=find_in_data_path(self._filename),
which_sets=which_sets, **kwargs)
def load_as_ndarray(which_sets=['train', 'test']):
datasets = []
for split in which_sets:
data = OnlineProductsDataset([split], load_in_memory=True).data_sources
datasets.append(data)
return datasets
if __name__ == '__main__':
dataset = OnlineProductsDataset(['train'])
st = DataStream(
dataset, iteration_scheme=SequentialScheme(dataset.num_examples, 1))
it = st.get_epoch_iterator()
it.next()
|
Add the dataset for online products dataset
|
Add the dataset for online products dataset
|
Python
|
mit
|
ronekko/deep_metric_learning
|
Add the dataset for online products dataset
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 17:30:49 2017
@author: sakurai
"""
from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
class OnlineProductsDataset(H5PYDataset):
_filename = 'online_products/online_products.hdf5'
def __init__(self, which_sets, **kwargs):
super(OnlineProductsDataset, self).__init__(
file_or_path=find_in_data_path(self._filename),
which_sets=which_sets, **kwargs)
def load_as_ndarray(which_sets=['train', 'test']):
datasets = []
for split in which_sets:
data = OnlineProductsDataset([split], load_in_memory=True).data_sources
datasets.append(data)
return datasets
if __name__ == '__main__':
dataset = OnlineProductsDataset(['train'])
st = DataStream(
dataset, iteration_scheme=SequentialScheme(dataset.num_examples, 1))
it = st.get_epoch_iterator()
it.next()
|
<commit_before><commit_msg>Add the dataset for online products dataset<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 17:30:49 2017
@author: sakurai
"""
from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
class OnlineProductsDataset(H5PYDataset):
_filename = 'online_products/online_products.hdf5'
def __init__(self, which_sets, **kwargs):
super(OnlineProductsDataset, self).__init__(
file_or_path=find_in_data_path(self._filename),
which_sets=which_sets, **kwargs)
def load_as_ndarray(which_sets=['train', 'test']):
datasets = []
for split in which_sets:
data = OnlineProductsDataset([split], load_in_memory=True).data_sources
datasets.append(data)
return datasets
if __name__ == '__main__':
dataset = OnlineProductsDataset(['train'])
st = DataStream(
dataset, iteration_scheme=SequentialScheme(dataset.num_examples, 1))
it = st.get_epoch_iterator()
it.next()
|
Add the dataset for online products dataset
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 17:30:49 2017
@author: sakurai
"""
from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
class OnlineProductsDataset(H5PYDataset):
_filename = 'online_products/online_products.hdf5'
def __init__(self, which_sets, **kwargs):
super(OnlineProductsDataset, self).__init__(
file_or_path=find_in_data_path(self._filename),
which_sets=which_sets, **kwargs)
def load_as_ndarray(which_sets=['train', 'test']):
datasets = []
for split in which_sets:
data = OnlineProductsDataset([split], load_in_memory=True).data_sources
datasets.append(data)
return datasets
if __name__ == '__main__':
dataset = OnlineProductsDataset(['train'])
st = DataStream(
dataset, iteration_scheme=SequentialScheme(dataset.num_examples, 1))
it = st.get_epoch_iterator()
it.next()
|
<commit_before><commit_msg>Add the dataset for online products dataset<commit_after># -*- coding: utf-8 -*-
"""
Created on Tue Feb 14 17:30:49 2017
@author: sakurai
"""
from fuel.datasets import H5PYDataset
from fuel.utils import find_in_data_path
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
class OnlineProductsDataset(H5PYDataset):
_filename = 'online_products/online_products.hdf5'
def __init__(self, which_sets, **kwargs):
super(OnlineProductsDataset, self).__init__(
file_or_path=find_in_data_path(self._filename),
which_sets=which_sets, **kwargs)
def load_as_ndarray(which_sets=['train', 'test']):
datasets = []
for split in which_sets:
data = OnlineProductsDataset([split], load_in_memory=True).data_sources
datasets.append(data)
return datasets
if __name__ == '__main__':
dataset = OnlineProductsDataset(['train'])
st = DataStream(
dataset, iteration_scheme=SequentialScheme(dataset.num_examples, 1))
it = st.get_epoch_iterator()
it.next()
|
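The H5PYDataset subclass above needs the online_products HDF5 file on disk; the same Fuel stream and scheme idioms can be exercised with a small in-memory stand-in:
from collections import OrderedDict

import numpy as np
from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream

dataset = IndexableDataset(OrderedDict([
    ('features', np.arange(10).reshape(5, 2)),
]))
stream = DataStream(
    dataset, iteration_scheme=SequentialScheme(dataset.num_examples, 2))
for (batch,) in stream.get_epoch_iterator():
    print(batch)  # minibatches of up to 2 examples each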
|
f82c43f3cc1fc74cd23b7ae4b957c464e09fe179
|
AttributeExploration.py
|
AttributeExploration.py
|
#-------------------------------------------------------------------------------
# Name: AttributeExploration.py
# Purpose: class for attribute exploration
#
# Author: Jakob Kogler
#-------------------------------------------------------------------------------
class AttributeExploration:
def __init__(self, attributes, objects):
self.attributes = attributes
self.attributeCount = len(attributes)
self.objects = objects
self.B = 0
self.implicationsBasis = []
self.waitForResponse = False
def getNextImplication(self):
def Lstar(X):
B = [b for (a,b) in self.implicationsBasis if a & X == a and a != X]
for b in B:
X |= b
return X
if not self.waitForResponse:
for i in reversed(range(self.attributeCount)):
j = self.attributeCount - 1 - i
# m = {m_1, m_2, ..., m_i-1}
m = 2**self.attributeCount - 2 * 2**j
# P = (B cut {m_1, m_2, ..., m_i-1}) union m_i
P = (self.B & m) | 2**j
# L*-operator
LstarP, P = P, -1
while LstarP != P:
LstarP, P = Lstar(LstarP), LstarP
# B <_i L*
if (P & ~self.B & 2**j == 0) or (self.B & m != P & m):
continue
# P**
Pstar = [obj for obj in self.objects if obj & P == P]
Pstarstar = 2**self.attributeCount - 1
for obj in Pstar:
Pstarstar &= obj
if P == Pstarstar:
# P => P, not interesting
self.B = P
return self.getNextImplication()
else:
# interesting implication found
self.implication = (P, Pstarstar)
self.waitForResponse = True
return self.implication
return None
def acceptImplication(self):
if self.waitForResponse:
self.waitForResponse = False
self.implicationsBasis.append(self.implication)
self.B = self.implication[0]
def rejectImplication(self, counterExample):
if self.waitForResponse:
self.waitForResponse = False
self.objects.append(counterExample)
|
ADD attributeExploration algorithm wrapped in a class
|
ADD attributeExploration algorithm wrapped in a class
|
Python
|
mit
|
jakobkogler/AttributeExploration,jakobkogler/AttributeExploration
|
ADD attributeExploration algorithm wrapped in a class
|
#-------------------------------------------------------------------------------
# Name: AttributeExploration.py
# Purpose: class for attribute exploration
#
# Author: Jakob Kogler
#-------------------------------------------------------------------------------
class AttributeExploration:
def __init__(self, attributes, objects):
self.attributes = attributes
self.attributeCount = len(attributes)
self.objects = objects
self.B = 0
self.implicationsBasis = []
self.waitForResponse = False
def getNextImplication(self):
def Lstar(X):
B = [b for (a,b) in self.implicationsBasis if a & X == a and a != X]
for b in B:
X |= b
return X
if not self.waitForResponse:
for i in reversed(range(self.attributeCount)):
j = self.attributeCount - 1 - i
# m = {m_1, m_2, ..., m_i-1}
m = 2**self.attributeCount - 2 * 2**j
# P = (B cut {m_1, m_2, ..., m_i-1}) union m_i
P = (self.B & m) | 2**j
# L*-operator
LstarP, P = P, -1
while LstarP != P:
LstarP, P = Lstar(LstarP), LstarP
# B <_i L*
if (P & ~self.B & 2**j == 0) or (self.B & m != P & m):
continue
# P**
Pstar = [obj for obj in self.objects if obj & P == P]
Pstarstar = 2**self.attributeCount - 1
for obj in Pstar:
Pstarstar &= obj
if P == Pstarstar:
# P => P, not interesting
self.B = P
return self.getNextImplication()
else:
# interesting implication found
self.implication = (P, Pstarstar)
self.waitForResponse = True
return self.implication
return None
def acceptImplication(self):
if self.waitForResponse:
self.waitForResponse = False
self.implicationsBasis.append(self.implication)
self.B = self.implication[0]
def rejectImplication(self, counterExample):
if self.waitForResponse:
self.waitForResponse = False
self.objects.append(counterExample)
|
<commit_before><commit_msg>ADD attributeExploration algorithm wrapped in a class<commit_after>
|
#-------------------------------------------------------------------------------
# Name: AttributeExploration.py
# Purpose: class for attribute exploration
#
# Author: Jakob Kogler
#-------------------------------------------------------------------------------
class AttributeExploration:
def __init__(self, attributes, objects):
self.attributes = attributes
self.attributeCount = len(attributes)
self.objects = objects
self.B = 0
self.implicationsBasis = []
self.waitForResponse = False
def getNextImplication(self):
def Lstar(X):
B = [b for (a,b) in self.implicationsBasis if a & X == a and a != X]
for b in B:
X |= b
return X
if not self.waitForResponse:
for i in reversed(range(self.attributeCount)):
j = self.attributeCount - 1 - i
# m = {m_1, m_2, ..., m_i-1}
m = 2**self.attributeCount - 2 * 2**j
# P = (B cut {m_1, m_2, ..., m_i-1}) union m_i
P = (self.B & m) | 2**j
# L*-operator
LstarP, P = P, -1
while LstarP != P:
LstarP, P = Lstar(LstarP), LstarP
# B <_i L*
if (P & ~self.B & 2**j == 0) or (self.B & m != P & m):
continue
# P**
Pstar = [obj for obj in self.objects if obj & P == P]
Pstarstar = 2**self.attributeCount - 1
for obj in Pstar:
Pstarstar &= obj
if P == Pstarstar:
# P => P, not interesting
self.B = P
return self.getNextImplication()
else:
# interesting implication found
self.implication = (P, Pstarstar)
self.waitForResponse = True
return self.implication
return None
def acceptImplication(self):
if self.waitForResponse:
self.waitForResponse = False
self.implicationsBasis.append(self.implication)
self.B = self.implication[0]
def rejectImplication(self, counterExample):
if self.waitForResponse:
self.waitForResponse = False
self.objects.append(counterExample)
|
ADD attributeExploration algorithm wrapped in a class
#-------------------------------------------------------------------------------
# Name: AttributeExploration.py
# Purpose: class for attribute exploration
#
# Author: Jakob Kogler
#-------------------------------------------------------------------------------
class AttributeExploration:
def __init__(self, attributes, objects):
self.attributes = attributes
self.attributeCount = len(attributes)
self.objects = objects
self.B = 0
self.implicationsBasis = []
self.waitForResponse = False
def getNextImplication(self):
def Lstar(X):
B = [b for (a,b) in self.implicationsBasis if a & X == a and a != X]
for b in B:
X |= b
return X
if not self.waitForResponse:
for i in reversed(range(self.attributeCount)):
j = self.attributeCount - 1 - i
# m = {m_1, m_2, ..., m_i-1}
m = 2**self.attributeCount - 2 * 2**j
# P = (B cut {m_1, m_2, ..., m_i-1}) union m_i
P = (self.B & m) | 2**j
# L*-operator
LstarP, P = P, -1
while LstarP != P:
LstarP, P = Lstar(LstarP), LstarP
# B <_i L*
if (P & ~self.B & 2**j == 0) or (self.B & m != P & m):
continue
# P**
Pstar = [obj for obj in self.objects if obj & P == P]
Pstarstar = 2**self.attributeCount - 1
for obj in Pstar:
Pstarstar &= obj
if P == Pstarstar:
# P => P, not interesting
self.B = P
return self.getNextImplication()
else:
# interesting implication found
self.implication = (P, Pstarstar)
self.waitForResponse = True
return self.implication
return None
def acceptImplication(self):
if self.waitForResponse:
self.waitForResponse = False
self.implicationsBasis.append(self.implication)
self.B = self.implication[0]
def rejectImplication(self, counterExample):
if self.waitForResponse:
self.waitForResponse = False
self.objects.append(counterExample)
|
<commit_before><commit_msg>ADD attributeExploration algorithm wrapped in a class<commit_after>#-------------------------------------------------------------------------------
# Name: AttributeExploration.py
# Purpose: class for attribute exploration
#
# Author: Jakob Kogler
#-------------------------------------------------------------------------------
class AttributeExploration:
def __init__(self, attributes, objects):
self.attributes = attributes
self.attributeCount = len(attributes)
self.objects = objects
self.B = 0
self.implicationsBasis = []
self.waitForResponse = False
def getNextImplication(self):
def Lstar(X):
B = [b for (a,b) in self.implicationsBasis if a & X == a and a != X]
for b in B:
X |= b
return X
if not self.waitForResponse:
for i in reversed(range(self.attributeCount)):
j = self.attributeCount - 1 - i
# m = {m_1, m_2, ..., m_i-1}
m = 2**self.attributeCount - 2 * 2**j
# P = (B cut {m_1, m_2, ..., m_i-1}) union m_i
P = (self.B & m) | 2**j
# L*-operator
LstarP, P = P, -1
while LstarP != P:
LstarP, P = Lstar(LstarP), LstarP
# B <_i L*
if (P & ~self.B & 2**j == 0) or (self.B & m != P & m):
continue
# P**
Pstar = [obj for obj in self.objects if obj & P == P]
Pstarstar = 2**self.attributeCount - 1
for obj in Pstar:
Pstarstar &= obj
if P == Pstarstar:
# P => P, not interesting
self.B = P
return self.getNextImplication()
else:
# interesting implication found
self.implication = (P, Pstarstar)
self.waitForResponse = True
return self.implication
return None
def acceptImplication(self):
if self.waitForResponse:
self.waitForResponse = False
self.implicationsBasis.append(self.implication)
self.B = self.implication[0]
def rejectImplication(self, counterExample):
if self.waitForResponse:
self.waitForResponse = False
self.objects.append(counterExample)
|
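A toy run of the class above on a three-attribute context (assumes AttributeExploration is in scope), auto-accepting each suggested implication; a real expert could instead call rejectImplication with a counterexample bitmask:
# Attributes are encoded as bitmasks: bit j set means attribute j holds.
explorer = AttributeExploration(['a', 'b', 'c'], [0b110, 0b101])
implication = explorer.getNextImplication()
while implication is not None:
    premise, conclusion = implication
    print('{:03b} => {:03b}'.format(premise, conclusion))
    explorer.acceptImplication()
    implication = explorer.getNextImplication()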
|
6eb358fbbe7351c65885d63726d895335832cf3c
|
tests/inheritance/test_multi_level_inheritance.py
|
tests/inheritance/test_multi_level_inheritance.py
|
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase
class TestCommonBaseClass(TestCase):
def create_models(self):
class BaseModel(self.Model):
__tablename__ = 'base_model'
__versioned__ = {}
id = sa.Column(sa.Integer, primary_key=True)
discriminator = sa.Column(sa.String(50), index=True)
__mapper_args__ = {
'polymorphic_on': discriminator,
'polymorphic_identity': 'product'
}
class FirstLevel(BaseModel):
__tablename__ = 'first_level'
id = sa.Column(sa.Integer, sa.ForeignKey('base_model.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'first_level'
}
class SecondLevel(FirstLevel):
__mapper_args__ = {
'polymorphic_identity': 'second_level'
}
self.BaseModel = BaseModel
self.FirstLevel = FirstLevel
self.SecondLevel = SecondLevel
def test_sa_inheritance_with_no_distinct_table_has_right_translation_class(self):
class_ = version_class(self.BaseModel)
assert class_.__name__ == 'BaseModelVersion'
assert class_.__table__.name == 'base_model_version'
class_ = version_class(self.FirstLevel)
assert class_.__name__ == 'FirstLevelVersion'
assert class_.__table__.name == 'first_level_version'
class_ = version_class(self.SecondLevel)
assert class_.__name__ == 'SecondLevelVersion'
assert class_.__table__.name == 'first_level_version'
|
Add test for inheritance case.
|
Add test for inheritance case.
|
Python
|
bsd-3-clause
|
kvesteri/sqlalchemy-continuum
|
Add test for inheritance case.
|
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase
class TestCommonBaseClass(TestCase):
def create_models(self):
class BaseModel(self.Model):
__tablename__ = 'base_model'
__versioned__ = {}
id = sa.Column(sa.Integer, primary_key=True)
discriminator = sa.Column(sa.String(50), index=True)
__mapper_args__ = {
'polymorphic_on': discriminator,
'polymorphic_identity': 'product'
}
class FirstLevel(BaseModel):
__tablename__ = 'first_level'
id = sa.Column(sa.Integer, sa.ForeignKey('base_model.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'first_level'
}
class SecondLevel(FirstLevel):
__mapper_args__ = {
'polymorphic_identity': 'second_level'
}
self.BaseModel = BaseModel
self.FirstLevel = FirstLevel
self.SecondLevel = SecondLevel
def test_sa_inheritance_with_no_distinct_table_has_right_translation_class(self):
class_ = version_class(self.BaseModel)
assert class_.__name__ == 'BaseModelVersion'
assert class_.__table__.name == 'base_model_version'
class_ = version_class(self.FirstLevel)
assert class_.__name__ == 'FirstLevelVersion'
assert class_.__table__.name == 'first_level_version'
class_ = version_class(self.SecondLevel)
assert class_.__name__ == 'SecondLevelVersion'
assert class_.__table__.name == 'first_level_version'
|
<commit_before><commit_msg>Add test for inheritance case.<commit_after>
|
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase
class TestCommonBaseClass(TestCase):
def create_models(self):
class BaseModel(self.Model):
__tablename__ = 'base_model'
__versioned__ = {}
id = sa.Column(sa.Integer, primary_key=True)
discriminator = sa.Column(sa.String(50), index=True)
__mapper_args__ = {
'polymorphic_on': discriminator,
'polymorphic_identity': 'product'
}
class FirstLevel(BaseModel):
__tablename__ = 'first_level'
id = sa.Column(sa.Integer, sa.ForeignKey('base_model.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'first_level'
}
class SecondLevel(FirstLevel):
__mapper_args__ = {
'polymorphic_identity': 'second_level'
}
self.BaseModel = BaseModel
self.FirstLevel = FirstLevel
self.SecondLevel = SecondLevel
def test_sa_inheritance_with_no_distinct_table_has_right_translation_class(self):
class_ = version_class(self.BaseModel)
assert class_.__name__ == 'BaseModelVersion'
assert class_.__table__.name == 'base_model_version'
class_ = version_class(self.FirstLevel)
assert class_.__name__ == 'FirstLevelVersion'
assert class_.__table__.name == 'first_level_version'
class_ = version_class(self.SecondLevel)
assert class_.__name__ == 'SecondLevelVersion'
assert class_.__table__.name == 'first_level_version'
|
Add test for inheritance case.
import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase
class TestCommonBaseClass(TestCase):
def create_models(self):
class BaseModel(self.Model):
__tablename__ = 'base_model'
__versioned__ = {}
id = sa.Column(sa.Integer, primary_key=True)
discriminator = sa.Column(sa.String(50), index=True)
__mapper_args__ = {
'polymorphic_on': discriminator,
'polymorphic_identity': 'product'
}
class FirstLevel(BaseModel):
__tablename__ = 'first_level'
id = sa.Column(sa.Integer, sa.ForeignKey('base_model.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'first_level'
}
class SecondLevel(FirstLevel):
__mapper_args__ = {
'polymorphic_identity': 'second_level'
}
self.BaseModel = BaseModel
self.FirstLevel = FirstLevel
self.SecondLevel = SecondLevel
def test_sa_inheritance_with_no_distinct_table_has_right_translation_class(self):
class_ = version_class(self.BaseModel)
assert class_.__name__ == 'BaseModelVersion'
assert class_.__table__.name == 'base_model_version'
class_ = version_class(self.FirstLevel)
assert class_.__name__ == 'FirstLevelVersion'
assert class_.__table__.name == 'first_level_version'
class_ = version_class(self.SecondLevel)
assert class_.__name__ == 'SecondLevelVersion'
assert class_.__table__.name == 'first_level_version'
|
<commit_before><commit_msg>Add test for inheritance case.<commit_after>import sqlalchemy as sa
from sqlalchemy_continuum import version_class
from tests import TestCase
class TestCommonBaseClass(TestCase):
def create_models(self):
class BaseModel(self.Model):
__tablename__ = 'base_model'
__versioned__ = {}
id = sa.Column(sa.Integer, primary_key=True)
discriminator = sa.Column(sa.String(50), index=True)
__mapper_args__ = {
'polymorphic_on': discriminator,
'polymorphic_identity': 'product'
}
class FirstLevel(BaseModel):
__tablename__ = 'first_level'
id = sa.Column(sa.Integer, sa.ForeignKey('base_model.id'), primary_key=True)
__mapper_args__ = {
'polymorphic_identity': 'first_level'
}
class SecondLevel(FirstLevel):
__mapper_args__ = {
'polymorphic_identity': 'second_level'
}
self.BaseModel = BaseModel
self.FirstLevel = FirstLevel
self.SecondLevel = SecondLevel
def test_sa_inheritance_with_no_distinct_table_has_right_translation_class(self):
class_ = version_class(self.BaseModel)
assert class_.__name__ == 'BaseModelVersion'
assert class_.__table__.name == 'base_model_version'
class_ = version_class(self.FirstLevel)
assert class_.__name__ == 'FirstLevelVersion'
assert class_.__table__.name == 'first_level_version'
class_ = version_class(self.SecondLevel)
assert class_.__name__ == 'SecondLevelVersion'
assert class_.__table__.name == 'first_level_version'
|
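The third assertion above rests on plain SQLAlchemy single-table inheritance: a subclass that declares no __tablename__ shares its parent's table, so its version class shares the parent's version table. That SQLAlchemy behaviour in isolation (1.4+ import path):
import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class FirstLevel(Base):
    __tablename__ = 'first_level'
    id = sa.Column(sa.Integer, primary_key=True)
    kind = sa.Column(sa.String(50))
    __mapper_args__ = {'polymorphic_on': kind,
                       'polymorphic_identity': 'first_level'}

class SecondLevel(FirstLevel):
    # no __tablename__, so rows are stored in first_level
    __mapper_args__ = {'polymorphic_identity': 'second_level'}

print(SecondLevel.__table__.name)  # first_level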
|
c7b57a9fcff6741869394ee2e6619db684e3d522
|
scripts/fix_weather_timestamp_errors.py
|
scripts/fix_weather_timestamp_errors.py
|
#!/usr/bin/env python3
"""This is a script for fixing odd weather timestamp values in the database. Sometimes
the timestamp is off by many hours from the previous one, and this script fixes those."""
from datetime import timedelta
import psycopg
def main():
"""Module main function."""
# pylint: disable=invalid-name
THRESHOLD_HOURS = 3
# Change these as necessary
DB_NAME = 'env_logger'
ROW_LIMIT = 4000
# pylint: disable=not-context-manager
with psycopg.connect(f'dbname={DB_NAME}') as conn:
with conn.cursor() as cursor:
cursor.execute('SELECT id, time FROM weather_data ORDER by id DESC LIMIT %s',
(ROW_LIMIT,))
rows = cursor.fetchall()
i = 0
    while i + 1 < len(rows):  # rows[i + 1] is read below, so stop one row early
diff_hours = (rows[i][1] - rows[i + 1][1]).total_seconds() / 3600
if abs(diff_hours) >= THRESHOLD_HOURS:
print(f'The difference {int(diff_hours)} hours of {rows[i]} and {rows[i + 1]} '
f'exceeds {THRESHOLD_HOURS} hours')
if diff_hours > 0:
corr_index = i + 1
corrected = rows[corr_index][1] + timedelta(hours=int(diff_hours))
else:
corr_index = i
corrected = rows[corr_index][1] + timedelta(hours=int(abs(diff_hours)) + 1)
print(f'Correcting timestamp of row ID {rows[corr_index][0]} to {corrected}')
cursor.execute('UPDATE weather_data SET time = %s WHERE id = %s',
(corrected, rows[corr_index][0]))
i += 2
if __name__ == '__main__':
main()
|
Add FMI weather table timestamp fixing script
|
Add FMI weather table timestamp fixing script
|
Python
|
mit
|
terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger,terop/env-logger
|
Add FMI weather table timestamp fixing script
|
#!/usr/bin/env python3
"""This is a script for fixing odd weather timestamp values in the database. Sometimes
the timestamp is off by many hours from the previous one, and this script fixes those."""
from datetime import timedelta
import psycopg
def main():
"""Module main function."""
# pylint: disable=invalid-name
THRESHOLD_HOURS = 3
# Change these as necessary
DB_NAME = 'env_logger'
ROW_LIMIT = 4000
# pylint: disable=not-context-manager
with psycopg.connect(f'dbname={DB_NAME}') as conn:
with conn.cursor() as cursor:
cursor.execute('SELECT id, time FROM weather_data ORDER by id DESC LIMIT %s',
(ROW_LIMIT,))
rows = cursor.fetchall()
i = 0
    while i + 1 < len(rows):  # rows[i + 1] is read below, so stop one row early
diff_hours = (rows[i][1] - rows[i + 1][1]).total_seconds() / 3600
if abs(diff_hours) >= THRESHOLD_HOURS:
print(f'The difference {int(diff_hours)} hours of {rows[i]} and {rows[i + 1]} '
f'exceeds {THRESHOLD_HOURS} hours')
if diff_hours > 0:
corr_index = i + 1
corrected = rows[corr_index][1] + timedelta(hours=int(diff_hours))
else:
corr_index = i
corrected = rows[corr_index][1] + timedelta(hours=int(abs(diff_hours)) + 1)
print(f'Correcting timestamp of row ID {rows[corr_index][0]} to {corrected}')
cursor.execute('UPDATE weather_data SET time = %s WHERE id = %s',
(corrected, rows[corr_index][0]))
i += 2
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add FMI weather table timestamp fixing script<commit_after>
|
#!/usr/bin/env python3
"""This is a script for fixing odd weather timestamp values in the database. Sometimes
the timestamp is off by many hours from the previous one, and this script fixes those."""
from datetime import timedelta
import psycopg
def main():
"""Module main function."""
# pylint: disable=invalid-name
THRESHOLD_HOURS = 3
# Change these as necessary
DB_NAME = 'env_logger'
ROW_LIMIT = 4000
# pylint: disable=not-context-manager
with psycopg.connect(f'dbname={DB_NAME}') as conn:
with conn.cursor() as cursor:
cursor.execute('SELECT id, time FROM weather_data ORDER by id DESC LIMIT %s',
(ROW_LIMIT,))
rows = cursor.fetchall()
i = 0
    while i + 1 < len(rows):  # rows[i + 1] is read below, so stop one row early
diff_hours = (rows[i][1] - rows[i + 1][1]).total_seconds() / 3600
if abs(diff_hours) >= THRESHOLD_HOURS:
print(f'The difference {int(diff_hours)} hours of {rows[i]} and {rows[i + 1]} '
f'exceeds {THRESHOLD_HOURS} hours')
if diff_hours > 0:
corr_index = i + 1
corrected = rows[corr_index][1] + timedelta(hours=int(diff_hours))
else:
corr_index = i
corrected = rows[corr_index][1] + timedelta(hours=int(abs(diff_hours)) + 1)
print(f'Correcting timestamp of row ID {rows[corr_index][0]} to {corrected}')
cursor.execute('UPDATE weather_data SET time = %s WHERE id = %s',
(corrected, rows[corr_index][0]))
i += 2
if __name__ == '__main__':
main()
|
Add FMI weather table timestamp fixing script#!/usr/bin/env python3
"""This is a script for fixing odd weather timestamp values in the database. Sometimes
the timestamp is off by many hours from the previous one, and this script fixes those."""
from datetime import timedelta
import psycopg
def main():
"""Module main function."""
# pylint: disable=invalid-name
THRESHOLD_HOURS = 3
# Change these as necessary
DB_NAME = 'env_logger'
ROW_LIMIT = 4000
# pylint: disable=not-context-manager
with psycopg.connect(f'dbname={DB_NAME}') as conn:
with conn.cursor() as cursor:
cursor.execute('SELECT id, time FROM weather_data ORDER by id DESC LIMIT %s',
(ROW_LIMIT,))
rows = cursor.fetchall()
i = 0
    while i + 1 < len(rows):  # rows[i + 1] is read below, so stop one row early
diff_hours = (rows[i][1] - rows[i + 1][1]).total_seconds() / 3600
if abs(diff_hours) >= THRESHOLD_HOURS:
print(f'The difference {int(diff_hours)} hours of {rows[i]} and {rows[i + 1]} '
f'exceeds {THRESHOLD_HOURS} hours')
if diff_hours > 0:
corr_index = i + 1
corrected = rows[corr_index][1] + timedelta(hours=int(diff_hours))
else:
corr_index = i
corrected = rows[corr_index][1] + timedelta(hours=int(abs(diff_hours)) + 1)
print(f'Correcting timestamp of row ID {rows[corr_index][0]} to {corrected}')
cursor.execute('UPDATE weather_data SET time = %s WHERE id = %s',
(corrected, rows[corr_index][0]))
i += 2
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add FMI weather table timestamp fixing script<commit_after>#!/usr/bin/env python3
"""This is a script for fixing odd weather timestamp values in the database. Sometimes
the timestamp is off by many hours from the previous one, and this script fixes those."""
from datetime import timedelta
import psycopg
def main():
"""Module main function."""
# pylint: disable=invalid-name
THRESHOLD_HOURS = 3
# Change these as necessary
DB_NAME = 'env_logger'
ROW_LIMIT = 4000
# pylint: disable=not-context-manager
with psycopg.connect(f'dbname={DB_NAME}') as conn:
with conn.cursor() as cursor:
cursor.execute('SELECT id, time FROM weather_data ORDER by id DESC LIMIT %s',
(ROW_LIMIT,))
rows = cursor.fetchall()
i = 0
    while i + 1 < len(rows):  # rows[i + 1] is read below, so stop one row early
diff_hours = (rows[i][1] - rows[i + 1][1]).total_seconds() / 3600
if abs(diff_hours) >= THRESHOLD_HOURS:
print(f'The difference {int(diff_hours)} hours of {rows[i]} and {rows[i + 1]} '
f'exceeds {THRESHOLD_HOURS} hours')
if diff_hours > 0:
corr_index = i + 1
corrected = rows[corr_index][1] + timedelta(hours=int(diff_hours))
else:
corr_index = i
corrected = rows[corr_index][1] + timedelta(hours=int(abs(diff_hours)) + 1)
print(f'Correcting timestamp of row ID {rows[corr_index][0]} to {corrected}')
cursor.execute('UPDATE weather_data SET time = %s WHERE id = %s',
(corrected, rows[corr_index][0]))
i += 2
if __name__ == '__main__':
main()
|
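The correction arithmetic in isolation, simplified to the "timestamp ahead of its neighbour" branch: whole hours of drift are removed so the corrected value keeps its minutes:
from datetime import datetime, timedelta

def snap_forward(ts, neighbour):
    """Remove whole hours of drift between ts and an earlier neighbour."""
    drift_hours = int((ts - neighbour).total_seconds() / 3600)
    return ts - timedelta(hours=drift_hours)

good = datetime(2023, 5, 1, 12, 0)
bad = datetime(2023, 5, 1, 17, 3)   # 5 h 3 min ahead of its neighbour
print(snap_forward(bad, good))      # 2023-05-01 12:03:00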
|
31d67e804ba44645b701c2624ee30c31023e994e
|
changed_options.py
|
changed_options.py
|
#!/usr/bin/env python3
# file: changed_options.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2018-03-26 20:53:13 +0200
# Last modified: 2018-03-26 23:03:02 +0200
"""
Get a list of installed packages. For each package, determine if the options
have been changed compared to the default options, and print that.
* The ‘pkg query’ command is used to retrieve the options that are set.
* For determining the default options, ‘make -V OPTIONS_DEFAULT’ is called
from the port directory.
This program requires pkg(8) and the ports tree to be installed.
So this program will run on FreeBSD and maybe DragonflyBSD.
"""
# Imports {{{1
import concurrent.futures as cf
import os
import subprocess as sp
import sys
def run(args): # {{{1
"""
Run a subprocess and return the standard output.
Arguments:
args (list): List of argument strings. Typically a command name
followed by options.
Returns:
Standard output of the program, converted to UTF-8 string.
"""
comp = sp.run(args, stdout=sp.PIPE, stderr=sp.DEVNULL)
return comp.stdout.decode('utf-8')
def check(line): # {{{1
"""
    Check whether a given package uses the default options or
if options have been changed.
Arguments:
line (str): A line of text containing the package name and origin,
separated by whitespace.
Returns:
A string containing the package name and either [CHANGED] or [default].
"""
pkg, origin = line.split()
optionlines = run(['pkg', 'query', '%Ok %Ov', pkg]).splitlines()
options_set = set(opt.split()[0] for opt in optionlines if opt.endswith('on'))
try:
os.chdir('/usr/ports/{}'.format(origin))
except FileNotFoundError:
return ('{}: undetermined'.format(pkg))
default = run(['make', '-V', 'OPTIONS_DEFAULT'])
options_default = set(default.split())
if options_default == options_set:
v = 'default'
else:
v = 'CHANGED'
return '{}: [{}]'.format(pkg, v)
def main(argv): # {{{1
"""
Entry point for changed_options.py.
Arguments:
argv: command line arguments
"""
data = run(['pkg', 'info', '-a', '-o'])
packagelines = data.splitlines()
with cf.ThreadPoolExecutor(max_workers=os.cpu_count()) as tp:
for rv in tp.map(check, packagelines):
print(rv)
if __name__ == '__main__':
main(sys.argv[1:])
|
Add script to detect changed options.
|
Add script to detect changed options.
|
Python
|
mit
|
rsmith-nl/scripts,rsmith-nl/scripts
|
Add script to detect changed options.
|
#!/usr/bin/env python3
# file: changed_options.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2018-03-26 20:53:13 +0200
# Last modified: 2018-03-26 23:03:02 +0200
"""
Get a list of installed packages. For each package, determine if the options
have been changed compared to the default options, and print that.
* The ‘pkg query’ command is used to retrieve the options that are set.
* For determining the default options, ‘make -V OPTIONS_DEFAULT’ is called
from the port directory.
This program requires pkg(8) and the ports tree to be installed.
So this program will run on FreeBSD and maybe DragonflyBSD.
"""
# Imports {{{1
import concurrent.futures as cf
import os
import subprocess as sp
import sys
def run(args): # {{{1
"""
Run a subprocess and return the standard output.
Arguments:
args (list): List of argument strings. Typically a command name
followed by options.
Returns:
Standard output of the program, converted to UTF-8 string.
"""
comp = sp.run(args, stdout=sp.PIPE, stderr=sp.DEVNULL)
return comp.stdout.decode('utf-8')
def check(line): # {{{1
"""
    Check whether a given package uses the default options or
if options have been changed.
Arguments:
line (str): A line of text containing the package name and origin,
separated by whitespace.
Returns:
A string containing the package name and either [CHANGED] or [default].
"""
pkg, origin = line.split()
optionlines = run(['pkg', 'query', '%Ok %Ov', pkg]).splitlines()
options_set = set(opt.split()[0] for opt in optionlines if opt.endswith('on'))
try:
os.chdir('/usr/ports/{}'.format(origin))
except FileNotFoundError:
return ('{}: undetermined'.format(pkg))
default = run(['make', '-V', 'OPTIONS_DEFAULT'])
options_default = set(default.split())
if options_default == options_set:
v = 'default'
else:
v = 'CHANGED'
return '{}: [{}]'.format(pkg, v)
def main(argv): # {{{1
"""
Entry point for changed_options.py.
Arguments:
argv: command line arguments
"""
data = run(['pkg', 'info', '-a', '-o'])
packagelines = data.splitlines()
with cf.ThreadPoolExecutor(max_workers=os.cpu_count()) as tp:
for rv in tp.map(check, packagelines):
print(rv)
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Add script to detect changed options.<commit_after>
|
#!/usr/bin/env python3
# file: changed_options.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2018-03-26 20:53:13 +0200
# Last modified: 2018-03-26 23:03:02 +0200
"""
Get a list of installed packages. For each package, determine if the options
have been changed compared to the default options, and print that.
* The ‘pkg query’ command is used to retrieve the options that are set.
* For determining the default options, ‘make -V OPTIONS_DEFAULT’ is called
from the port directory.
This program requires pkg(8) and the ports tree to be installed.
So this program will run on FreeBSD and maybe DragonflyBSD.
"""
# Imports {{{1
import concurrent.futures as cf
import os
import subprocess as sp
import sys
def run(args): # {{{1
"""
Run a subprocess and return the standard output.
Arguments:
args (list): List of argument strings. Typically a command name
followed by options.
Returns:
Standard output of the program, converted to UTF-8 string.
"""
comp = sp.run(args, stdout=sp.PIPE, stderr=sp.DEVNULL)
return comp.stdout.decode('utf-8')
def check(line): # {{{1
"""
Check of a given package uses the default options or
if options have been changed.
Arguments:
line (str): A line of text containing the package name and origin,
separated by whitespace.
Returns:
A string containing the package name and either [CHANGED] or [default].
"""
pkg, origin = line.split()
optionlines = run(['pkg', 'query', '%Ok %Ov', pkg]).splitlines()
options_set = set(opt.split()[0] for opt in optionlines if opt.endswith('on'))
try:
os.chdir('/usr/ports/{}'.format(origin))
except FileNotFoundError:
return ('{}: undetermined'.format(pkg))
default = run(['make', '-V', 'OPTIONS_DEFAULT'])
options_default = set(default.split())
if options_default == options_set:
v = 'default'
else:
v = 'CHANGED'
return '{}: [{}]'.format(pkg, v)
def main(argv): # {{{1
"""
Entry point for changed_options.py.
Arguments:
argv: command line arguments
"""
data = run(['pkg', 'info', '-a', '-o'])
packagelines = data.splitlines()
with cf.ThreadPoolExecutor(max_workers=os.cpu_count()) as tp:
for rv in tp.map(check, packagelines):
print(rv)
if __name__ == '__main__':
main(sys.argv[1:])
|
Add script to detect changed options.#!/usr/bin/env python3
# file: changed_options.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2018-03-26 20:53:13 +0200
# Last modified: 2018-03-26 23:03:02 +0200
"""
Get a list of installed packages. For each package, determine if the options
have been changed compared to the default options, and print that.
* The ‘pkg query’ command is used to retrieve the options that are set.
* For determining the default options, ‘make -V OPTIONS_DEFAULT’ is called
from the port directory.
This program requires pkg(8) and the ports tree to be installed.
So this program will run on FreeBSD and maybe DragonflyBSD.
"""
# Imports {{{1
import concurrent.futures as cf
import os
import subprocess as sp
import sys
def run(args): # {{{1
"""
Run a subprocess and return the standard output.
Arguments:
args (list): List of argument strings. Typically a command name
followed by options.
Returns:
Standard output of the program, converted to UTF-8 string.
"""
comp = sp.run(args, stdout=sp.PIPE, stderr=sp.DEVNULL)
return comp.stdout.decode('utf-8')
def check(line): # {{{1
"""
Check of a given package uses the default options or
if options have been changed.
Arguments:
line (str): A line of text containing the package name and origin,
separated by whitespace.
Returns:
A string containing the package name and either [CHANGED] or [default].
"""
pkg, origin = line.split()
optionlines = run(['pkg', 'query', '%Ok %Ov', pkg]).splitlines()
options_set = set(opt.split()[0] for opt in optionlines if opt.endswith('on'))
try:
os.chdir('/usr/ports/{}'.format(origin))
except FileNotFoundError:
return ('{}: undetermined'.format(pkg))
default = run(['make', '-V', 'OPTIONS_DEFAULT'])
options_default = set(default.split())
if options_default == options_set:
v = 'default'
else:
v = 'CHANGED'
return '{}: [{}]'.format(pkg, v)
def main(argv): # {{{1
"""
Entry point for changed_options.py.
Arguments:
argv: command line arguments
"""
data = run(['pkg', 'info', '-a', '-o'])
packagelines = data.splitlines()
with cf.ThreadPoolExecutor(max_workers=os.cpu_count()) as tp:
for rv in tp.map(check, packagelines):
print(rv)
if __name__ == '__main__':
main(sys.argv[1:])
|
<commit_before><commit_msg>Add script to detect changed options.<commit_after>#!/usr/bin/env python3
# file: changed_options.py
# vim:fileencoding=utf-8:fdm=marker:ft=python
#
# Author: R.F. Smith <rsmith@xs4all.nl>
# Created: 2018-03-26 20:53:13 +0200
# Last modified: 2018-03-26 23:03:02 +0200
"""
Get a list of installed packages. For each package, determine if the options
have been changed compared to the default options, and print that.
* The ‘pkg query’ command is used to retrieve the options that are set.
* For determining the default options, ‘make -V OPTIONS_DEFAULT’ is called
from the port directory.
This program requires pkg(8) and the ports tree to be installed.
So this program will run on FreeBSD and maybe DragonflyBSD.
"""
# Imports {{{1
import concurrent.futures as cf
import os
import subprocess as sp
import sys
def run(args): # {{{1
"""
Run a subprocess and return the standard output.
Arguments:
args (list): List of argument strings. Typically a command name
followed by options.
Returns:
Standard output of the program, converted to UTF-8 string.
"""
comp = sp.run(args, stdout=sp.PIPE, stderr=sp.DEVNULL)
return comp.stdout.decode('utf-8')
def check(line): # {{{1
"""
Check of a given package uses the default options or
if options have been changed.
Arguments:
line (str): A line of text containing the package name and origin,
separated by whitespace.
Returns:
A string containing the package name and either [CHANGED] or [default].
"""
pkg, origin = line.split()
optionlines = run(['pkg', 'query', '%Ok %Ov', pkg]).splitlines()
options_set = set(opt.split()[0] for opt in optionlines if opt.endswith('on'))
try:
os.chdir('/usr/ports/{}'.format(origin))
except FileNotFoundError:
return ('{}: undetermined'.format(pkg))
default = run(['make', '-V', 'OPTIONS_DEFAULT'])
options_default = set(default.split())
if options_default == options_set:
v = 'default'
else:
v = 'CHANGED'
return '{}: [{}]'.format(pkg, v)
def main(argv): # {{{1
"""
Entry point for changed_options.py.
Arguments:
argv: command line arguments
"""
data = run(['pkg', 'info', '-a', '-o'])
packagelines = data.splitlines()
with cf.ThreadPoolExecutor(max_workers=os.cpu_count()) as tp:
for rv in tp.map(check, packagelines):
print(rv)
if __name__ == '__main__':
main(sys.argv[1:])
|
|
bfe5ebae0261e49045e468bc183f54bcd7fbeafc
|
openfisca_core/scripts/xml_to_json/xml_to_json_extension_template.py
|
openfisca_core/scripts/xml_to_json/xml_to_json_extension_template.py
|
# -*- coding: utf-8 -*-
''' xml_to_json_extension_template.py : Parse XML parameter files for Extension-Template and convert them to YAML files. Comments are NOT transformed.
Usage :
`python xml_to_json_extension_template.py output_dir`
or just (output is written in a directory called `yaml_parameters`):
`python xml_to_json_extension_template.py`
'''
import sys
import os
from openfisca_core.scripts.xml_to_json import xml_to_json
import openfisca_extension_template
if len(sys.argv) > 1:
target_path = sys.argv[1]
else:
target_path = 'yaml_parameters'
param_dir = os.path.dirname(openfisca_extension_template.__file__)
param_files = [
'parameters.xml',
]
legislation_xml_info_list = [
(os.path.join(param_dir, param_file), [])
for param_file in param_files
]
xml_to_json.write_legislation(legislation_xml_info_list, target_path)
|
Add script to transform ExtensionTemplate legislation
|
Add script to transform ExtensionTemplate legislation
|
Python
|
agpl-3.0
|
openfisca/openfisca-core,openfisca/openfisca-core
|
|
18f983cf035704588f904d966f8bf10ca4a16b01
|
src/mmw/apps/modeling/migrations/0040_clear_nlcd2019_tr55_results.py
|
src/mmw/apps/modeling/migrations/0040_clear_nlcd2019_tr55_results.py
|
# Generated by Django 3.2.13 on 2022-04-20 23:35
from django.db import migrations
def clear_nlcd2019_tr55_results(apps, schema_editor):
"""
Clear the results for all scenarios belonging to TR-55 projects made after
the release of 1.33.0, which switched NLCD19 2019 to be the default on
2022-01-17:
https://github.com/WikiWatershed/model-my-watershed/releases/tag/1.33.0
These will be recalculated with NLCD11 2011 the next time they are opened.
"""
Scenario = apps.get_model('modeling', 'Scenario')
Scenario.objects.filter(
project__model_package='tr-55',
project__created_at__gte='2022-01-17'
).update(
results='[]',
modification_hash=''
)
class Migration(migrations.Migration):
dependencies = [
('modeling', '0039_override_sedaadjust_for_old_scenarios'),
]
operations = [
migrations.RunPython(clear_nlcd2019_tr55_results)
]
|
Clear all NLCD19 2019 TR-55 results
|
Clear all NLCD19 2019 TR-55 results
Since we're switching TR-55 projects back to NLCD11 2011,
this migration clears the results for all TR-55 projects
made since 1.33.0, which switched the default to NLCD19 2019.
These results will be recalculated with NLCD11 2011 whenever
they are opened next in the UI.
|
Python
|
apache-2.0
|
WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed
|
|
727e57b8f639a471423c4b5a87af594632ae609d
|
scripts/tools/make_manhole.py
|
scripts/tools/make_manhole.py
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Generate a .manhole for all masters."""
import getpass
import os
import optparse
import subprocess
import sys
def check_output(cmd):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout = p.communicate(None)[0]
if p.returncode:
raise subprocess.CalledProcessError(p.returncode, cmd)
return stdout
def main():
parser = optparse.OptionParser()
parser.add_option('-u', '--user', default=getpass.getuser())
parser.add_option('-p', '--port', type='int', help='Base port')
parser.add_option('-r', '--root', default=os.getcwd(), help='Path to masters')
options, args = parser.parse_args(None)
if args:
parser.error('Have you tried not using the wrong argument?')
if not options.port:
parser.error('Use --port to specify a base port')
if not os.path.basename(options.root) == 'masters':
parser.error('Use --root or cd into the masters directory')
try:
check_output(['apg', '-q', '-n', '1'])
except subprocess.CalledProcessError:
parser.error('Run sudo apt-get install apg')
for i in os.listdir(options.root):
dirpath = os.path.join(options.root, i)
if not os.path.isdir(dirpath):
continue
filepath = os.path.join(dirpath, '.manhole')
if os.path.isfile(filepath):
print '%-30s already had .manhole' % i
continue
print '%-30s Generating password' % i
password = check_output(['apg', '-q', '-n', '1']).strip()
content = "user='%s'\npassword='/!%s'\nport=%d\n" % (
options.user, password, options.port)
options.port += 1
open(filepath, 'w').write(content)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add tool to generate .manhole files.
|
Add tool to generate .manhole files.
This is cleaner than creating them one by one.
R=cmp@chromium.org
BUG=
TEST=
Review URL: http://codereview.chromium.org/8347006
git-svn-id: 239fca9b83025a0b6f823aeeca02ba5be3d9fd76@106120 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
eunchong/build,eunchong/build,eunchong/build,eunchong/build
|
|
e471fa49409d45b2b76b12ac63fb6487466be174
|
csunplugged/utils/retrieve_query_parameter.py
|
csunplugged/utils/retrieve_query_parameter.py
|
"""Module for retrieving a GET request query parameter."""
from django.http import Http404
def retrieve_query_parameter(request, parameter, valid_options=None):
"""Retrieve the query parameter.
If the parameter cannot be found, or is not found in the list of
valid options, then a 404 error is raised.
Args:
request: Request object (Request).
parameter: Parameter to retrieve (str).
valid_options: If provided, a list of valid options (list of str).
Returns:
String value of parameter.
"""
value = request.get(parameter, None)
if value is None:
raise Http404("{} parameter not specified.".format(parameter))
if valid_options and value not in valid_options:
raise Http404("{} parameter not valid.".format(parameter))
return value
|
Add utility function for retrieving GET parameters
|
Add utility function for retrieving GET parameters
|
Python
|
mit
|
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
|
|
2865af9eba55f1b3bbf14d26fd9691925fde8f5e
|
py/reconstruct-original-digits-from-english.py
|
py/reconstruct-original-digits-from-english.py
|
from collections import Counter
class Solution(object):
def originalDigits(self, s):
"""
:type s: str
:rtype: str
"""
phase1 = dict(
g=(8, 'eight'),
u=(4, 'four'),
w=(2, 'two'),
x=(6, 'six'),
z=(0, 'zero')
)
phase2 = dict(
t=(3, 'thre'),
s=(7, 'sevn'),
o=(1, 'one'),
f=(5, 'five')
)
phase3 = dict(
i=(9, 'nie')
)
counter = Counter(s)
ansCounter = Counter()
for k, (n, w) in phase1.iteritems():
ck = counter[k]
ansCounter[n] += ck
for c in w:
counter[c] -= ck
for k, (n, w) in phase2.iteritems():
ck = counter[k]
ansCounter[n] += ck
for c in w:
counter[c] -= ck
for k, (n, w) in phase3.iteritems():
ck = counter[k]
ansCounter[n] += ck
for c in w:
counter[c] -= ck
return ''.join(str(i) * ansCounter[i] for i in xrange(10))
|
Add py solution for 423. Reconstruct Original Digits from English
|
Add py solution for 423. Reconstruct Original Digits from English
423. Reconstruct Original Digits from English: https://leetcode.com/problems/reconstruct-original-digits-from-english/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
|
09911a0fe8135fa2534c9d6e708e688fcfe54ca7
|
analysis/check_calls.py
|
analysis/check_calls.py
|
import os
import click
import cv2
from AppKit import NSScreen
def check_image(filename, height):
image = cv2.imread(filename)
image_name = os.path.basename(filename)
is_positive = _check(image, image_name, height)
return image_name, is_positive
def _check(image, image_name, target_height):
cv2.namedWindow(image_name, cv2.WINDOW_NORMAL)
while True:
height, width = image.shape[:2]
scaling_f = height / target_height
small = cv2.resize(image, None, fx=1/scaling_f, fy=1/scaling_f)
# display the image and wait for a keypress
cv2.imshow(image_name, small)
key = cv2.waitKey(1)
# if the 'y' key is pressed return True
if key == ord("y"):
cv2.destroyWindow(image_name)
print('True')
return True
# if the 'n' key is pressed return False
elif key == ord("n"):
print('False')
cv2.destroyWindow(image_name)
return False
@click.command()
@click.argument('files',
nargs=-1, type=click.Path(exists=True))
@click.option('--output_path',
help='Write result here',
default=None,
type=click.Path(exists=False))
def check_files(files, output_path):
height = NSScreen.mainScreen().frame().size.height
with open(output_path, 'w') as out:
for file in files:
image_name, is_positive = check_image(file, height)
template = "%s\t%s\n"
out.write(template % (image_name, is_positive))
if __name__ == "__main__":
check_files()
|
Add a small script for rapidly verifying IGV screenshots
|
Add a small script for rapidly verifying IGV screenshots
|
Python
|
mit
|
bardin-lab/readtagger,bardin-lab/readtagger
|
|
10782310cfee0d2c2938748056f6549b5918b969
|
src/sentry/debug/utils/patch_context.py
|
src/sentry/debug/utils/patch_context.py
|
from __future__ import absolute_import
from sentry.utils.imports import import_string
class PatchContext(object):
def __init__(self, target, callback):
target, attr = target.rsplit('.', 1)
target = import_string(target)
self.func = getattr(target, attr)
self.target = target
self.attr = attr
self.callback = callback
def __enter__(self):
self.patch()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unpatch()
def patch(self):
func = getattr(self.target, self.attr)
def wrapped(*args, **kwargs):
__traceback_hide__ = True # NOQA
return self.callback(self.func, *args, **kwargs)
wrapped.__name__ = func.__name__
if hasattr(func, '__doc__'):
wrapped.__doc__ = func.__doc__
if hasattr(func, '__module__'):
wrapped.__module__ = func.__module__
setattr(self.target, self.attr, wrapped)
def unpatch(self):
setattr(self.target, self.attr, self.func)
|
from __future__ import absolute_import
from threading import Lock
from sentry.utils.imports import import_string
class PatchContext(object):
def __init__(self, target, callback):
target, attr = target.rsplit('.', 1)
target = import_string(target)
self.target = target
self.attr = attr
self.callback = callback
self._lock = Lock()
with self._lock:
self.func = getattr(target, attr)
def __enter__(self):
self.patch()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.unpatch()
def patch(self):
with self._lock:
func = getattr(self.target, self.attr)
def wrapped(*args, **kwargs):
__traceback_hide__ = True # NOQA
return self.callback(self.func, *args, **kwargs)
wrapped.__name__ = func.__name__
if hasattr(func, '__doc__'):
wrapped.__doc__ = func.__doc__
if hasattr(func, '__module__'):
wrapped.__module__ = func.__module__
setattr(self.target, self.attr, wrapped)
def unpatch(self):
with self._lock:
setattr(self.target, self.attr, self.func)
|
Use a thread lock to patch contexts.
|
Use a thread lock to patch contexts.
This fixes #3185
|
Python
|
bsd-3-clause
|
looker/sentry,zenefits/sentry,mvaled/sentry,alexm92/sentry,alexm92/sentry,looker/sentry,gencer/sentry,ifduyue/sentry,jean/sentry,JackDanger/sentry,JackDanger/sentry,ifduyue/sentry,BuildingLink/sentry,gencer/sentry,beeftornado/sentry,BuildingLink/sentry,mvaled/sentry,JamesMura/sentry,jean/sentry,zenefits/sentry,zenefits/sentry,mvaled/sentry,mvaled/sentry,jean/sentry,JamesMura/sentry,zenefits/sentry,mvaled/sentry,JamesMura/sentry,fotinakis/sentry,ifduyue/sentry,gencer/sentry,JackDanger/sentry,mitsuhiko/sentry,jean/sentry,zenefits/sentry,mvaled/sentry,gencer/sentry,BuildingLink/sentry,fotinakis/sentry,JamesMura/sentry,mitsuhiko/sentry,beeftornado/sentry,beeftornado/sentry,fotinakis/sentry,ifduyue/sentry,alexm92/sentry,BuildingLink/sentry,JamesMura/sentry,looker/sentry,gencer/sentry,looker/sentry,fotinakis/sentry,looker/sentry,BuildingLink/sentry,ifduyue/sentry,jean/sentry
|
|
a353ad76774c44004256fef8b076f74b6b639ca4
|
tests/remove_stale_string.py
|
tests/remove_stale_string.py
|
import re
import json
import glob
from collections import OrderedDict
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
print(locale_file)
this_locale = json.loads(open(locale_folder + locale_file).read(), object_pairs_hook=OrderedDict)
this_locale_fixed = {k:v for k, v in this_locale.items() if k in reference}
json.dump(this_locale_fixed, open(locale_folder + locale_file, "w"), indent=4, ensure_ascii=False)
|
Add script to remove stale i18n strings
|
Add script to remove stale i18n strings
|
Python
|
agpl-3.0
|
YunoHost/moulinette-yunohost,YunoHost/yunohost,YunoHost/moulinette-yunohost,YunoHost/yunohost,YunoHost/moulinette-yunohost,YunoHost/yunohost,YunoHost/moulinette-yunohost,YunoHost/moulinette-yunohost,YunoHost/yunohost
|
Add script to remove stale i18n strings
|
import re
import json
import glob
from collections import OrderedDict
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
print(locale_file)
this_locale = json.loads(open(locale_folder + locale_file).read(), object_pairs_hook=OrderedDict)
this_locale_fixed = {k:v for k, v in this_locale.items() if k in reference}
json.dump(this_locale_fixed, open(locale_folder + locale_file, "w"), indent=4, ensure_ascii=False)
|
<commit_before><commit_msg>Add script to remove stale i18n strings<commit_after>
|
import re
import json
import glob
from collections import OrderedDict
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
print(locale_file)
this_locale = json.loads(open(locale_folder + locale_file).read(), object_pairs_hook=OrderedDict)
this_locale_fixed = {k:v for k, v in this_locale.items() if k in reference}
json.dump(this_locale_fixed, open(locale_folder + locale_file, "w"), indent=4, ensure_ascii=False)
|
Add script to remove stale i18n stringsimport re
import json
import glob
from collections import OrderedDict
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
print(locale_file)
this_locale = json.loads(open(locale_folder + locale_file).read(), object_pairs_hook=OrderedDict)
this_locale_fixed = {k:v for k, v in this_locale.items() if k in reference}
json.dump(this_locale_fixed, open(locale_folder + locale_file, "w"), indent=4, ensure_ascii=False)
|
<commit_before><commit_msg>Add script to remove stale i18n strings<commit_after>import re
import json
import glob
from collections import OrderedDict
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
print(locale_file)
this_locale = json.loads(open(locale_folder + locale_file).read(), object_pairs_hook=OrderedDict)
this_locale_fixed = {k:v for k, v in this_locale.items() if k in reference}
json.dump(this_locale_fixed, open(locale_folder + locale_file, "w"), indent=4, ensure_ascii=False)
|
|
4cc2861ed79d54c5f59a29b5d56dde5aae9c0c81
|
examples/ex_rofi.py
|
examples/ex_rofi.py
|
#!/usr/bin/env python
import string
from dynmen import Menu
rofi = Menu(command=('rofi', '-fullscreen', '-dmenu', '-i'))
d_string = vars(string)
d_string = {k:v for k,v in d_string.items() if not k.startswith('_')}
print('Launching rofi - given a dict')
output = rofi(d_string)
print(output, '\n')
print('Launching rofi - first sorting entries of dict')
output2 = rofi.sort(d_string)
print(output2, '\n')
print('Launching rofi - given a list')
output3 = rofi(list(d_string))
print(output3, '\n')
|
Add a script showing usage in examples
|
Add a script showing usage in examples
|
Python
|
mit
|
frostidaho/dynmen
|
Add a script showing usage in examples
|
#!/usr/bin/env python
import string
from dynmen import Menu
rofi = Menu(command=('rofi', '-fullscreen', '-dmenu', '-i'))
d_string = vars(string)
d_string = {k:v for k,v in d_string.items() if not k.startswith('_')}
print('Launching rofi - given a dict')
output = rofi(d_string)
print(output, '\n')
print('Launching rofi - first sorting entries of dict')
output2 = rofi.sort(d_string)
print(output2, '\n')
print('Launching rofi - given a list')
output3 = rofi(list(d_string))
print(output3, '\n')
|
<commit_before><commit_msg>Add a script showing usage in examples<commit_after>
|
#!/usr/bin/env python
import string
from dynmen import Menu
rofi = Menu(command=('rofi', '-fullscreen', '-dmenu', '-i'))
d_string = vars(string)
d_string = {k:v for k,v in d_string.items() if not k.startswith('_')}
print('Launching rofi - given a dict')
output = rofi(d_string)
print(output, '\n')
print('Launching rofi - first sorting entries of dict')
output2 = rofi.sort(d_string)
print(output2, '\n')
print('Launching rofi - given a list')
output3 = rofi(list(d_string))
print(output3, '\n')
|
Add a script showing usage in examples#!/usr/bin/env python
import string
from dynmen import Menu
rofi = Menu(command=('rofi', '-fullscreen', '-dmenu', '-i'))
d_string = vars(string)
d_string = {k:v for k,v in d_string.items() if not k.startswith('_')}
print('Launching rofi - given a dict')
output = rofi(d_string)
print(output, '\n')
print('Launching rofi - first sorting entries of dict')
output2 = rofi.sort(d_string)
print(output2, '\n')
print('Launching rofi - given a list')
output3 = rofi(list(d_string))
print(output3, '\n')
|
<commit_before><commit_msg>Add a script showing usage in examples<commit_after>#!/usr/bin/env python
import string
from dynmen import Menu
rofi = Menu(command=('rofi', '-fullscreen', '-dmenu', '-i'))
d_string = vars(string)
d_string = {k:v for k,v in d_string.items() if not k.startswith('_')}
print('Launching rofi - given a dict')
output = rofi(d_string)
print(output, '\n')
print('Launching rofi - first sorting entries of dict')
output2 = rofi.sort(d_string)
print(output2, '\n')
print('Launching rofi - given a list')
output3 = rofi(list(d_string))
print(output3, '\n')
|
|
00596dbc602a3e555cef0a3453d83d475c28fc52
|
tests/test_azure_publish_tools.py
|
tests/test_azure_publish_tools.py
|
from argparse import Namespace
from unittest import TestCase
from azure_publish_tools import (
DELETE,
get_option_parser,
LIST,
PUBLISH,
)
class TestOptionParser(TestCase):
def parse_args(self, args):
parser = get_option_parser()
return parser.parse_args(args)
def test_list(self):
args = self.parse_args(['list', 'mypurpose'])
self.assertEqual(Namespace(
command=LIST, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_list_dry_run(self):
args = self.parse_args(['list', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_list_verbose(self):
args = self.parse_args(['list', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish(self):
args = self.parse_args(['publish', 'mypurpose'])
self.assertEqual(Namespace(
command=PUBLISH, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_publish_dry_run(self):
args = self.parse_args(['publish', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_publish_verbose(self):
args = self.parse_args(['publish', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish_path(self):
args = self.parse_args(['publish', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
def test_delete(self):
args = self.parse_args(['delete', 'mypurpose'])
self.assertEqual(Namespace(
command=DELETE, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_delete_dry_run(self):
args = self.parse_args(['delete', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_delete_verbose(self):
args = self.parse_args(['delete', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_delete_path(self):
args = self.parse_args(['delete', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
|
Add tests for current arg parsing.
|
Add tests for current arg parsing.
|
Python
|
agpl-3.0
|
mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju
|
Add tests for current arg parsing.
|
from argparse import Namespace
from unittest import TestCase
from azure_publish_tools import (
DELETE,
get_option_parser,
LIST,
PUBLISH,
)
class TestOptionParser(TestCase):
def parse_args(self, args):
parser = get_option_parser()
return parser.parse_args(args)
def test_list(self):
args = self.parse_args(['list', 'mypurpose'])
self.assertEqual(Namespace(
command=LIST, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_list_dry_run(self):
args = self.parse_args(['list', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_list_verbose(self):
args = self.parse_args(['list', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish(self):
args = self.parse_args(['publish', 'mypurpose'])
self.assertEqual(Namespace(
command=PUBLISH, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_publish_dry_run(self):
args = self.parse_args(['publish', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_publish_verbose(self):
args = self.parse_args(['publish', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish_path(self):
args = self.parse_args(['publish', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
def test_delete(self):
args = self.parse_args(['delete', 'mypurpose'])
self.assertEqual(Namespace(
command=DELETE, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_delete_dry_run(self):
args = self.parse_args(['delete', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_delete_verbose(self):
args = self.parse_args(['delete', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_delete_path(self):
args = self.parse_args(['delete', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
|
<commit_before><commit_msg>Add tests for current arg parsing.<commit_after>
|
from argparse import Namespace
from unittest import TestCase
from azure_publish_tools import (
DELETE,
get_option_parser,
LIST,
PUBLISH,
)
class TestOptionParser(TestCase):
def parse_args(self, args):
parser = get_option_parser()
return parser.parse_args(args)
def test_list(self):
args = self.parse_args(['list', 'mypurpose'])
self.assertEqual(Namespace(
command=LIST, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_list_dry_run(self):
args = self.parse_args(['list', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_list_verbose(self):
args = self.parse_args(['list', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish(self):
args = self.parse_args(['publish', 'mypurpose'])
self.assertEqual(Namespace(
command=PUBLISH, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_publish_dry_run(self):
args = self.parse_args(['publish', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_publish_verbose(self):
args = self.parse_args(['publish', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish_path(self):
args = self.parse_args(['publish', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
def test_delete(self):
args = self.parse_args(['delete', 'mypurpose'])
self.assertEqual(Namespace(
command=DELETE, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_delete_dry_run(self):
args = self.parse_args(['delete', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_delete_verbose(self):
args = self.parse_args(['delete', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_delete_path(self):
args = self.parse_args(['delete', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
|
Add tests for current arg parsing.from argparse import Namespace
from unittest import TestCase
from azure_publish_tools import (
DELETE,
get_option_parser,
LIST,
PUBLISH,
)
class TestOptionParser(TestCase):
def parse_args(self, args):
parser = get_option_parser()
return parser.parse_args(args)
def test_list(self):
args = self.parse_args(['list', 'mypurpose'])
self.assertEqual(Namespace(
command=LIST, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_list_dry_run(self):
args = self.parse_args(['list', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_list_verbose(self):
args = self.parse_args(['list', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish(self):
args = self.parse_args(['publish', 'mypurpose'])
self.assertEqual(Namespace(
command=PUBLISH, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_publish_dry_run(self):
args = self.parse_args(['publish', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_publish_verbose(self):
args = self.parse_args(['publish', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish_path(self):
args = self.parse_args(['publish', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
def test_delete(self):
args = self.parse_args(['delete', 'mypurpose'])
self.assertEqual(Namespace(
command=DELETE, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_delete_dry_run(self):
args = self.parse_args(['delete', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_delete_verbose(self):
args = self.parse_args(['delete', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_delete_path(self):
args = self.parse_args(['delete', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
|
<commit_before><commit_msg>Add tests for current arg parsing.<commit_after>from argparse import Namespace
from unittest import TestCase
from azure_publish_tools import (
DELETE,
get_option_parser,
LIST,
PUBLISH,
)
class TestOptionParser(TestCase):
def parse_args(self, args):
parser = get_option_parser()
return parser.parse_args(args)
def test_list(self):
args = self.parse_args(['list', 'mypurpose'])
self.assertEqual(Namespace(
command=LIST, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_list_dry_run(self):
args = self.parse_args(['list', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_list_verbose(self):
args = self.parse_args(['list', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish(self):
args = self.parse_args(['publish', 'mypurpose'])
self.assertEqual(Namespace(
command=PUBLISH, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_publish_dry_run(self):
args = self.parse_args(['publish', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_publish_verbose(self):
args = self.parse_args(['publish', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_publish_path(self):
args = self.parse_args(['publish', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
def test_delete(self):
args = self.parse_args(['delete', 'mypurpose'])
self.assertEqual(Namespace(
command=DELETE, purpose='mypurpose', dry_run=False, verbose=False,
path=[]), args)
def test_delete_dry_run(self):
args = self.parse_args(['delete', 'mypurpose', '--dry-run'])
self.assertIs(True, args.dry_run)
def test_delete_verbose(self):
args = self.parse_args(['delete', 'mypurpose', '--verbose'])
self.assertIs(True, args.verbose)
def test_delete_path(self):
args = self.parse_args(['delete', 'mypurpose', 'mypath', 'mypath2'])
self.assertEqual(['mypath', 'mypath2'], args.path)
|
|
8a9329a5c2b97d32a1fd32ae16c21222fb10b6b2
|
cms_lab_carousel/migrations/0003_auto_20150417_1240.py
|
cms_lab_carousel/migrations/0003_auto_20150417_1240.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import django.core.validators
import datetime
class Migration(migrations.Migration):
dependencies = [
('cms_lab_carousel', '0002_auto_20150417_1219'),
]
operations = [
migrations.RemoveField(
model_name='carousel',
name='footer_height',
),
migrations.RemoveField(
model_name='carousel',
name='header_height',
),
migrations.AlterField(
model_name='carousel',
name='slide_limit',
field=models.IntegerField(default=10, verbose_name='slide limit', validators=[django.core.validators.MinValueValidator(1)], help_text='Specify the maximum # of slides to display.'),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='other_url_color',
            field=models.CharField(default='default', choices=[('default', 'White'), ('primary', 'Blue'), ('info', 'Light Blue'), ('success', 'Green'), ('warning', 'Orange'), ('danger', 'Red')], blank=True, max_length=7, verbose_name='other URL color', help_text="If there is another relevant URL for this slide, choose the color for its button."),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='publish_datetime',
            field=models.DateTimeField(default=datetime.datetime(2015, 4, 17, 19, 40, 36, 175156, tzinfo=utc), verbose_name='date/time slide published', help_text='Choose date/time to publish slide. Slides are displayed in reverse-chronological order, so this can be used to control their order. A future date will hide a slide until that date.'),
preserve_default=True,
),
]
|
Make migrations for changes to models
|
Make migrations for changes to models
|
Python
|
bsd-3-clause
|
mfcovington/djangocms-lab-carousel,mfcovington/djangocms-lab-carousel
|
Make migrations for changes to models
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import django.core.validators
import datetime
class Migration(migrations.Migration):
dependencies = [
('cms_lab_carousel', '0002_auto_20150417_1219'),
]
operations = [
migrations.RemoveField(
model_name='carousel',
name='footer_height',
),
migrations.RemoveField(
model_name='carousel',
name='header_height',
),
migrations.AlterField(
model_name='carousel',
name='slide_limit',
field=models.IntegerField(default=10, verbose_name='slide limit', validators=[django.core.validators.MinValueValidator(1)], help_text='Specify the maximum # of slides to display.'),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='other_url_color',
            field=models.CharField(default='default', choices=[('default', 'White'), ('primary', 'Blue'), ('info', 'Light Blue'), ('success', 'Green'), ('warning', 'Orange'), ('danger', 'Red')], blank=True, max_length=7, verbose_name='other URL color', help_text="If there is another relevant URL for this slide, choose the color for its button."),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='publish_datetime',
            field=models.DateTimeField(default=datetime.datetime(2015, 4, 17, 19, 40, 36, 175156, tzinfo=utc), verbose_name='date/time slide published', help_text='Choose date/time to publish slide. Slides are displayed in reverse-chronological order, so this can be used to control their order. A future date will hide a slide until that date.'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Make migrations for changes to models<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import django.core.validators
import datetime
class Migration(migrations.Migration):
dependencies = [
('cms_lab_carousel', '0002_auto_20150417_1219'),
]
operations = [
migrations.RemoveField(
model_name='carousel',
name='footer_height',
),
migrations.RemoveField(
model_name='carousel',
name='header_height',
),
migrations.AlterField(
model_name='carousel',
name='slide_limit',
field=models.IntegerField(default=10, verbose_name='slide limit', validators=[django.core.validators.MinValueValidator(1)], help_text='Specify the maximum # of slides to display.'),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='other_url_color',
            field=models.CharField(default='default', choices=[('default', 'White'), ('primary', 'Blue'), ('info', 'Light Blue'), ('success', 'Green'), ('warning', 'Orange'), ('danger', 'Red')], blank=True, max_length=7, verbose_name='other URL color', help_text="If there is another relevant URL for this slide, choose the color for its button."),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='publish_datetime',
            field=models.DateTimeField(default=datetime.datetime(2015, 4, 17, 19, 40, 36, 175156, tzinfo=utc), verbose_name='date/time slide published', help_text='Choose date/time to publish slide. Slides are displayed in reverse-chronological order, so this can be used to control their order. A future date will hide a slide until that date.'),
preserve_default=True,
),
]
|
Make migrations for changes to models# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import django.core.validators
import datetime
class Migration(migrations.Migration):
dependencies = [
('cms_lab_carousel', '0002_auto_20150417_1219'),
]
operations = [
migrations.RemoveField(
model_name='carousel',
name='footer_height',
),
migrations.RemoveField(
model_name='carousel',
name='header_height',
),
migrations.AlterField(
model_name='carousel',
name='slide_limit',
field=models.IntegerField(default=10, verbose_name='slide limit', validators=[django.core.validators.MinValueValidator(1)], help_text='Specify the maximum # of slides to display.'),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='other_url_color',
            field=models.CharField(default='default', choices=[('default', 'White'), ('primary', 'Blue'), ('info', 'Light Blue'), ('success', 'Green'), ('warning', 'Orange'), ('danger', 'Red')], blank=True, max_length=7, verbose_name='other URL color', help_text="If there is another relevant URL for this slide, choose the color for its button."),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='publish_datetime',
            field=models.DateTimeField(default=datetime.datetime(2015, 4, 17, 19, 40, 36, 175156, tzinfo=utc), verbose_name='date/time slide published', help_text='Choose date/time to publish slide. Slides are displayed in reverse-chronological order, so this can be used to control their order. A future date will hide a slide until that date.'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Make migrations for changes to models<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.utils.timezone import utc
import django.core.validators
import datetime
class Migration(migrations.Migration):
dependencies = [
('cms_lab_carousel', '0002_auto_20150417_1219'),
]
operations = [
migrations.RemoveField(
model_name='carousel',
name='footer_height',
),
migrations.RemoveField(
model_name='carousel',
name='header_height',
),
migrations.AlterField(
model_name='carousel',
name='slide_limit',
field=models.IntegerField(default=10, verbose_name='slide limit', validators=[django.core.validators.MinValueValidator(1)], help_text='Specify the maximum # of slides to display.'),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='other_url_color',
            field=models.CharField(default='default', choices=[('default', 'White'), ('primary', 'Blue'), ('info', 'Light Blue'), ('success', 'Green'), ('warning', 'Orange'), ('danger', 'Red')], blank=True, max_length=7, verbose_name='other URL color', help_text="If there is another relevant URL for this slide, choose the color for its button."),
preserve_default=True,
),
migrations.AlterField(
model_name='slide',
name='publish_datetime',
            field=models.DateTimeField(default=datetime.datetime(2015, 4, 17, 19, 40, 36, 175156, tzinfo=utc), verbose_name='date/time slide published', help_text='Choose date/time to publish slide. Slides are displayed in reverse-chronological order, so this can be used to control their order. A future date will hide a slide until that date.'),
preserve_default=True,
),
]
|
|
3eefb913a11a91cfe543b5efe926e233953e6b0c
|
tests/test_usfirst_event_type_parser.py
|
tests/test_usfirst_event_type_parser.py
|
import unittest2
from models.event import Event
from helpers.event_helper import EventHelper
class TestUsfirstEventTypeParser(unittest2.TestCase):
def test_parse(self):
self.assertEqual(EventHelper.parseEventType("Regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("district"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("MI District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifying Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifier"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("MI FRC State Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Qualifying Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Championship Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Championship Finals"), Event.CMP_FINALS)
self.assertEqual(EventHelper.parseEventType("Championship"), Event.CMP_FINALS)
|
Add test for event type parser
|
Add test for event type parser
|
Python
|
mit
|
josephbisch/the-blue-alliance,synth3tk/the-blue-alliance,verycumbersome/the-blue-alliance,fangeugene/the-blue-alliance,bdaroz/the-blue-alliance,fangeugene/the-blue-alliance,1fish2/the-blue-alliance,phil-lopreiato/the-blue-alliance,phil-lopreiato/the-blue-alliance,tsteward/the-blue-alliance,tsteward/the-blue-alliance,1fish2/the-blue-alliance,phil-lopreiato/the-blue-alliance,josephbisch/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,jaredhasenklein/the-blue-alliance,josephbisch/the-blue-alliance,bvisness/the-blue-alliance,nwalters512/the-blue-alliance,1fish2/the-blue-alliance,bvisness/the-blue-alliance,fangeugene/the-blue-alliance,phil-lopreiato/the-blue-alliance,nwalters512/the-blue-alliance,jaredhasenklein/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,1fish2/the-blue-alliance,phil-lopreiato/the-blue-alliance,tsteward/the-blue-alliance,tsteward/the-blue-alliance,bvisness/the-blue-alliance,the-blue-alliance/the-blue-alliance,tsteward/the-blue-alliance,nwalters512/the-blue-alliance,synth3tk/the-blue-alliance,synth3tk/the-blue-alliance,fangeugene/the-blue-alliance,synth3tk/the-blue-alliance,the-blue-alliance/the-blue-alliance,1fish2/the-blue-alliance,josephbisch/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,josephbisch/the-blue-alliance,bvisness/the-blue-alliance,the-blue-alliance/the-blue-alliance,josephbisch/the-blue-alliance,synth3tk/the-blue-alliance,verycumbersome/the-blue-alliance,jaredhasenklein/the-blue-alliance,jaredhasenklein/the-blue-alliance,bdaroz/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,the-blue-alliance/the-blue-alliance,verycumbersome/the-blue-alliance,verycumbersome/the-blue-alliance,bvisness/the-blue-alliance,verycumbersome/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,tsteward/the-blue-alliance,nwalters512/the-blue-alliance,bvisness/the-blue-alliance,fangeugene/the-blue-alliance,nwalters512/the-blue-alliance,the-blue-alliance/the-blue-alliance,synth3tk/the-blue-alliance,1fish2/the-blue-alliance
|
Add test for event type parser
|
import unittest2
from models.event import Event
from helpers.event_helper import EventHelper
class TestUsfirstEventTypeParser(unittest2.TestCase):
def test_parse(self):
self.assertEqual(EventHelper.parseEventType("Regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("district"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("MI District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifying Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifier"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("MI FRC State Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Qualifying Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Championship Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Championship Finals"), Event.CMP_FINALS)
self.assertEqual(EventHelper.parseEventType("Championship"), Event.CMP_FINALS)
|
<commit_before><commit_msg>Add test for event type parser<commit_after>
|
import unittest2
from models.event import Event
from helpers.event_helper import EventHelper
class TestUsfirstEventTypeParser(unittest2.TestCase):
def test_parse(self):
self.assertEqual(EventHelper.parseEventType("Regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("district"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("MI District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifying Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifier"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("MI FRC State Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Qualifying Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Championship Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Championship Finals"), Event.CMP_FINALS)
self.assertEqual(EventHelper.parseEventType("Championship"), Event.CMP_FINALS)
|
Add test for event type parserimport unittest2
from models.event import Event
from helpers.event_helper import EventHelper
class TestUsfirstEventTypeParser(unittest2.TestCase):
def test_parse(self):
self.assertEqual(EventHelper.parseEventType("Regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("district"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("MI District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifying Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifier"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("MI FRC State Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Qualifying Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Championship Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Championship Finals"), Event.CMP_FINALS)
self.assertEqual(EventHelper.parseEventType("Championship"), Event.CMP_FINALS)
|
<commit_before><commit_msg>Add test for event type parser<commit_after>import unittest2
from models.event import Event
from helpers.event_helper import EventHelper
class TestUsfirstEventTypeParser(unittest2.TestCase):
def test_parse(self):
self.assertEqual(EventHelper.parseEventType("Regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("regional"), Event.REGIONAL)
self.assertEqual(EventHelper.parseEventType("District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("district"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("MI District"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifying Event"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("Qualifier"), Event.DISTRICT)
self.assertEqual(EventHelper.parseEventType("District Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("MI FRC State Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Qualifying Championship"), Event.DISTRICT_CMP)
self.assertEqual(EventHelper.parseEventType("Championship Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Division"), Event.CMP_DIVISION)
self.assertEqual(EventHelper.parseEventType("Championship Finals"), Event.CMP_FINALS)
self.assertEqual(EventHelper.parseEventType("Championship"), Event.CMP_FINALS)
|
|
bc6a9324c00909a62dc26477224bbfc58def9eb2
|
external_tools/src/main/python/omero56/scripts/omerok8s_impc_config.py
|
external_tools/src/main/python/omero56/scripts/omerok8s_impc_config.py
|
"""Configure Omero on a k8s server for the way it is used in IMPC
"""
import sys
import argparse
import omero.cli  # For some reason, if this import is omitted the next line throws an error
from omero import ApiUsageException
from omero.cli import NonZeroReturnCode
from omeroservice import OmeroService
from utils import get_properties_from_configuration_file
argument_parser = argparse.ArgumentParser(
description="Configure Omero on a k8s server for use in IMPC"
)
argument_parser.add_argument("--omero-props-path", required=True,
help="Path to configuration file"
)
args = argument_parser.parse_args()
omero_props = get_properties_from_configuration_file(args.omero_props_path)
omero_host = omero_props['omerohost']
omero_port = omero_props['omeroport']
omero_root_user = omero_props['omerouser']
omero_root_pass = omero_props['omeropass']
omero_public_user = omero_props['omeropublicuser']
omero_public_group = omero_props['omerogroup']
omero_public_pass = omero_props['omeropublicpass']
omero_system_group = omero_props['omerosystemgroup']
omero_service = OmeroService(omero_host, omero_port, omero_root_user, omero_root_pass, omero_system_group)
cli = omero_service.cli
def run_command(cli, cmd):
"""Run a command in the omero cli. Exit if error
"""
try:
cli.invoke(cmd, strict=True)
except (ApiUsageException, NonZeroReturnCode,) as e:
print(f"Error running command {cmd}.\nError was: {e}")
print("\nExiting")
sys.exit(-1)
# Create the public group
cmd = f"group add --type read-only --ignore-existing {omero_public_group}"
run_command(cli, cmd)
# Create the public user
cmd = f"user add --ignore-existing -i EBI " + \
f"--group-name {omero_public_group} -P {omero_public_pass} " + \
f"{omero_public_user} public user"
run_command(cli, cmd)
# Ensure the webclient allows login by the public user without any password
# Ensure the root user is part of the public group - so can import/export images
cmd = f"user joingroup --name {omero_public_user} --group-name {omero_public_group} --as-owner"
run_command(cli, cmd)
|
Add script to configure omerok8s for public_group and public_user
|
Add script to configure omerok8s for public_group and public_user
|
Python
|
apache-2.0
|
mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData
|
Add script to configure omerok8s for public_group and public_user
|
"""Configure Omero on a k8s server for the way it is used in IMPC
"""
import sys
import argparse
import omero.cli  # For some reason, if this import is omitted the next line throws an error
from omero import ApiUsageException
from omero.cli import NonZeroReturnCode
from omeroservice import OmeroService
from utils import get_properties_from_configuration_file
argument_parser = argparse.ArgumentParser(
description="Configure Omero on a k8s server for use in IMPC"
)
argument_parser.add_argument("--omero-props-path", required=True,
help="Path to configuration file"
)
args = argument_parser.parse_args()
omero_props = get_properties_from_configuration_file(args.omero_props_path)
omero_host = omero_props['omerohost']
omero_port = omero_props['omeroport']
omero_root_user = omero_props['omerouser']
omero_root_pass = omero_props['omeropass']
omero_public_user = omero_props['omeropublicuser']
omero_public_group = omero_props['omerogroup']
omero_public_pass = omero_props['omeropublicpass']
omero_system_group = omero_props['omerosystemgroup']
omero_service = OmeroService(omero_host, omero_port, omero_root_user, omero_root_pass, omero_system_group)
cli = omero_service.cli
def run_command(cli, cmd):
"""Run a command in the omero cli. Exit if error
"""
try:
cli.invoke(cmd, strict=True)
except (ApiUsageException, NonZeroReturnCode,) as e:
print(f"Error running command {cmd}.\nError was: {e}")
print("\nExiting")
sys.exit(-1)
# Create the public group
cmd = f"group add --type read-only --ignore-existing {omero_public_group}"
run_command(cli, cmd)
# Create the public user
cmd = f"user add --ignore-existing -i EBI " + \
f"--group-name {omero_public_group} -P {omero_public_pass} " + \
f"{omero_public_user} public user"
run_command(cli, cmd)
# Ensure the webclient allows login by the public user without any password
# Ensure the root user is part of the public group - so can import/export images
cmd = f"user joingroup --name {omero_public_user} --group-name {omero_public_group} --as-owner"
run_command(cli, cmd)
|
<commit_before><commit_msg>Add script to configure omerok8s for public_group and public_user<commit_after>
|
"""Configure Omero on a k8s server for the way it is used in IMPC
"""
import sys
import argparse
import omero.cli  # For some reason, if this import is omitted the next line throws an error
from omero import ApiUsageException
from omero.cli import NonZeroReturnCode
from omeroservice import OmeroService
from utils import get_properties_from_configuration_file
argument_parser = argparse.ArgumentParser(
description="Configure Omero on a k8s server for use in IMPC"
)
argument_parser.add_argument("--omero-props-path", required=True,
help="Path to configuration file"
)
args = argument_parser.parse_args()
omero_props = get_properties_from_configuration_file(args.omero_props_path)
omero_host = omero_props['omerohost']
omero_port = omero_props['omeroport']
omero_root_user = omero_props['omerouser']
omero_root_pass = omero_props['omeropass']
omero_public_user = omero_props['omeropublicuser']
omero_public_group = omero_props['omerogroup']
omero_public_pass = omero_props['omeropublicpass']
omero_system_group = omero_props['omerosystemgroup']
omero_service = OmeroService(omero_host, omero_port, omero_root_user, omero_root_pass, omero_system_group)
cli = omero_service.cli
def run_command(cli, cmd):
"""Run a command in the omero cli. Exit if error
"""
try:
cli.invoke(cmd, strict=True)
except (ApiUsageException, NonZeroReturnCode,) as e:
print(f"Error running command {cmd}.\nError was: {e}")
print("\nExiting")
sys.exit(-1)
# Create the public group
cmd = f"group add --type read-only --ignore-existing {omero_public_group}"
run_command(cli, cmd)
# Create the public user
cmd = f"user add --ignore-existing -i EBI " + \
f"--group-name {omero_public_group} -P {omero_public_pass} " + \
f"{omero_public_user} public user"
run_command(cli, cmd)
# Ensure the webclient allows login by the public user without any password
# Ensure the root user is part of the public group - so can import/export images
cmd = f"user joingroup --name {omero_public_user} --group-name {omero_public_group} --as-owner"
run_command(cli, cmd)
|
Add script to configure omerok8s for public_group and public_user"""Configure Omero on a k8s server for the way it is used in IMPC
"""
import sys
import argparse
import omero.cli  # For some reason, if this import is omitted the next line throws an error
from omero import ApiUsageException
from omero.cli import NonZeroReturnCode
from omeroservice import OmeroService
from utils import get_properties_from_configuration_file
argument_parser = argparse.ArgumentParser(
description="Configure Omero on a k8s server for use in IMPC"
)
argument_parser.add_argument("--omero-props-path", required=True,
help="Path to configuration file"
)
args = argument_parser.parse_args()
omero_props = get_properties_from_configuration_file(args.omero_props_path)
omero_host = omero_props['omerohost']
omero_port = omero_props['omeroport']
omero_root_user = omero_props['omerouser']
omero_root_pass = omero_props['omeropass']
omero_public_user = omero_props['omeropublicuser']
omero_public_group = omero_props['omerogroup']
omero_public_pass = omero_props['omeropublicpass']
omero_system_group = omero_props['omerosystemgroup']
omero_service = OmeroService(omero_host, omero_port, omero_root_user, omero_root_pass, omero_system_group)
cli = omero_service.cli
def run_command(cli, cmd):
"""Run a command in the omero cli. Exit if error
"""
try:
cli.invoke(cmd, strict=True)
except (ApiUsageException, NonZeroReturnCode,) as e:
print(f"Error running command {cmd}.\nError was: {e}")
print("\nExiting")
sys.exit(-1)
# Create the public group
cmd = f"group add --type read-only --ignore-existing {omero_public_group}"
run_command(cli, cmd)
# Create the public user
cmd = f"user add --ignore-existing -i EBI " + \
f"--group-name {omero_public_group} -P {omero_public_pass} " + \
f"{omero_public_user} public user"
run_command(cli, cmd)
# Ensure the webclient allows login by the public user without any password
# Ensure the root user is part of the public group - so can import/export images
cmd = f"user joingroup --name {omero_public_user} --group-name {omero_public_group} --as-owner"
run_command(cli, cmd)
|
<commit_before><commit_msg>Add script to configure omerok8s for public_group and public_user<commit_after>"""Configure Omero on a k8s server for the way it is used in IMPC
"""
import sys
import argparse
import omero.cli  # For some reason, if this import is omitted the next line throws an error
from omero import ApiUsageException
from omero.cli import NonZeroReturnCode
from omeroservice import OmeroService
from utils import get_properties_from_configuration_file
argument_parser = argparse.ArgumentParser(
description="Configure Omero on a k8s server for use in IMPC"
)
argument_parser.add_argument("--omero-props-path", required=True,
help="Path to configuration file"
)
args = argument_parser.parse_args()
omero_props = get_properties_from_configuration_file(args.omero_props_path)
omero_host = omero_props['omerohost']
omero_port = omero_props['omeroport']
omero_root_user = omero_props['omerouser']
omero_root_pass = omero_props['omeropass']
omero_public_user = omero_props['omeropublicuser']
omero_public_group = omero_props['omerogroup']
omero_public_pass = omero_props['omeropublicpass']
omero_system_group = omero_props['omerosystemgroup']
omero_service = OmeroService(omero_host, omero_port, omero_root_user, omero_root_pass, omero_system_group)
cli = omero_service.cli
def run_command(cli, cmd):
"""Run a command in the omero cli. Exit if error
"""
try:
cli.invoke(cmd, strict=True)
except (ApiUsageException, NonZeroReturnCode,) as e:
print(f"Error running command {cmd}.\nError was: {e}")
print("\nExiting")
sys.exit(-1)
# Create the public group
cmd = f"group add --type read-only --ignore-existing {omero_public_group}"
run_command(cli, cmd)
# Create the public user
cmd = f"user add --ignore-existing -i EBI " + \
f"--group-name {omero_public_group} -P {omero_public_pass} " + \
f"{omero_public_user} public user"
run_command(cli, cmd)
# Ensure the webclient allows login by the public user without any password
# Ensure the root user is part of the public group - so can import/export images
cmd = f"user joingroup --name {omero_public_user} --group-name {omero_public_group} --as-owner"
run_command(cli, cmd)
|
|
9cd5678fbeb3ad5a26bf9578a1f562c46a2de26e
|
example_iterator_with_custom_order.py
|
example_iterator_with_custom_order.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 21 21:16:43 2017
@author: ryuhei
"""
import numpy as np
from chainer.datasets import TupleDataset
from sklearn.preprocessing import LabelEncoder
from my_iterators import SerialIterator
class NPairMCIndexesSampler(object):
def __init__(self, labels, batch_size, num_batches):
assert len(labels) >= (batch_size * num_batches), (
"batch_size * num_batches must not exceed the number of examples")
self._labels = np.array(labels).flatten()
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
        assert batch_size % 2 == 0, ("batch_size must be an even number.")
        assert batch_size <= self.num_classes * 2, (
            "batch_size must not exceed twice the number of classes "
            "(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self.num_batches = num_batches
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __call__(self):
indexes = []
for _ in range(self.num_batches):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
indexes.append(anchor_indexes)
indexes.append(positive_indexes)
return np.concatenate(indexes)
if __name__ == '__main__':
batch_size = 10
num_batches = 5
repeat = True
labels = np.array(sum([[i]*10 for i in range(10)], []))
num_examples = len(labels)
x = np.arange(num_examples)
dataset = TupleDataset(x, labels)
indexes_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, repeat=repeat,
order_sampler=indexes_sampler)
for i in range(num_batches*2):
batch = next(it)
print len(batch)
print batch[:batch_size/2]
print batch[batch_size/2:]
print
|
Implement an example of the modified SerialIterator
|
Implement an example of the modified SerialIterator
|
Python
|
mit
|
ronekko/deep_metric_learning
|
Implement an example of the modified SerialIterator
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 21 21:16:43 2017
@author: ryuhei
"""
import numpy as np
from chainer.datasets import TupleDataset
from sklearn.preprocessing import LabelEncoder
from my_iterators import SerialIterator
class NPairMCIndexesSampler(object):
def __init__(self, labels, batch_size, num_batches):
assert len(labels) >= (batch_size * num_batches), (
"batch_size * num_batches must not exceed the number of examples")
self._labels = np.array(labels).flatten()
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
        assert batch_size % 2 == 0, ("batch_size must be an even number.")
        assert batch_size <= self.num_classes * 2, (
            "batch_size must not exceed twice the number of classes "
            "(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self.num_batches = num_batches
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __call__(self):
indexes = []
for _ in range(self.num_batches):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
indexes.append(anchor_indexes)
indexes.append(positive_indexes)
return np.concatenate(indexes)
if __name__ == '__main__':
batch_size = 10
num_batches = 5
repeat = True
labels = np.array(sum([[i]*10 for i in range(10)], []))
num_examples = len(labels)
x = np.arange(num_examples)
dataset = TupleDataset(x, labels)
indexes_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, repeat=repeat,
order_sampler=indexes_sampler)
for i in range(num_batches*2):
batch = next(it)
print len(batch)
print batch[:batch_size/2]
print batch[batch_size/2:]
print
|
<commit_before><commit_msg>Implement an example of the modified SerialIterator<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 21 21:16:43 2017
@author: ryuhei
"""
import numpy as np
from chainer.datasets import TupleDataset
from sklearn.preprocessing import LabelEncoder
from my_iterators import SerialIterator
class NPairMCIndexesSampler(object):
def __init__(self, labels, batch_size, num_batches):
assert len(labels) >= (batch_size * num_batches), (
"batch_size * num_batches must not exceed the number of examples")
self._labels = np.array(labels).flatten()
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
        assert batch_size % 2 == 0, ("batch_size must be an even number.")
        assert batch_size <= self.num_classes * 2, (
            "batch_size must not exceed twice the number of classes "
            "(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self.num_batches = num_batches
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __call__(self):
indexes = []
for _ in range(self.num_batches):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
indexes.append(anchor_indexes)
indexes.append(positive_indexes)
return np.concatenate(indexes)
if __name__ == '__main__':
batch_size = 10
num_batches = 5
repeat = True
labels = np.array(sum([[i]*10 for i in range(10)], []))
num_examples = len(labels)
x = np.arange(num_examples)
dataset = TupleDataset(x, labels)
indexes_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, repeat=repeat,
order_sampler=indexes_sampler)
for i in range(num_batches*2):
batch = next(it)
print len(batch)
print batch[:batch_size/2]
print batch[batch_size/2:]
print
|
Implement an example of the modified SerialIterator# -*- coding: utf-8 -*-
"""
Created on Sat Jan 21 21:16:43 2017
@author: ryuhei
"""
import numpy as np
from chainer.datasets import TupleDataset
from sklearn.preprocessing import LabelEncoder
from my_iterators import SerialIterator
class NPairMCIndexesSampler(object):
def __init__(self, labels, batch_size, num_batches):
assert len(labels) >= (batch_size * num_batches), (
"batch_size * num_batches must not exceed the number of examples")
self._labels = np.array(labels).flatten()
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
        assert batch_size % 2 == 0, ("batch_size must be an even number.")
        assert batch_size <= self.num_classes * 2, (
            "batch_size must not exceed twice the number of classes "
            "(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self.num_batches = num_batches
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __call__(self):
indexes = []
for _ in range(self.num_batches):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
indexes.append(anchor_indexes)
indexes.append(positive_indexes)
return np.concatenate(indexes)
if __name__ == '__main__':
batch_size = 10
num_batches = 5
repeat = True
labels = np.array(sum([[i]*10 for i in range(10)], []))
num_examples = len(labels)
x = np.arange(num_examples)
dataset = TupleDataset(x, labels)
indexes_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, repeat=repeat,
order_sampler=indexes_sampler)
for i in range(num_batches*2):
batch = next(it)
print len(batch)
print batch[:batch_size/2]
print batch[batch_size/2:]
print
|
<commit_before><commit_msg>Implement an example of the modified SerialIterator<commit_after># -*- coding: utf-8 -*-
"""
Created on Sat Jan 21 21:16:43 2017
@author: ryuhei
"""
import numpy as np
from chainer.datasets import TupleDataset
from sklearn.preprocessing import LabelEncoder
from my_iterators import SerialIterator
class NPairMCIndexesSampler(object):
def __init__(self, labels, batch_size, num_batches):
assert len(labels) >= (batch_size * num_batches), (
"batch_size * num_batches must not exceed the number of examples")
self._labels = np.array(labels).flatten()
self._label_encoder = LabelEncoder().fit(self._labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
        assert batch_size % 2 == 0, ("batch_size must be an even number.")
        assert batch_size <= self.num_classes * 2, (
            "batch_size must not exceed twice the number of classes "
            "(i.e. set batch_size <= {}).".format(self.num_classes * 2))
self.batch_size = batch_size
self.num_batches = num_batches
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def __call__(self):
indexes = []
for _ in range(self.num_batches):
random_classes = np.random.choice(
self.num_classes, self.batch_size / 2, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
indexes.append(anchor_indexes)
indexes.append(positive_indexes)
return np.concatenate(indexes)
if __name__ == '__main__':
batch_size = 10
num_batches = 5
repeat = True
labels = np.array(sum([[i]*10 for i in range(10)], []))
num_examples = len(labels)
x = np.arange(num_examples)
dataset = TupleDataset(x, labels)
indexes_sampler = NPairMCIndexesSampler(labels, batch_size, num_batches)
it = SerialIterator(dataset, batch_size, repeat=repeat,
order_sampler=indexes_sampler)
for i in range(num_batches*2):
batch = next(it)
print len(batch)
print batch[:batch_size/2]
print batch[batch_size/2:]
print
|
|
8768b66ae982d19964f85feb325a1b0f35ed1c87
|
odo/backends/dask_array.py
|
odo/backends/dask_array.py
|
from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import discover
from toolz import merge, accumulate
from datashape.dispatch import dispatch
from datashape import DataShape
from operator import add
import itertools
from dask.array.core import rec_concatenate, Array, getem, get, names, from_array
from dask.core import flatten
from dask.compatibility import long
from ..append import append
from ..convert import convert
from ..resource import resource
@discover.register(Array)
def discover_dask_array(a, **kwargs):
block = a._get_block(*([0] * a.ndim))
return DataShape(*(a.shape + (discover(block).measure,)))
arrays = [np.ndarray]
try:
import h5py
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
except ImportError:
pass
try:
import bcolz
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
except ImportError:
pass
@convert.register(Array, tuple(arrays), cost=0.01)
def array_to_dask(x, name=None, blockshape=None, **kwargs):
return from_array(x, blockshape=blockshape, name=name, **kwargs)
@convert.register(np.ndarray, Array, cost=0.5)
def dask_to_numpy(x, **kwargs):
return rec_concatenate(get(x.dask, x._keys(), **kwargs))
@convert.register(float, Array, cost=0.5)
def dask_to_float(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
return arr.store(out)
|
Migrate dask array odo backend from dask.
|
Migrate dask array odo backend from dask.
|
Python
|
bsd-3-clause
|
cpcloud/odo,ContinuumIO/odo,ywang007/odo,cpcloud/odo,alexmojaki/odo,quantopian/odo,ContinuumIO/odo,Dannnno/odo,cowlicks/odo,quantopian/odo,blaze/odo,cowlicks/odo,alexmojaki/odo,Dannnno/odo,ywang007/odo,blaze/odo
|
Migrate dask array odo backend from dask.
|
from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import discover
from toolz import merge, accumulate
from datashape.dispatch import dispatch
from datashape import DataShape
from operator import add
import itertools
from dask.array.core import rec_concatenate, Array, getem, get, names, from_array
from dask.core import flatten
from dask.compatibility import long
from ..append import append
from ..convert import convert
from ..resource import resource
@discover.register(Array)
def discover_dask_array(a, **kwargs):
block = a._get_block(*([0] * a.ndim))
return DataShape(*(a.shape + (discover(block).measure,)))
arrays = [np.ndarray]
try:
import h5py
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
except ImportError:
pass
try:
import bcolz
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
except ImportError:
pass
@convert.register(Array, tuple(arrays), cost=0.01)
def array_to_dask(x, name=None, blockshape=None, **kwargs):
return from_array(x, blockshape=blockshape, name=name, **kwargs)
@convert.register(np.ndarray, Array, cost=0.5)
def dask_to_numpy(x, **kwargs):
return rec_concatenate(get(x.dask, x._keys(), **kwargs))
@convert.register(float, Array, cost=0.5)
def dask_to_float(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
return arr.store(out)
|
<commit_before><commit_msg>Migrate dask array odo backend from dask.<commit_after>
|
from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import discover
from toolz import merge, accumulate
from datashape.dispatch import dispatch
from datashape import DataShape
from operator import add
import itertools
from dask.array.core import rec_concatenate, Array, getem, get, names, from_array
from dask.core import flatten
from dask.compatibility import long
from ..append import append
from ..convert import convert
from ..resource import resource
@discover.register(Array)
def discover_dask_array(a, **kwargs):
block = a._get_block(*([0] * a.ndim))
return DataShape(*(a.shape + (discover(block).measure,)))
arrays = [np.ndarray]
try:
import h5py
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
except ImportError:
pass
try:
import bcolz
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
except ImportError:
pass
@convert.register(Array, tuple(arrays), cost=0.01)
def array_to_dask(x, name=None, blockshape=None, **kwargs):
return from_array(x, blockshape=blockshape, name=name, **kwargs)
@convert.register(np.ndarray, Array, cost=0.5)
def dask_to_numpy(x, **kwargs):
return rec_concatenate(get(x.dask, x._keys(), **kwargs))
@convert.register(float, Array, cost=0.5)
def dask_to_float(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
return arr.store(out)
|
Migrate dask array odo backend from dask.from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import discover
from toolz import merge, accumulate
from datashape.dispatch import dispatch
from datashape import DataShape
from operator import add
import itertools
from dask.array.core import rec_concatenate, Array, getem, get, names, from_array
from dask.core import flatten
from dask.compatibility import long
from ..append import append
from ..convert import convert
from ..resource import resource
@discover.register(Array)
def discover_dask_array(a, **kwargs):
block = a._get_block(*([0] * a.ndim))
return DataShape(*(a.shape + (discover(block).measure,)))
arrays = [np.ndarray]
try:
import h5py
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
except ImportError:
pass
try:
import bcolz
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
except ImportError:
pass
@convert.register(Array, tuple(arrays), cost=0.01)
def array_to_dask(x, name=None, blockshape=None, **kwargs):
return from_array(x, blockshape=blockshape, name=name, **kwargs)
@convert.register(np.ndarray, Array, cost=0.5)
def dask_to_numpy(x, **kwargs):
return rec_concatenate(get(x.dask, x._keys(), **kwargs))
@convert.register(float, Array, cost=0.5)
def dask_to_float(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
return arr.store(out)
|
<commit_before><commit_msg>Migrate dask array odo backend from dask.<commit_after>from __future__ import absolute_import, division, print_function
import numpy as np
from datashape import discover
from toolz import merge, accumulate
from datashape.dispatch import dispatch
from datashape import DataShape
from operator import add
import itertools
from dask.array.core import rec_concatenate, Array, getem, get, names, from_array
from dask.core import flatten
from dask.compatibility import long
from ..append import append
from ..convert import convert
from ..resource import resource
@discover.register(Array)
def discover_dask_array(a, **kwargs):
block = a._get_block(*([0] * a.ndim))
return DataShape(*(a.shape + (discover(block).measure,)))
arrays = [np.ndarray]
try:
import h5py
arrays.append(h5py.Dataset)
@dispatch(h5py.Dataset, (int, long))
def resize(x, size):
s = list(x.shape)
s[0] = size
return resize(x, tuple(s))
@dispatch(h5py.Dataset, tuple)
def resize(x, shape):
return x.resize(shape)
except ImportError:
pass
try:
import bcolz
arrays.append(bcolz.carray)
@dispatch(bcolz.carray, (int, long))
def resize(x, size):
return x.resize(size)
except ImportError:
pass
@convert.register(Array, tuple(arrays), cost=0.01)
def array_to_dask(x, name=None, blockshape=None, **kwargs):
return from_array(x, blockshape=blockshape, name=name, **kwargs)
@convert.register(np.ndarray, Array, cost=0.5)
def dask_to_numpy(x, **kwargs):
return rec_concatenate(get(x.dask, x._keys(), **kwargs))
@convert.register(float, Array, cost=0.5)
def dask_to_float(x, **kwargs):
return x.compute()
@append.register(tuple(arrays), Array)
def store_Array_in_ooc_data(out, arr, inplace=False, **kwargs):
if not inplace:
# Resize output dataset to accept new data
assert out.shape[1:] == arr.shape[1:]
resize(out, out.shape[0] + arr.shape[0]) # elongate
return arr.store(out)
|
|
63ffa531eebfba19344dee67b3f417072012a7f4
|
CodeFights/rangeBitCount.py
|
CodeFights/rangeBitCount.py
|
#!/usr/local/bin/python
# Code Fights Range Bit Count (Core) Problem
def rangeBitCount(a, b):
return (''.join([bin(n) for n in range(a, b + 1)])).count('1')
def main():
tests = [
[2, 7, 11],
[0, 1, 1],
[1, 10, 17],
[8, 9, 3],
[9, 10, 4]
]
for t in tests:
res = rangeBitCount(t[0], t[1])
if t[2] == res:
print("PASSED: rangeBitCount({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: rangeBitCount({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
Solve Code Fights range bit count problem
|
Solve Code Fights range bit count problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights range bit count problem
|
#!/usr/local/bin/python
# Code Fights Range Bit Count (Core) Problem
def rangeBitCount(a, b):
return (''.join([bin(n) for n in range(a, b + 1)])).count('1')
def main():
tests = [
[2, 7, 11],
[0, 1, 1],
[1, 10, 17],
[8, 9, 3],
[9, 10, 4]
]
for t in tests:
res = rangeBitCount(t[0], t[1])
if t[2] == res:
print("PASSED: rangeBitCount({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: rangeBitCount({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights range bit count problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Range Bit Count (Core) Problem
def rangeBitCount(a, b):
return (''.join([bin(n) for n in range(a, b + 1)])).count('1')
def main():
tests = [
[2, 7, 11],
[0, 1, 1],
[1, 10, 17],
[8, 9, 3],
[9, 10, 4]
]
for t in tests:
res = rangeBitCount(t[0], t[1])
if t[2] == res:
print("PASSED: rangeBitCount({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: rangeBitCount({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
Solve Code Fights range bit count problem#!/usr/local/bin/python
# Code Fights Range Bit Count (Core) Problem
def rangeBitCount(a, b):
return (''.join([bin(n) for n in range(a, b + 1)])).count('1')
def main():
tests = [
[2, 7, 11],
[0, 1, 1],
[1, 10, 17],
[8, 9, 3],
[9, 10, 4]
]
for t in tests:
res = rangeBitCount(t[0], t[1])
if t[2] == res:
print("PASSED: rangeBitCount({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: rangeBitCount({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights range bit count problem<commit_after>#!/usr/local/bin/python
# Code Fights Range Bit Count (Core) Problem
def rangeBitCount(a, b):
return (''.join([bin(n) for n in range(a, b + 1)])).count('1')
def main():
tests = [
[2, 7, 11],
[0, 1, 1],
[1, 10, 17],
[8, 9, 3],
[9, 10, 4]
]
for t in tests:
res = rangeBitCount(t[0], t[1])
if t[2] == res:
print("PASSED: rangeBitCount({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: rangeBitCount({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, t[2]))
if __name__ == '__main__':
main()
|
|
cca072b6cf5b0162e1cf10d6873739d762a7a05e
|
examples/console_logger_signals.py
|
examples/console_logger_signals.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Log radiation hits or noise signals to the console.
Released under MIT License. See LICENSE file.
By Yoan Tournade <y@yoantournade.com>
"""
import time
from PiPocketGeiger import RadiationWatch
def example_run_context():
example_run_context.hit_flag = False
def onRadiation():
example_run_context.hit_flag = True
print("Ray appeared!")
def onNoise():
print("Noisy and moving around here!")
# Create the RadiationWatch object, specifying the used GPIO pins ...
with RadiationWatch(24, 23) as radiation_watch:
print("Waiting for gamma rays to hit the Pocket Geiger.")
radiation_watch.register_radiation_callback(onRadiation)
radiation_watch.register_noise_callback(onNoise)
while 1:
# ... and print readings after radiation hit.
if example_run_context.hit_flag:
print(radiation_watch.status())
example_run_context.hit_flag = False
time.sleep(3)
if __name__ == "__main__":
example_run_context()
|
Add example using callbacks/IRQs for easier debug
|
Add example using callbacks/IRQs for easier debug
|
Python
|
mit
|
MonsieurV/PiPocketGeiger
|
Add example using callbacks/IRQs for easier debug
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Log radiation hits or noise signals to the console.
Released under MIT License. See LICENSE file.
By Yoan Tournade <y@yoantournade.com>
"""
import time
from PiPocketGeiger import RadiationWatch
def example_run_context():
example_run_context.hit_flag = False
def onRadiation():
example_run_context.hit_flag = True
print("Ray appeared!")
def onNoise():
print("Noisy and moving around here!")
# Create the RadiationWatch object, specifying the used GPIO pins ...
with RadiationWatch(24, 23) as radiation_watch:
print("Waiting for gamma rays to hit the Pocket Geiger.")
radiation_watch.register_radiation_callback(onRadiation)
radiation_watch.register_noise_callback(onNoise)
while 1:
# ... and print readings after radiation hit.
if example_run_context.hit_flag:
print(radiation_watch.status())
example_run_context.hit_flag = False
time.sleep(3)
if __name__ == "__main__":
example_run_context()
|
<commit_before><commit_msg>Add example using callbacks/IRQs for easier debug<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Log radiation hits or noise signals to the console.
Released under MIT License. See LICENSE file.
By Yoan Tournade <y@yoantournade.com>
"""
import time
from PiPocketGeiger import RadiationWatch
def example_run_context():
example_run_context.hit_flag = False
def onRadiation():
example_run_context.hit_flag = True
print("Ray appeared!")
def onNoise():
print("Noisy and moving around here!")
# Create the RadiationWatch object, specifying the used GPIO pins ...
with RadiationWatch(24, 23) as radiation_watch:
print("Waiting for gamma rays to hit the Pocket Geiger.")
radiation_watch.register_radiation_callback(onRadiation)
radiation_watch.register_noise_callback(onNoise)
while 1:
# ... and print readings after radiation hit.
if example_run_context.hit_flag:
print(radiation_watch.status())
example_run_context.hit_flag = False
time.sleep(3)
if __name__ == "__main__":
example_run_context()
|
Add example using callbacks/IRQs for easier debug#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Log radiation hits or noise signals to the console.
Released under MIT License. See LICENSE file.
By Yoan Tournade <y@yoantournade.com>
"""
import time
from PiPocketGeiger import RadiationWatch
def example_run_context():
example_run_context.hit_flag = False
def onRadiation():
example_run_context.hit_flag = True
print("Ray appeared!")
def onNoise():
print("Noisy and moving around here!")
# Create the RadiationWatch object, specifying the used GPIO pins ...
with RadiationWatch(24, 23) as radiation_watch:
print("Waiting for gamma rays to hit the Pocket Geiger.")
radiation_watch.register_radiation_callback(onRadiation)
radiation_watch.register_noise_callback(onNoise)
while 1:
# ... and print readings after radiation hit.
if example_run_context.hit_flag:
print(radiation_watch.status())
example_run_context.hit_flag = False
time.sleep(3)
if __name__ == "__main__":
example_run_context()
|
<commit_before><commit_msg>Add example using callbacks/IRQs for easier debug<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Log radiation hits or noise signals to the console.
Released under MIT License. See LICENSE file.
By Yoan Tournade <y@yoantournade.com>
"""
import time
from PiPocketGeiger import RadiationWatch
def example_run_context():
example_run_context.hit_flag = False
def onRadiation():
example_run_context.hit_flag = True
print("Ray appeared!")
def onNoise():
print("Noisy and moving around here!")
# Create the RadiationWatch object, specifying the used GPIO pins ...
with RadiationWatch(24, 23) as radiation_watch:
print("Waiting for gamma rays to hit the Pocket Geiger.")
radiation_watch.register_radiation_callback(onRadiation)
radiation_watch.register_noise_callback(onNoise)
while 1:
# ... and print readings after radiation hit.
if example_run_context.hit_flag:
print(radiation_watch.status())
example_run_context.hit_flag = False
time.sleep(3)
if __name__ == "__main__":
example_run_context()
|
|
fe3f7ae8eb9390a4fe3f59e6244d4bbd6af7a9cd
|
mojo/services/html_viewer/view_url.py
|
mojo/services/html_viewer/view_url.py
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo" %
build_dir)
options.append('--args-for=mojo:kiosk_wm %s' % args.url)
app_to_run = "mojo:kiosk_wm"
return shell_command + options + [app_to_run]
def main():
parser = argparse.ArgumentParser(
description="View a URL with HTMLViewer in the Kiosk window manager. "
"You must have built //mojo/services/html_viewer and "
"//mojo/services/network first. Note that this will "
"currently often fail spectacularly due to lack of binary "
"stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
Add script to view URL in HTMLViewer
|
Add script to view URL in HTMLViewer
This script takes advantage of the fact that Mojo binaries are published in the
cloud to add functionality for viewing a URL in HTMLViewer embedded in the
kiosk window manager.
Review URL: https://codereview.chromium.org/982523004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319085}
|
Python
|
bsd-3-clause
|
TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,ltilve/chromium,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,Just-D/chromium-1,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,fujunwei/chromium-crosswalk,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,Chilledheart/chromium,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,ltilve/chromium,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Fireblend/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,Chilledheart/chromium,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,Fireblend/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,ltilve/chromium,Pluto-tv/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,ltilve/chromium,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ltilve/chromium,TheTypoMaster/chromium-crosswalk,fujunwei/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1
|
Add script to view URL in HTMLViewer
This script takes advantage of the fact that Mojo binaries are published in the
cloud to add functionality for viewing a URL in HTMLViewer embedded in the
kiosk window manager.
Review URL: https://codereview.chromium.org/982523004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319085}
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo" %
build_dir)
options.append('--args-for=mojo:kiosk_wm %s' % args.url)
app_to_run = "mojo:kiosk_wm"
return shell_command + options + [app_to_run]
def main():
parser = argparse.ArgumentParser(
description="View a URL with HTMLViewer in the Kiosk window manager. "
"You must have built //mojo/services/html_viewer and "
"//mojo/services/network first. Note that this will "
"currently often fail spectacularly due to lack of binary "
"stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to view URL in HTMLViewer
This script takes advantage of the fact that Mojo binaries are published in the
cloud to add functionality for viewing a URL in HTMLViewer embedded in the
kiosk window manager.
Review URL: https://codereview.chromium.org/982523004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319085}<commit_after>
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo" %
build_dir)
options.append('--args-for=mojo:kiosk_wm %s' % args.url)
app_to_run = "mojo:kiosk_wm"
return shell_command + options + [app_to_run]
def main():
parser = argparse.ArgumentParser(
description="View a URL with HTMLViewer in the Kiosk window manager. "
"You must have built //mojo/services/html_viewer and "
"//mojo/services/network first. Note that this will "
"currently often fail spectacularly due to lack of binary "
"stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
Add script to view URL in HTMLViewer
This script takes advantage of the fact that Mojo binaries are published in the
cloud to add functionality for viewing a URL in HTMLViewer embedded in the
kiosk window manager.
Review URL: https://codereview.chromium.org/982523004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319085}#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo" %
build_dir)
options.append('--args-for=mojo:kiosk_wm %s' % args.url)
app_to_run = "mojo:kiosk_wm"
return shell_command + options + [app_to_run]
def main():
parser = argparse.ArgumentParser(
description="View a URL with HTMLViewer in the Kiosk window manager. "
"You must have built //mojo/services/html_viewer and "
"//mojo/services/network first. Note that this will "
"currently often fail spectacularly due to lack of binary "
"stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to view URL in HTMLViewer
This script takes advantage of the fact that Mojo binaries are published in the
cloud to add functionality for viewing a URL in HTMLViewer embedded in the
kiosk window manager.
Review URL: https://codereview.chromium.org/982523004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#319085}<commit_after>#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import subprocess
import sys
root_path = os.path.realpath(
os.path.join(
os.path.dirname(
os.path.realpath(__file__)),
os.pardir,
os.pardir,
os.pardir))
def _BuildShellCommand(args):
sdk_version = subprocess.check_output(["cat",
"third_party/mojo/src/mojo/public/VERSION"], cwd=root_path)
build_dir = os.path.join(root_path, args.build_dir)
shell_command = [os.path.join(build_dir, "mojo_shell")]
options = []
options.append(
"--origin=https://storage.googleapis.com/mojo/services/linux-x64/%s" %
sdk_version)
options.append("--url-mappings=mojo:html_viewer=file://%s/html_viewer.mojo" %
build_dir)
options.append('--args-for=mojo:kiosk_wm %s' % args.url)
app_to_run = "mojo:kiosk_wm"
return shell_command + options + [app_to_run]
def main():
parser = argparse.ArgumentParser(
description="View a URL with HTMLViewer in the Kiosk window manager. "
"You must have built //mojo/services/html_viewer and "
"//mojo/services/network first. Note that this will "
"currently often fail spectacularly due to lack of binary "
"stability in Mojo.")
parser.add_argument(
"--build-dir",
help="Path to the dir containing the linux-x64 binaries relative to the "
"repo root (default: %(default)s)",
default="out/Release")
parser.add_argument("url",
help="The URL to be viewed")
args = parser.parse_args()
return subprocess.call(_BuildShellCommand(args))
if __name__ == '__main__':
sys.exit(main())
|
|
6a835fd8913cdd3a9dc76530230ae2c73d88b48f
|
tests/name_injection_test.py
|
tests/name_injection_test.py
|
"""Test for the name inject utility."""
from drudge import Drudge
def test_drudge_injects_names():
"""Test the name injection method of drudge."""
dr = Drudge(None) # Dummy drudge.
string_name = 'string_name'
dr.set_name(string_name)
dr.set_name(1, 'one')
dr.inject_names(suffix='_')
assert string_name_ == string_name
assert one_ == 1
|
Add tests for name injection
|
Add tests for name injection
Since the name injection facility could taint the entire global
namespace of the module, its test is put into a separate module.
|
Python
|
mit
|
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
|
Add tests for name injection
Since the name injection facility could taint the entire global
namespace of the module, its test is put into a separate module.
|
"""Test for the name inject utility."""
from drudge import Drudge
def test_drudge_injects_names():
"""Test the name injection method of drudge."""
dr = Drudge(None) # Dummy drudge.
string_name = 'string_name'
dr.set_name(string_name)
dr.set_name(1, 'one')
dr.inject_names(suffix='_')
assert string_name_ == string_name
assert one_ == 1
|
<commit_before><commit_msg>Add tests for name injection
Since the name injection facility could taint the entire global
namespace of the module, its test is put into a separate module.<commit_after>
|
"""Test for the name inject utility."""
from drudge import Drudge
def test_drudge_injects_names():
"""Test the name injection method of drudge."""
dr = Drudge(None) # Dummy drudge.
string_name = 'string_name'
dr.set_name(string_name)
dr.set_name(1, 'one')
dr.inject_names(suffix='_')
assert string_name_ == string_name
assert one_ == 1
|
Add tests for name injection
Since the name injection facility could taint the entire global
namespace of the module, its test is put into a separate module."""Test for the name inject utility."""
from drudge import Drudge
def test_drudge_injects_names():
"""Test the name injection method of drudge."""
dr = Drudge(None) # Dummy drudge.
string_name = 'string_name'
dr.set_name(string_name)
dr.set_name(1, 'one')
dr.inject_names(suffix='_')
assert string_name_ == string_name
assert one_ == 1
|
<commit_before><commit_msg>Add tests for name injection
Since the name injection facility could taint the entire global
namespace of the module, its test is put into a separate module.<commit_after>"""Test for the name inject utility."""
from drudge import Drudge
def test_drudge_injects_names():
"""Test the name injection method of drudge."""
dr = Drudge(None) # Dummy drudge.
string_name = 'string_name'
dr.set_name(string_name)
dr.set_name(1, 'one')
dr.inject_names(suffix='_')
assert string_name_ == string_name
assert one_ == 1
|
|
69aa2be4eca4ecfa9a73ad38c34bb7a4e46bef97
|
tests/test_epsilon_greedy.py
|
tests/test_epsilon_greedy.py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import logging
import unittest
from explorers import epsilon_greedy
class TestEpsilonGreedy(unittest.TestCase):
def test_linear_decay_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.LinearDecayEpsilonGreedy(1.0, 0.1, 50,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 1.0)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
def test_constant_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.ConstantEpsilonGreedy(0.1,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 0.1)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
|
Add tests for epsilon greedy explorers
|
Add tests for epsilon greedy explorers
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
Add tests for epsilon greedy explorers
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import logging
import unittest
from explorers import epsilon_greedy
class TestEpsilonGreedy(unittest.TestCase):
def test_linear_decay_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.LinearDecayEpsilonGreedy(1.0, 0.1, 50,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 1.0)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
def test_constant_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.ConstantEpsilonGreedy(0.1,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 0.1)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
|
<commit_before><commit_msg>Add tests for epsilon greedy explorers<commit_after>
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import logging
import unittest
from explorers import epsilon_greedy
class TestEpsilonGreedy(unittest.TestCase):
def test_linear_decay_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.LinearDecayEpsilonGreedy(1.0, 0.1, 50,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 1.0)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
def test_constant_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.ConstantEpsilonGreedy(0.1,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 0.1)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
|
Add tests for epsilon greedy explorersfrom __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import logging
import unittest
from explorers import epsilon_greedy
class TestEpsilonGreedy(unittest.TestCase):
def test_linear_decay_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.LinearDecayEpsilonGreedy(1.0, 0.1, 50,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 1.0)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
def test_constant_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.ConstantEpsilonGreedy(0.1,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 0.1)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
|
<commit_before><commit_msg>Add tests for epsilon greedy explorers<commit_after>from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
import logging
import unittest
from explorers import epsilon_greedy
class TestEpsilonGreedy(unittest.TestCase):
def test_linear_decay_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.LinearDecayEpsilonGreedy(1.0, 0.1, 50,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 1.0)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
def test_constant_epsilon_greedy(self):
random_action_func_count = [0]
greedy_action_func_count = [0]
def random_action_func():
random_action_func_count[0] += 1
return 0
def greedy_action_func():
greedy_action_func_count[0] += 1
return 0
explorer = epsilon_greedy.ConstantEpsilonGreedy(0.1,
random_action_func)
explorer.logger.addHandler(logging.StreamHandler())
explorer.logger.setLevel(logging.DEBUG)
self.assertAlmostEqual(explorer.epsilon, 0.1)
for t in range(100):
explorer.select_action(t, greedy_action_func)
self.assertAlmostEqual(explorer.epsilon, 0.1)
|
|
86da129dd4d9665dc15218c1d5b4673ee33780f4
|
factory/tools/cat_logs.py
|
factory/tools/cat_logs.py
|
#!/bin/env python
#
# cat_logs.py
#
# Print out the logs for a certain date
#
# Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]
#
import sys,os,os.path,time
sys.path.append("lib")
sys.path.append("..")
sys.path.append("../../lib")
import gWftArgsHelper,gWftLogParser
import glideFactoryConfig
USAGE="Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]"
# return a GlideinDescript with
# factory_dir, date_arr and time_arr
def parse_args():
if len(sys.argv)<3:
raise ValueError,"Not enough arguments!"
factory_dir=sys.argv[1]
try:
glideFactoryConfig.factoryConfig.glidein_descript_file=os.path.join(factory_dir,glideFactoryConfig.factoryConfig.glidein_descript_file)
glideinDescript=glideFactoryConfig.GlideinDescript()
except:
raise ValueError,"%s is not a factory!"%factory_dir
glideinDescript.factory_dir=factory_dir
glideinDescript.date_arr=gWftArgsHelper.parse_date(sys.argv[2])
if len(sys.argv)>=4:
glideinDescript.time_arr=gWftArgsHelper.parse_time(sys.argv[3])
else:
glideinDescript.time_arr=(0,0,0)
return glideinDescript
def main():
try:
glideinDescript=parse_args()
except ValueError, e:
sys.stderr.write("%s\n\n%s\n"%(e,USAGE))
sys.exit(1)
entries=glideinDescript.data['Entries'].split(',')
log_list=gWftLogParser.get_glidein_logs(glideinDescript.factory_dir,entries,glideinDescript.date_arr,glideinDescript.time_arr,"err")
for fname in log_list:
sys.stdout.write("%s\n"%fname)
sys.stdout.write("===========================================================\n")
fd=open(fname,"r")
sys.stdout.write(fd.read())
fd.close()
sys.stdout.write("\n")
if __name__ == '__main__':
main()
|
Print the logs for a certain date
|
Print the logs for a certain date
|
Python
|
bsd-3-clause
|
holzman/glideinwms-old,bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS,bbockelm/glideinWMS,holzman/glideinwms-old,bbockelm/glideinWMS
|
Print the logs for a certain date
|
#!/bin/env python
#
# cat_logs.py
#
# Print out the logs for a certain date
#
# Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]
#
import sys,os,os.path,time
sys.path.append("lib")
sys.path.append("..")
sys.path.append("../../lib")
import gWftArgsHelper,gWftLogParser
import glideFactoryConfig
USAGE="Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]"
# return a GlideinDescript with
# factory_dir, date_arr and time_arr
def parse_args():
if len(sys.argv)<3:
raise ValueError,"Not enough arguments!"
factory_dir=sys.argv[1]
try:
glideFactoryConfig.factoryConfig.glidein_descript_file=os.path.join(factory_dir,glideFactoryConfig.factoryConfig.glidein_descript_file)
glideinDescript=glideFactoryConfig.GlideinDescript()
except:
raise ValueError,"%s is not a factory!"%factory_dir
glideinDescript.factory_dir=factory_dir
glideinDescript.date_arr=gWftArgsHelper.parse_date(sys.argv[2])
if len(sys.argv)>=4:
glideinDescript.time_arr=gWftArgsHelper.parse_time(sys.argv[3])
else:
glideinDescript.time_arr=(0,0,0)
return glideinDescript
def main():
try:
glideinDescript=parse_args()
except ValueError, e:
sys.stderr.write("%s\n\n%s\n"%(e,USAGE))
sys.exit(1)
entries=glideinDescript.data['Entries'].split(',')
log_list=gWftLogParser.get_glidein_logs(glideinDescript.factory_dir,entries,glideinDescript.date_arr,glideinDescript.time_arr,"err")
for fname in log_list:
sys.stdout.write("%s\n"%fname)
sys.stdout.write("===========================================================\n")
fd=open(fname,"r")
sys.stdout.write(fd.read())
fd.close()
sys.stdout.write("\n")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Print the logs for a certain date<commit_after>
|
#!/bin/env python
#
# cat_logs.py
#
# Print out the logs for a certain date
#
# Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]
#
import sys,os,os.path,time
sys.path.append("lib")
sys.path.append("..")
sys.path.append("../../lib")
import gWftArgsHelper,gWftLogParser
import glideFactoryConfig
USAGE="Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]"
# return a GlideinDescript with
# factory_dir, date_arr and time_arr
def parse_args():
if len(sys.argv)<3:
raise ValueError,"Not enough arguments!"
factory_dir=sys.argv[1]
try:
glideFactoryConfig.factoryConfig.glidein_descript_file=os.path.join(factory_dir,glideFactoryConfig.factoryConfig.glidein_descript_file)
glideinDescript=glideFactoryConfig.GlideinDescript()
except:
raise ValueError,"%s is not a factory!"%factory_dir
glideinDescript.factory_dir=factory_dir
glideinDescript.date_arr=gWftArgsHelper.parse_date(sys.argv[2])
if len(sys.argv)>=4:
glideinDescript.time_arr=gWftArgsHelper.parse_time(sys.argv[3])
else:
glideinDescript.time_arr=(0,0,0)
return glideinDescript
def main():
try:
glideinDescript=parse_args()
except ValueError, e:
sys.stderr.write("%s\n\n%s\n"%(e,USAGE))
sys.exit(1)
entries=glideinDescript.data['Entries'].split(',')
log_list=gWftLogParser.get_glidein_logs(glideinDescript.factory_dir,entries,glideinDescript.date_arr,glideinDescript.time_arr,"err")
for fname in log_list:
sys.stdout.write("%s\n"%fname)
sys.stdout.write("===========================================================\n")
fd=open(fname,"r")
sys.stdout.write(fd.read())
fd.close()
sys.stdout.write("\n")
if __name__ == '__main__':
main()
|
Print the logs for a certain date#!/bin/env python
#
# cat_logs.py
#
# Print out the logs for a certain date
#
# Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]
#
import sys,os,os.path,time
sys.path.append("lib")
sys.path.append("..")
sys.path.append("../../lib")
import gWftArgsHelper,gWftLogParser
import glideFactoryConfig
USAGE="Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]"
# return a GlideinDescript with
# factory_dir, date_arr and time_arr
def parse_args():
if len(sys.argv)<3:
raise ValueError,"Not enough arguments!"
factory_dir=sys.argv[1]
try:
glideFactoryConfig.factoryConfig.glidein_descript_file=os.path.join(factory_dir,glideFactoryConfig.factoryConfig.glidein_descript_file)
glideinDescript=glideFactoryConfig.GlideinDescript()
except:
raise ValueError,"%s is not a factory!"%factory_dir
glideinDescript.factory_dir=factory_dir
glideinDescript.date_arr=gWftArgsHelper.parse_date(sys.argv[2])
if len(sys.argv)>=4:
glideinDescript.time_arr=gWftArgsHelper.parse_time(sys.argv[3])
else:
glideinDescript.time_arr=(0,0,0)
return glideinDescript
def main():
try:
glideinDescript=parse_args()
except ValueError, e:
sys.stderr.write("%s\n\n%s\n"%(e,USAGE))
sys.exit(1)
entries=glideinDescript.data['Entries'].split(',')
log_list=gWftLogParser.get_glidein_logs(glideinDescript.factory_dir,entries,glideinDescript.date_arr,glideinDescript.time_arr,"err")
for fname in log_list:
sys.stdout.write("%s\n"%fname)
sys.stdout.write("===========================================================\n")
fd=open(fname,"r")
sys.stdout.write(fd.read())
fd.close()
sys.stdout.write("\n")
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Print the logs for a certain date<commit_after>#!/bin/env python
#
# cat_logs.py
#
# Print out the logs for a certain date
#
# Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]
#
import sys,os,os.path,time
sys.path.append("lib")
sys.path.append("..")
sys.path.append("../../lib")
import gWftArgsHelper,gWftLogParser
import glideFactoryConfig
USAGE="Usage: cat_logs.py <factory> YY/MM/DD [hh:mm:ss]"
# return a GlideinDescript with
# factory_dir, date_arr and time_arr
def parse_args():
if len(sys.argv)<3:
raise ValueError,"Not enough arguments!"
factory_dir=sys.argv[1]
try:
glideFactoryConfig.factoryConfig.glidein_descript_file=os.path.join(factory_dir,glideFactoryConfig.factoryConfig.glidein_descript_file)
glideinDescript=glideFactoryConfig.GlideinDescript()
except:
raise ValueError,"%s is not a factory!"%factory_dir
glideinDescript.factory_dir=factory_dir
glideinDescript.date_arr=gWftArgsHelper.parse_date(sys.argv[2])
if len(sys.argv)>=4:
glideinDescript.time_arr=gWftArgsHelper.parse_time(sys.argv[3])
else:
glideinDescript.time_arr=(0,0,0)
return glideinDescript
def main():
try:
glideinDescript=parse_args()
except ValueError, e:
sys.stderr.write("%s\n\n%s\n"%(e,USAGE))
sys.exit(1)
entries=glideinDescript.data['Entries'].split(',')
log_list=gWftLogParser.get_glidein_logs(glideinDescript.factory_dir,entries,glideinDescript.date_arr,glideinDescript.time_arr,"err")
for fname in log_list:
sys.stdout.write("%s\n"%fname)
sys.stdout.write("===========================================================\n")
fd=open(fname,"r")
sys.stdout.write(fd.read())
fd.close()
sys.stdout.write("\n")
if __name__ == '__main__':
main()
|
|
6b0673334d14dca0e64ab9a760d8652b29e26b07
|
fs/test/test_mkdir.py
|
fs/test/test_mkdir.py
|
from __future__ import with_statement
from nose.tools import (
eq_ as eq,
)
from fs.test.util import (
maketemp,
assert_raises,
)
import errno
import os
import fs
def test_mkdir():
tmp = maketemp()
fs.path(tmp).child('foo').mkdir()
foo = os.path.join(tmp, 'foo')
assert os.path.isdir(foo)
def test_mkdir_bad_exists():
tmp = maketemp()
p = fs.path(tmp).child('foo')
with p.open('w') as f:
f.write('bar')
e = assert_raises(
OSError,
p.mkdir,
)
eq(e.errno, errno.EEXIST)
|
Add more tests for mkdir.
|
Add more tests for mkdir.
|
Python
|
mit
|
tv42/fs,nailor/filesystem
|
Add more tests for mkdir.
|
from __future__ import with_statement
from nose.tools import (
eq_ as eq,
)
from fs.test.util import (
maketemp,
assert_raises,
)
import errno
import os
import fs
def test_mkdir():
tmp = maketemp()
fs.path(tmp).child('foo').mkdir()
foo = os.path.join(tmp, 'foo')
assert os.path.isdir(foo)
def test_mkdir_bad_exists():
tmp = maketemp()
p = fs.path(tmp).child('foo')
with p.open('w') as f:
f.write('bar')
e = assert_raises(
OSError,
p.mkdir,
)
eq(e.errno, errno.EEXIST)
|
<commit_before><commit_msg>Add more tests for mkdir.<commit_after>
|
from __future__ import with_statement
from nose.tools import (
eq_ as eq,
)
from fs.test.util import (
maketemp,
assert_raises,
)
import errno
import os
import fs
def test_mkdir():
tmp = maketemp()
fs.path(tmp).child('foo').mkdir()
foo = os.path.join(tmp, 'foo')
assert os.path.isdir(foo)
def test_mkdir_bad_exists():
tmp = maketemp()
p = fs.path(tmp).child('foo')
with p.open('w') as f:
f.write('bar')
e = assert_raises(
OSError,
p.mkdir,
)
eq(e.errno, errno.EEXIST)
|
Add more tests for mkdir.from __future__ import with_statement
from nose.tools import (
eq_ as eq,
)
from fs.test.util import (
maketemp,
assert_raises,
)
import errno
import os
import fs
def test_mkdir():
tmp = maketemp()
fs.path(tmp).child('foo').mkdir()
foo = os.path.join(tmp, 'foo')
assert os.path.isdir(foo)
def test_mkdir_bad_exists():
tmp = maketemp()
p = fs.path(tmp).child('foo')
with p.open('w') as f:
f.write('bar')
e = assert_raises(
OSError,
p.mkdir,
)
eq(e.errno, errno.EEXIST)
|
<commit_before><commit_msg>Add more tests for mkdir.<commit_after>from __future__ import with_statement
from nose.tools import (
eq_ as eq,
)
from fs.test.util import (
maketemp,
assert_raises,
)
import errno
import os
import fs
def test_mkdir():
tmp = maketemp()
fs.path(tmp).child('foo').mkdir()
foo = os.path.join(tmp, 'foo')
assert os.path.isdir(foo)
def test_mkdir_bad_exists():
tmp = maketemp()
p = fs.path(tmp).child('foo')
with p.open('w') as f:
f.write('bar')
e = assert_raises(
OSError,
p.mkdir,
)
eq(e.errno, errno.EEXIST)
|
|
e3afe5628d42abb109f7e2b3be735ef02941051d
|
data/forms.py
|
data/forms.py
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
class JobTemplateForm(forms.Form):
name = forms.CharField(max_length=400)
template = forms.CharField(
widget=forms.Textarea(
attrs={
'cols': 50,
'rows': 26,
}),
required=False,
)
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div('name', css_class='col-xs-12'),
css_class='row'),
'template',
)
|
Create a form for the JobTemplate
|
Create a form for the JobTemplate
|
Python
|
mit
|
crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp,crcollins/chemtools-webapp
|
Create a form for the JobTemplate
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
class JobTemplateForm(forms.Form):
name = forms.CharField(max_length=400)
template = forms.CharField(
widget=forms.Textarea(
attrs={
'cols': 50,
'rows': 26,
}),
required=False,
)
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div('name', css_class='col-xs-12'),
css_class='row'),
'template',
)
|
<commit_before><commit_msg>Create a form for the JobTemplate<commit_after>
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
class JobTemplateForm(forms.Form):
name = forms.CharField(max_length=400)
template = forms.CharField(
widget=forms.Textarea(
attrs={
'cols': 50,
'rows': 26,
}),
required=False,
)
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div('name', css_class='col-xs-12'),
css_class='row'),
'template',
)
|
Create a form for the JobTemplatefrom django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
class JobTemplateForm(forms.Form):
name = forms.CharField(max_length=400)
template = forms.CharField(
widget=forms.Textarea(
attrs={
'cols': 50,
'rows': 26,
}),
required=False,
)
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div('name', css_class='col-xs-12'),
css_class='row'),
'template',
)
|
<commit_before><commit_msg>Create a form for the JobTemplate<commit_after>from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div
class JobTemplateForm(forms.Form):
name = forms.CharField(max_length=400)
template = forms.CharField(
widget=forms.Textarea(
attrs={
'cols': 50,
'rows': 26,
}),
required=False,
)
helper = FormHelper()
helper.form_tag = False
helper.layout = Layout(
Div(
Div('name', css_class='col-xs-12'),
css_class='row'),
'template',
)
|