Column schema of this dump (per-column type, then minimum and maximum value length; stringclasses columns list their number of distinct values):

    column            type            min        max
    commit            stringlengths   40         40
    old_file          stringlengths   4          118
    new_file          stringlengths   4          118
    old_contents      stringlengths   0          2.94k
    new_contents      stringlengths   1          4.43k
    subject           stringlengths   15         444
    message           stringlengths   16         3.45k
    lang              stringclasses   1 value
    license           stringclasses   13 values
    repos             stringlengths   5          43.2k
    prompt            stringlengths   17         4.58k
    response          stringlengths   1          4.43k
    prompt_tagged     stringlengths   58         4.62k
    response_tagged   stringlengths   1          4.43k
    text              stringlengths   132        7.29k
    text_tagged       stringlengths   173        7.33k
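Each record below lists the ten base columns, commit through repos. The remaining six columns in the schema (prompt, response, prompt_tagged, response_tagged, text, text_tagged) repeat the base columns' content verbatim: with old_contents empty, as it is in every record here, prompt reduces to the commit message, response and response_tagged repeat new_contents, and the tagged prompt wraps the message in <commit_before>/<commit_msg>/<commit_after> markers. A minimal sketch of that derivation, inferred from the rows in this dump; the derive_columns name and the dict-per-row representation are illustrative rather than part of the dataset, and any whitespace joining the concatenated parts is a guess, since this flattened dump does not preserve it:

    def derive_columns(row):
        """Rebuild the six derived columns from the base columns of one row.

        `row` is assumed to be a dict keyed by the column names above.
        Plain concatenation matches the marker adjacency visible in the
        rows below; separator characters, if any, are not recoverable here.
        """
        prompt = row["old_contents"] + row["message"]
        prompt_tagged = (
            "<commit_before>" + row["old_contents"]
            + "<commit_msg>" + row["message"]
            + "<commit_after>"
        )
        return {
            "prompt": prompt,
            "response": row["new_contents"],
            "prompt_tagged": prompt_tagged,
            "response_tagged": row["new_contents"],
            "text": prompt + row["new_contents"],
            "text_tagged": prompt_tagged + row["new_contents"],
        }
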
commit: ffef9ccb5b7aa4c4ef8e06879c3ab7fd8030c882
old_file: corehq/apps/users/management/commands/accept_invite.py
new_file: corehq/apps/users/management/commands/accept_invite.py
old_contents: (empty)
new_contents:

    from django.core.management.base import BaseCommand

    from corehq.apps.users.models import WebUser
    from corehq.apps.users.models import Invitation, CouchUser


    class Command(BaseCommand):
        help = "Accepts an invite into a domain for an existing web user"

        def add_arguments(self, parser):
            parser.add_argument('username')
            parser.add_argument('domain')

        def handle(self, username, domain, **options):
            try:
                invitation = Invitation.objects.get(domain=domain, email=username, is_accepted=False)
            except:
                print("No invites found for %s in Project Space (%s)" % (username, domain))
                return
            user = CouchUser.get_by_username(username)
            if not user:
                print("No existing web users active for email address %s. This command can only activate existing web users" % username)
                return
            print("Accepting %s's invite to Project Space(%s)" % (username, domain))
            user.add_as_web_user(invitation.domain, role=invitation.role,
                                 location_id=invitation.supply_point, program_id=invitation.program)
            invitation.is_accepted = True
            invitation.save()
            print("Operation completed")

subject: Add a management command for accepting invites
message: Add a management command for accepting invites
lang: Python
license: bsd-3-clause
repos: dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq

commit: d92b757a3cd1ad56da4207740732d16076ce561c
old_file: comics/crawler/crawlers/gunshow.py
new_file: comics/crawler/crawlers/gunshow.py
old_contents: (empty)
new_contents:

    from comics.crawler.base import BaseComicCrawler
    from comics.crawler.meta import BaseComicMeta


    class ComicMeta(BaseComicMeta):
        name = 'Gun Show'
        language = 'en'
        url = 'http://www.gunshowcomic.com/'
        start_date = '2008-09-04'
        history_capable_date = '2008-09-04'
        schedule = 'Mo,Tu,We,Th,Fr'
        rights = '"Lord KC Green"'


    class ComicCrawler(BaseComicCrawler):
        def _get_url(self):
            self.web_url = 'http://www.gunshowcomic.com/d/%(date)s.html' % {
                'date': self.pub_date.strftime('%Y%m%d'),
            }
            self.parse_web_page()
            for img in self.web_page.imgs:
                if 'src' in img and img['src'].startswith('/comics/'):
                    self.url = self.join_web_url(img['src'])
                    return

subject: Add crawler for 'Gun show'
message: Add crawler for 'Gun show'
lang: Python
license: agpl-3.0
repos: klette/comics,datagutten/comics,datagutten/comics,jodal/comics,jodal/comics,klette/comics,datagutten/comics,jodal/comics,klette/comics,datagutten/comics,jodal/comics

commit: 5f6dfb8d6cf37bf6237e18423799c94dd2741ee3
old_file: ideascube/conf/kb_ifb_bdi.py
new_file: ideascube/conf/kb_ifb_bdi.py
old_contents: (empty)
new_contents:

    # -*- coding: utf-8 -*-
    """KoomBook conf"""
    from .base import *  # noqa
    from django.utils.translation import ugettext_lazy as _

    # SECURITY WARNING: don't run with debug turned on in production!
    DEBUG = bool(os.environ.get('DEBUG', True))
    TEMPLATE_DEBUG = False
    ALLOWED_HOSTS = ['.koombook.lan.', 'localhost', '127.0.0.1']
    LANGUAGE_CODE = 'fr'
    TIME_ZONE = None

    # Ideas Box specifics
    STORAGE_ROOT = '/media/hdd/ideascube/storage'
    IDEASCUBE_NAME = 'Institut Français Burundi'
    DOMAIN = 'koombook.lan'
    BACKUP_FORMAT = 'gztar'
    STAFF_HOME_CARDS = [c for c in STAFF_HOME_CARDS
                        if c['url'] in ['user_list', 'server:power', 'server:backup']]
    HOME_CARDS = STAFF_HOME_CARDS + [
        {
            'id': 'blog',
        },
        {
            'id': 'mediacenter',
        },
        {
            'id': 'wikipedia',
        },
        {
            'id': 'khanacademy',
        },
        {
            'id': 'vikidia',
        },
        {
            'id': 'appinventor',
        },
        {
            'id': 'gutenberg',
        },
    ]
    IDEASCUBE_BODY_ID = 'koombook'

subject: Add conf file for ifb burundi
message: Add conf file for ifb burundi
lang: Python
license: agpl-3.0
repos: ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube

commit: 8f2f89129d24cdaa6bc37e4fec885ac78aa30ce4
old_file: test/automl/test_models.py
new_file: test/automl/test_models.py
old_contents: (empty)
new_contents:

    # -*- encoding: utf-8 -*-
    from __future__ import print_function

    import unittest
    import mock

    from autosklearn.automl import AutoML
    from autosklearn.util.backend import Backend


    class AutoMLStub(object):
        def __init__(self):
            self.__class__ = AutoML


    class AutoMlModelsTest(unittest.TestCase):

        def setUp(self):
            self.automl = AutoMLStub()
            self.automl._shared_mode = False
            self.automl._seed = 42
            self.automl._backend = mock.Mock(spec=Backend)
            self.automl._delete_output_directories = lambda: 0

        def test_only_loads_ensemble_models(self):
            identifiers = [(1, 2), (3, 4)]
            models = [ 42 ]
            self.automl._backend.load_ensemble.return_value.identifiers_ \
                = identifiers
            self.automl._backend.load_models_by_identifiers.side_effect \
                = lambda ids: models if ids is identifiers else None

            self.automl._load_models()

            self.assertEqual(models, self.automl.models_)

        def test_loads_all_models_if_no_ensemble(self):
            models = [ 42 ]
            self.automl._backend.load_ensemble.return_value = None
            self.automl._backend.load_all_models.return_value = models

            self.automl._load_models()

            self.assertEqual(models, self.automl.models_)

        def test_raises_if_no_models(self):
            self.automl._backend.load_ensemble.return_value = None
            self.automl._backend.load_all_models.return_value = []

            self.assertRaises(ValueError, self.automl._load_models)

subject: Test AutoML usage of Backend to load models
message: Test AutoML usage of Backend to load models
lang: Python
license: bsd-3-clause
repos: automl/auto-sklearn,automl/auto-sklearn

commit: 05e95158055e869be3bf4e98af8521afc8321ec4
old_file: lintcode/Medium/116_Jump_Game.py
new_file: lintcode/Medium/116_Jump_Game.py
old_contents: (empty)
new_contents:

    class Solution:
        # @param A, a list of integers
        # @return a boolean
        def canJump(self, A):
            # write your code here
            # Brute Force
            jumpable = [False] * len(A)
            jumpable[0] = True
            for i in range(len(A)):
                if (jumpable[i]):
                    for j in range(1, A[i] + 1):
                        if (i + j < len(jumpable)):
                            jumpable[i + j] = True
            return jumpable[len(A) - 1]

subject: Add solution to lintcode question 116
message: Add solution to lintcode question 116
lang: Python
license: mit
repos: Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode

commit: 4a817aff14ca6bc9717bd617d5bc49d15e698272
old_file: teuthology/orchestra/test/test_console.py
new_file: teuthology/orchestra/test/test_console.py
old_contents: (empty)
new_contents:

    from teuthology.config import config as teuth_config

    from .. import console


    class TestConsole(object):
        pass


    class TestPhysicalConsole(TestConsole):
        klass = console.PhysicalConsole

        def setup(self):
            teuth_config.ipmi_domain = 'ipmi_domain'
            teuth_config.ipmi_user = 'ipmi_user'
            teuth_config.ipmi_password = 'ipmi_pass'
            self.hostname = 'host'

        def test_build_command(self):
            cmd_templ = 'ipmitool -H {h}.{d} -I lanplus -U {u} -P {p} {c}'
            cons = self.klass(
                self.hostname,
                teuth_config.ipmi_user,
                teuth_config.ipmi_password,
                teuth_config.ipmi_domain,
            )
            sol_cmd = cons._build_command('sol activate')
            assert sol_cmd == cmd_templ.format(
                h=self.hostname,
                d=teuth_config.ipmi_domain,
                u=teuth_config.ipmi_user,
                p=teuth_config.ipmi_password,
                c='sol activate',
            )
            pc_cmd = cons._build_command('power cycle')
            assert pc_cmd == sol_cmd.replace('sol activate', 'power cycle')

subject: Add some tests for the console module
message:
    Add some tests for the console module

    ... better late than never?

    Signed-off-by: Zack Cerza <d7cdf09fc0f0426e98c9978ee42da5d61fa54986@redhat.com>
lang: Python
license: mit
repos: ceph/teuthology,dmick/teuthology,SUSE/teuthology,dmick/teuthology,SUSE/teuthology,ktdreyer/teuthology,dmick/teuthology,ktdreyer/teuthology,ceph/teuthology,SUSE/teuthology

commit: f55f555e4df4cc7423a76b1c73160250a1a279ab
old_file: migrations/versions/0165_another_letter_org.py
new_file: migrations/versions/0165_another_letter_org.py
old_contents: (empty)
new_contents:

    """empty message

    Revision ID: 0165_another_letter_org
    Revises: 0164_add_organisation_to_service
    Create Date: 2017-06-29 12:44:16.815039

    """

    # revision identifiers, used by Alembic.
    revision = '0165_another_letter_org'
    down_revision = '0164_add_organisation_to_service'

    from alembic import op


    NEW_ORGANISATIONS = [
        ('502', 'Welsh Revenue Authority'),
    ]


    def upgrade():
        for numeric_id, name in NEW_ORGANISATIONS:
            op.execute("""
                INSERT INTO dvla_organisation VALUES ('{}', '{}')
            """.format(numeric_id, name))


    def downgrade():
        for numeric_id, _ in NEW_ORGANISATIONS:
            op.execute("""
                DELETE FROM dvla_organisation WHERE id = '{}'
            """.format(numeric_id))

subject: Add letter logo for Welsh Revenue Authority
message:
    Add letter logo for Welsh Revenue Authority

    Depends on:

    - [ ] https://github.com/alphagov/notifications-template-preview/pull/94
lang: Python
license: mit
repos: alphagov/notifications-api,alphagov/notifications-api

commit: 66cfc30621c7a2bc5ed8685cd25ec4973a21e6c5
old_file: migrations/versions/410_remove_empty_drafts.py
new_file: migrations/versions/410_remove_empty_drafts.py
old_contents: (empty)
new_contents:

    """Remove empty drafts

    Revision ID: 410_remove_empty_drafts
    Revises: 400_drop_agreement_returned
    Create Date: 2015-11-09 11:41:00.000000

    """

    # revision identifiers, used by Alembic.
    revision = '410_remove_empty_drafts'
    down_revision = '400_drop_agreement_returned'

    from alembic import op


    def upgrade():
        op.execute("""
            DELETE FROM draft_services WHERE (data->>'serviceName') is NULL;
        """)


    def downgrade():
        pass

subject: Remove drafts with no serviceName
message:
    Remove drafts with no serviceName

    In production we have 1237 draft services from 806 suppliers that only have
    a lot selected but no other data. These have never been counted or shown to
    suppliers, but with the new lot changes for DOS they will be counted in G-7
    draft services. This removes these lot-only drafts so that draft counts
    will remain the same once the latest changes to supplier frontend go out.
lang: Python
license: mit
repos: alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api

commit: 449089dc4a62ca73c1f5022535e5cbd6a92445f4
old_file: build-cutline-map.py
new_file: build-cutline-map.py
old_contents: (empty)
new_contents:

    #!/usr/bin/env python

    from osgeo import ogr
    from osgeo import osr
    from glob import glob
    import os.path

    driver = ogr.GetDriverByName("ESRI Shapefile")
    ds = driver.CreateDataSource("cutline-map.shp")

    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)

    layer = ds.CreateLayer("tiles", srs, ogr.wkbPolygon)

    field_name = ogr.FieldDefn("Name", ogr.OFTString)
    field_name.SetWidth(16)
    layer.CreateField(field_name)

    for fn in glob("cutlines/*.json"):
        tile_id = os.path.splitext(os.path.basename(fn))[0]
        cutline_ds = ogr.Open(fn)
        cutline_layer = cutline_ds.GetLayerByIndex(0)
        cutline_feature = cutline_layer.GetNextFeature()
        while cutline_feature:
            poly = cutline_feature.GetGeometryRef().Clone()
            feature = ogr.Feature(layer.GetLayerDefn())
            feature.SetField("Name", tile_id)
            feature.SetGeometry(poly)
            layer.CreateFeature(feature)
            feature.Destroy()
            cutline_feature = cutline_layer.GetNextFeature()

    ds.Destroy()

subject: Add script to build cutline map
message: Add script to build cutline map
lang: Python
license: mit
repos: simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic

commit: 2d951505dd210357921ee65004e24b33dbf47c43
old_file: examples/membership/counting_bloom_filter.py
new_file: examples/membership/counting_bloom_filter.py
old_contents: (empty)
new_contents:

    """Example how to use Counting Bloom Filter."""

    from pdsa.membership.counting_bloom_filter import CountingBloomFilter

    LOREM_IPSUM = (
        "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
        " Mauris consequat leo ut vehicula placerat. In lacinia, nisl"
        " id maximus auctor, sem elit interdum urna, at efficitur tellus"
        " turpis at quam. Pellentesque eget iaculis turpis. Nam ac ligula"
        " ut nunc porttitor pharetra in non lorem. In purus metus,"
        " sollicitudin tristique sapien."
    )

    if __name__ == '__main__':
        bf = CountingBloomFilter(80000, 4)
        print(bf)

        print("Bloom filter uses {} bytes in the memory".format(bf.sizeof()))

        print("Filter contains approximately {} elements".format(bf.count()))
        print("'Lorem' {} in the filter".format(
            "is" if bf.test("Lorem") else "is not"))

        words = set(LOREM_IPSUM.split())
        for word in words:
            bf.add(word.strip(" .,"))

        print("Added {} words, in the filter approximately {} elements".format(
            len(words), bf.count()))
        print("'Lorem' {} in the filter".format(
            "is" if bf.test("Lorem") else "is not"))

        print("Delete 'Lorem' from the filter")
        bf.remove("Lorem")
        print("In the filter approximately {} elements".format(bf.count()))
        print("'Lorem' {} in the filter".format(
            "is" if bf.test("Lorem") else "is not"))

subject: Create example for Counting Bloom Filter
message: Create example for Counting Bloom Filter
lang: Python
license: mit
repos: gakhov/pdsa,gakhov/pdsa,gakhov/pdsa

commit: 7db4c411c75fc8bbae5ac357c1fe28291d06bcb5
old_file: UM/Settings/ProfileReader.py
new_file: UM/Settings/ProfileReader.py
old_contents: (empty)
new_contents:

    # Copyright (c) 2015 Ultimaker B.V.
    # Uranium is released under the terms of the AGPLv3 or higher.

    from UM.PluginObject import PluginObject


    class ProfileReader(PluginObject):
        def __init__(self):
            super().__init__()

        ## Read profile data from a file and return a filled profile.
        #
        # \return node \type{Profile} The profile that was obtained from the file.
        def read(self, file_name):
            raise NotImplementedError("Profile reader plugin was not correctly implemented. The read function was not implemented.")

subject: Add profile reader plugin base class
message:
    Add profile reader plugin base class

    This class is identical to the mesh reader base class. It simply defines a
    read() function that will give an exception if the plugin is not
    implemented properly.

    Contributes to issue CURA-34.
lang: Python
license: agpl-3.0
repos: onitake/Uranium,onitake/Uranium

commit: 0e91d401848918043b23a3921de54c0eda618d00
old_file: createStateDecoded.py
new_file: createStateDecoded.py
old_contents: (empty)
new_contents:

    settings = {
        "mappings": {
            "law": {
                "properties": {
                    "text": {
                        "type": "string",
                        "term_vector": "with_positions_offsets_payloads"
                    }
                }
            }
        },
        "settings": {
            "index": {
                "number_of_shards": 1,
                "number_of_replicas": 0
            },
        }
    }

    from elasticsearch import Elasticsearch, TransportError

    try:
        es = Elasticsearch("http://localhost:9200")
        es.indices.delete(index='statedecoded', ignore=[400, 404])
        es.indices.create(index='statedecoded', body=settings)
    except TransportError as e:
        print(repr(e))

subject: Add script to create statedecoded index
message: Add script to create statedecoded index
lang: Python
license: apache-2.0
repos: o19s/semantic-search-course

commit: 88d2b3f21b318559172e1595ba9209bd9d2a373f
old_file: mclearn/tests/test_aggregators.py
new_file: mclearn/tests/test_aggregators.py
old_contents: (empty)
new_contents:

    from mclearn.aggregators import schulze_method


    class TestAggregators:
        @classmethod
        def setup_class(cls):
            cls.voters = \
                [['A', 'C', 'B', 'E', 'D']] * 5 + \
                [['A', 'D', 'E', 'C', 'B']] * 5 + \
                [['B', 'E', 'D', 'A', 'C']] * 8 + \
                [['C', 'A', 'B', 'E', 'D']] * 3 + \
                [['C', 'A', 'E', 'B', 'D']] * 7 + \
                [['C', 'B', 'A', 'D', 'E']] * 2 + \
                [['D', 'C', 'E', 'B', 'A']] * 7 + \
                [['E', 'B', 'A', 'D', 'C']] * 8

        def test_schulze_method(self):
            # verify example on http://wiki.electorama.com/wiki/Schulze_method
            assert schulze_method(self.voters, 5) == ['E', 'A', 'C', 'B', 'D']

subject: Add test case for Schulze method
message: Add test case for Schulze method
lang: Python
license: bsd-3-clause
repos: chengsoonong/mclass-sky,chengsoonong/mclass-sky,alasdairtran/mclearn,alasdairtran/mclearn,chengsoonong/mclass-sky,chengsoonong/mclass-sky,alasdairtran/mclearn,alasdairtran/mclearn

Add test case for Schulze method
from mclearn.aggregators import schulze_method class TestAggregators: @classmethod def setup_class(cls): cls.voters = \ [['A', 'C', 'B', 'E', 'D']] * 5 + \ [['A', 'D', 'E', 'C', 'B']] * 5 + \ [['B', 'E', 'D', 'A', 'C']] * 8 + \ [['C', 'A', 'B', 'E', 'D']] * 3 + \ [['C', 'A', 'E', 'B', 'D']] * 7 + \ [['C', 'B', 'A', 'D', 'E']] * 2 + \ [['D', 'C', 'E', 'B', 'A']] * 7 + \ [['E', 'B', 'A', 'D', 'C']] * 8 def test_schulze_method(self): # verify example on http://wiki.electorama.com/wiki/Schulze_method assert schulze_method(self.voters, 5) == ['E', 'A', 'C', 'B', 'D']
<commit_before><commit_msg>Add test case for Schulze method<commit_after>
from mclearn.aggregators import schulze_method class TestAggregators: @classmethod def setup_class(cls): cls.voters = \ [['A', 'C', 'B', 'E', 'D']] * 5 + \ [['A', 'D', 'E', 'C', 'B']] * 5 + \ [['B', 'E', 'D', 'A', 'C']] * 8 + \ [['C', 'A', 'B', 'E', 'D']] * 3 + \ [['C', 'A', 'E', 'B', 'D']] * 7 + \ [['C', 'B', 'A', 'D', 'E']] * 2 + \ [['D', 'C', 'E', 'B', 'A']] * 7 + \ [['E', 'B', 'A', 'D', 'C']] * 8 def test_schulze_method(self): # verify example on http://wiki.electorama.com/wiki/Schulze_method assert schulze_method(self.voters, 5) == ['E', 'A', 'C', 'B', 'D']
Add test case for Schulze methodfrom mclearn.aggregators import schulze_method class TestAggregators: @classmethod def setup_class(cls): cls.voters = \ [['A', 'C', 'B', 'E', 'D']] * 5 + \ [['A', 'D', 'E', 'C', 'B']] * 5 + \ [['B', 'E', 'D', 'A', 'C']] * 8 + \ [['C', 'A', 'B', 'E', 'D']] * 3 + \ [['C', 'A', 'E', 'B', 'D']] * 7 + \ [['C', 'B', 'A', 'D', 'E']] * 2 + \ [['D', 'C', 'E', 'B', 'A']] * 7 + \ [['E', 'B', 'A', 'D', 'C']] * 8 def test_schulze_method(self): # verify example on http://wiki.electorama.com/wiki/Schulze_method assert schulze_method(self.voters, 5) == ['E', 'A', 'C', 'B', 'D']
<commit_before><commit_msg>Add test case for Schulze method<commit_after>from mclearn.aggregators import schulze_method class TestAggregators: @classmethod def setup_class(cls): cls.voters = \ [['A', 'C', 'B', 'E', 'D']] * 5 + \ [['A', 'D', 'E', 'C', 'B']] * 5 + \ [['B', 'E', 'D', 'A', 'C']] * 8 + \ [['C', 'A', 'B', 'E', 'D']] * 3 + \ [['C', 'A', 'E', 'B', 'D']] * 7 + \ [['C', 'B', 'A', 'D', 'E']] * 2 + \ [['D', 'C', 'E', 'B', 'A']] * 7 + \ [['E', 'B', 'A', 'D', 'C']] * 8 def test_schulze_method(self): # verify example on http://wiki.electorama.com/wiki/Schulze_method assert schulze_method(self.voters, 5) == ['E', 'A', 'C', 'B', 'D']
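For readers unfamiliar with the function under test, a minimal usage sketch assuming only the signature the test exercises, schulze_method(ballots, num_candidates) returning a ranked candidate list; the ballots below are made up.

from mclearn.aggregators import schulze_method

# Three made-up ballots: each voter ranks three candidates from most to
# least preferred.
ballots = [['A', 'B', 'C'],
           ['B', 'A', 'C'],
           ['A', 'C', 'B']]
print(schulze_method(ballots, 3))  # expected: ['A', 'B', 'C'], since 'A' wins every pairwise contest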
1c54ef090dddb67e52dda00a6fd816807b33b5a3
migrations/versions/0203_fix_old_incomplete_jobs.py
migrations/versions/0203_fix_old_incomplete_jobs.py
"""empty message Revision ID: 0203_fix_old_incomplete_jobs Revises: 0202_new_letter_pricing Create Date: 2017-06-29 12:44:16.815039 """ # revision identifiers, used by Alembic. revision = '0203_fix_old_incomplete_jobs' down_revision = '0202_new_letter_pricing' from alembic import op def upgrade(): op.execute(""" UPDATE jobs SET processing_started = created_at WHERE processing_started IS NULL AND job_status = 'in progress' """) def downgrade(): pass
Clean up old, incomplete jobs
Clean up old, incomplete jobs
Python
mit
alphagov/notifications-api,alphagov/notifications-api
Clean up old, incomplete jobs
"""empty message Revision ID: 0203_fix_old_incomplete_jobs Revises: 0202_new_letter_pricing Create Date: 2017-06-29 12:44:16.815039 """ # revision identifiers, used by Alembic. revision = '0203_fix_old_incomplete_jobs' down_revision = '0202_new_letter_pricing' from alembic import op def upgrade(): op.execute(""" UPDATE jobs SET processing_started = created_at WHERE processing_started IS NULL AND job_status = 'in progress' """) def downgrade(): pass
<commit_before><commit_msg>Clean up old, incomplete jobs<commit_after>
"""empty message Revision ID: 0203_fix_old_incomplete_jobs Revises: 0202_new_letter_pricing Create Date: 2017-06-29 12:44:16.815039 """ # revision identifiers, used by Alembic. revision = '0203_fix_old_incomplete_jobs' down_revision = '0202_new_letter_pricing' from alembic import op def upgrade(): op.execute(""" UPDATE jobs SET processing_started = created_at WHERE processing_started IS NULL AND job_status = 'in progress' """) def downgrade(): pass
Clean up old, incomplete jobs"""empty message Revision ID: 0203_fix_old_incomplete_jobs Revises: 0202_new_letter_pricing Create Date: 2017-06-29 12:44:16.815039 """ # revision identifiers, used by Alembic. revision = '0203_fix_old_incomplete_jobs' down_revision = '0202_new_letter_pricing' from alembic import op def upgrade(): op.execute(""" UPDATE jobs SET processing_started = created_at WHERE processing_started IS NULL AND job_status = 'in progress' """) def downgrade(): pass
<commit_before><commit_msg>Clean up old, incomplete jobs<commit_after>"""empty message Revision ID: 0203_fix_old_incomplete_jobs Revises: 0202_new_letter_pricing Create Date: 2017-06-29 12:44:16.815039 """ # revision identifiers, used by Alembic. revision = '0203_fix_old_incomplete_jobs' down_revision = '0202_new_letter_pricing' from alembic import op def upgrade(): op.execute(""" UPDATE jobs SET processing_started = created_at WHERE processing_started IS NULL AND job_status = 'in progress' """) def downgrade(): pass
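A hedged pre-flight sketch for the migration above (not part of the commit) that counts the rows the UPDATE would backfill, using standard Alembic/SQLAlchemy calls.

from alembic import op
import sqlalchemy as sa

def count_incomplete_jobs():
    # Sketch only: same predicate as the UPDATE in upgrade(), runnable
    # from inside a migration context before applying the backfill.
    bind = op.get_bind()
    result = bind.execute(sa.text(
        "SELECT count(*) FROM jobs "
        "WHERE processing_started IS NULL AND job_status = 'in progress'"
    ))
    return result.scalar()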
85cc8916012d9ba53c46ee827d16d08fd06d39ac
tools/enumerate-runners.py
tools/enumerate-runners.py
#!/usr/bin/env python # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from stevedore.driver import DriverManager from stevedore.extension import ExtensionManager from st2common import config config.parse_args() manager = ExtensionManager(namespace='st2common.runners.runner', invoke_on_load=False) extension_names = manager.names() print('Available / installed action runners:') for name in extension_names: manager = DriverManager(namespace='st2common.runners.runner', invoke_on_load=False, name=name) runner_instance = manager.driver.get_runner() runner_metadata = manager.driver.get_metadata()[0] print('- %s (runner_module=%s,cls=%s)' % (name, runner_metadata['runner_module'], runner_instance.__class__))
Add script for listing available / installed action runners.
Add script for listing available / installed action runners.
Python
apache-2.0
nzlosh/st2,Plexxi/st2,Plexxi/st2,nzlosh/st2,nzlosh/st2,StackStorm/st2,StackStorm/st2,StackStorm/st2,Plexxi/st2,StackStorm/st2,Plexxi/st2,nzlosh/st2
Add script for listing available / installed action runners.
#!/usr/bin/env python # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from stevedore.driver import DriverManager from stevedore.extension import ExtensionManager from st2common import config config.parse_args() manager = ExtensionManager(namespace='st2common.runners.runner', invoke_on_load=False) extension_names = manager.names() print('Available / installed action runners:') for name in extension_names: manager = DriverManager(namespace='st2common.runners.runner', invoke_on_load=False, name=name) runner_instance = manager.driver.get_runner() runner_metadata = manager.driver.get_metadata()[0] print('- %s (runner_module=%s,cls=%s)' % (name, runner_metadata['runner_module'], runner_instance.__class__))
<commit_before><commit_msg>Add script for listing available / installed action runners.<commit_after>
#!/usr/bin/env python # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from stevedore.driver import DriverManager from stevedore.extension import ExtensionManager from st2common import config config.parse_args() manager = ExtensionManager(namespace='st2common.runners.runner', invoke_on_load=False) extension_names = manager.names() print('Available / installed action runners:') for name in extension_names: manager = DriverManager(namespace='st2common.runners.runner', invoke_on_load=False, name=name) runner_instance = manager.driver.get_runner() runner_metadata = manager.driver.get_metadata()[0] print('- %s (runner_module=%s,cls=%s)' % (name, runner_metadata['runner_module'], runner_instance.__class__))
Add script for listing available / installed action runners.#!/usr/bin/env python # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from stevedore.driver import DriverManager from stevedore.extension import ExtensionManager from st2common import config config.parse_args() manager = ExtensionManager(namespace='st2common.runners.runner', invoke_on_load=False) extension_names = manager.names() print('Available / installed action runners:') for name in extension_names: manager = DriverManager(namespace='st2common.runners.runner', invoke_on_load=False, name=name) runner_instance = manager.driver.get_runner() runner_metadata = manager.driver.get_metadata()[0] print('- %s (runner_module=%s,cls=%s)' % (name, runner_metadata['runner_module'], runner_instance.__class__))
<commit_before><commit_msg>Add script for listing available / installed action runners.<commit_after>#!/usr/bin/env python # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from stevedore.driver import DriverManager from stevedore.extension import ExtensionManager from st2common import config config.parse_args() manager = ExtensionManager(namespace='st2common.runners.runner', invoke_on_load=False) extension_names = manager.names() print('Available / installed action runners:') for name in extension_names: manager = DriverManager(namespace='st2common.runners.runner', invoke_on_load=False, name=name) runner_instance = manager.driver.get_runner() runner_metadata = manager.driver.get_metadata()[0] print('- %s (runner_module=%s,cls=%s)' % (name, runner_metadata['runner_module'], runner_instance.__class__))
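The script above discovers runners through setuptools entry points in the 'st2common.runners.runner' namespace. A sketch of how a runner package might register itself so the script lists it; the package and module names are hypothetical.

# setup.py of a hypothetical runner distribution.
from setuptools import setup

setup(
    name="my-st2-runner",
    version="0.1.0",
    packages=["my_runner"],
    entry_points={
        "st2common.runners.runner": [
            # The target module is assumed to expose get_runner() and
            # get_metadata(), matching the calls made by the script above.
            "my_runner = my_runner.runner",
        ],
    },
)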
048f5b9d8abc9c768db5714d343718e283dc4d4b
tempest/tests/services/compute/test_certificates_client.py
tempest/tests/services/compute/test_certificates_client.py
# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import httplib2 from oslo_serialization import jsonutils as json from oslotest import mockpatch from tempest.services.compute.json import certificates_client from tempest.tests import base from tempest.tests import fake_auth_provider class TestCertificatesClient(base.TestCase): FAKE_CERTIFICATE = { "certificate": { "data": "-----BEGIN----MIICyzCCAjSgAwI----END CERTIFICATE-----\n", "private_key": None } } def setUp(self): super(TestCertificatesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = certificates_client.CertificatesClient( fake_auth, 'compute', 'regionOne') def _test_show_certificate(self, bytes_body=False): serialized_body = json.dumps(self.FAKE_CERTIFICATE) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.get', return_value=mocked_resp)) resp = self.client.show_certificate("fake-id") self.assertEqual(self.FAKE_CERTIFICATE, resp) def test_show_certificate_with_str_body(self): self._test_show_certificate() def test_show_certificate_with_bytes_body(self): self._test_show_certificate(bytes_body=True) def _test_create_certificate(self, bytes_body=False): cert = copy.deepcopy(self.FAKE_CERTIFICATE) cert['certificate']['private_key'] = "my_private_key" serialized_body = json.dumps(cert) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.post', return_value=mocked_resp)) resp = self.client.create_certificate() self.assertEqual(cert, resp) def test_create_certificate_with_str_body(self): self._test_create_certificate() def test_create_certificate_with_bytes_body(self): self._test_create_certificate(bytes_body=True)
Add unit tests for certificates_client
Add unit tests for certificates_client This patch adds unit tests for certificates_client module Change-Id: If5427832a7446590c3a4b5d62bfe61d70bd1d112
Python
apache-2.0
sebrandon1/tempest,xbezdick/tempest,vedujoshi/tempest,xbezdick/tempest,cisco-openstack/tempest,zsoltdudas/lis-tempest,bigswitch/tempest,pczerkas/tempest,bigswitch/tempest,izadorozhna/tempest,masayukig/tempest,pczerkas/tempest,zsoltdudas/lis-tempest,sebrandon1/tempest,rakeshmi/tempest,cisco-openstack/tempest,Tesora/tesora-tempest,flyingfish007/tempest,vedujoshi/tempest,LIS/lis-tempest,masayukig/tempest,openstack/tempest,rakeshmi/tempest,tonyli71/tempest,izadorozhna/tempest,tonyli71/tempest,flyingfish007/tempest,dkalashnik/tempest,Juniper/tempest,Tesora/tesora-tempest,dkalashnik/tempest,Juniper/tempest,LIS/lis-tempest,openstack/tempest
Add unit tests for certificates_client This patch adds unit tests for certificates_client module Change-Id: If5427832a7446590c3a4b5d62bfe61d70bd1d112
# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import httplib2 from oslo_serialization import jsonutils as json from oslotest import mockpatch from tempest.services.compute.json import certificates_client from tempest.tests import base from tempest.tests import fake_auth_provider class TestCertificatesClient(base.TestCase): FAKE_CERTIFICATE = { "certificate": { "data": "-----BEGIN----MIICyzCCAjSgAwI----END CERTIFICATE-----\n", "private_key": None } } def setUp(self): super(TestCertificatesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = certificates_client.CertificatesClient( fake_auth, 'compute', 'regionOne') def _test_show_certificate(self, bytes_body=False): serialized_body = json.dumps(self.FAKE_CERTIFICATE) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.get', return_value=mocked_resp)) resp = self.client.show_certificate("fake-id") self.assertEqual(self.FAKE_CERTIFICATE, resp) def test_show_certificate_with_str_body(self): self._test_show_certificate() def test_show_certificate_with_bytes_body(self): self._test_show_certificate(bytes_body=True) def _test_create_certificate(self, bytes_body=False): cert = copy.deepcopy(self.FAKE_CERTIFICATE) cert['certificate']['private_key'] = "my_private_key" serialized_body = json.dumps(cert) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.post', return_value=mocked_resp)) resp = self.client.create_certificate() self.assertEqual(cert, resp) def test_create_certificate_with_str_body(self): self._test_create_certificate() def test_create_certificate_with_bytes_body(self): self._test_create_certificate(bytes_body=True)
<commit_before><commit_msg>Add unit tests for certificates_client This patch adds unit tests for certificates_client module Change-Id: If5427832a7446590c3a4b5d62bfe61d70bd1d112<commit_after>
# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import httplib2 from oslo_serialization import jsonutils as json from oslotest import mockpatch from tempest.services.compute.json import certificates_client from tempest.tests import base from tempest.tests import fake_auth_provider class TestCertificatesClient(base.TestCase): FAKE_CERTIFICATE = { "certificate": { "data": "-----BEGIN----MIICyzCCAjSgAwI----END CERTIFICATE-----\n", "private_key": None } } def setUp(self): super(TestCertificatesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = certificates_client.CertificatesClient( fake_auth, 'compute', 'regionOne') def _test_show_certificate(self, bytes_body=False): serialized_body = json.dumps(self.FAKE_CERTIFICATE) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.get', return_value=mocked_resp)) resp = self.client.show_certificate("fake-id") self.assertEqual(self.FAKE_CERTIFICATE, resp) def test_show_certificate_with_str_body(self): self._test_show_certificate() def test_show_certificate_with_bytes_body(self): self._test_show_certificate(bytes_body=True) def _test_create_certificate(self, bytes_body=False): cert = copy.deepcopy(self.FAKE_CERTIFICATE) cert['certificate']['private_key'] = "my_private_key" serialized_body = json.dumps(cert) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.post', return_value=mocked_resp)) resp = self.client.create_certificate() self.assertEqual(cert, resp) def test_create_certificate_with_str_body(self): self._test_create_certificate() def test_create_certificate_with_bytes_body(self): self._test_create_certificate(bytes_body=True)
Add unit tests for certificates_client This patch adds unit tests for certificates_client module Change-Id: If5427832a7446590c3a4b5d62bfe61d70bd1d112# Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import httplib2 from oslo_serialization import jsonutils as json from oslotest import mockpatch from tempest.services.compute.json import certificates_client from tempest.tests import base from tempest.tests import fake_auth_provider class TestCertificatesClient(base.TestCase): FAKE_CERTIFICATE = { "certificate": { "data": "-----BEGIN----MIICyzCCAjSgAwI----END CERTIFICATE-----\n", "private_key": None } } def setUp(self): super(TestCertificatesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = certificates_client.CertificatesClient( fake_auth, 'compute', 'regionOne') def _test_show_certificate(self, bytes_body=False): serialized_body = json.dumps(self.FAKE_CERTIFICATE) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.get', return_value=mocked_resp)) resp = self.client.show_certificate("fake-id") self.assertEqual(self.FAKE_CERTIFICATE, resp) def test_show_certificate_with_str_body(self): self._test_show_certificate() def test_show_certificate_with_bytes_body(self): self._test_show_certificate(bytes_body=True) def _test_create_certificate(self, bytes_body=False): cert = copy.deepcopy(self.FAKE_CERTIFICATE) cert['certificate']['private_key'] = "my_private_key" serialized_body = json.dumps(cert) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.post', return_value=mocked_resp)) resp = self.client.create_certificate() self.assertEqual(cert, resp) def test_create_certificate_with_str_body(self): self._test_create_certificate() def test_create_certificate_with_bytes_body(self): self._test_create_certificate(bytes_body=True)
<commit_before><commit_msg>Add unit tests for certificates_client This patch adds unit tests for certificates_client module Change-Id: If5427832a7446590c3a4b5d62bfe61d70bd1d112<commit_after># Copyright 2015 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import httplib2 from oslo_serialization import jsonutils as json from oslotest import mockpatch from tempest.services.compute.json import certificates_client from tempest.tests import base from tempest.tests import fake_auth_provider class TestCertificatesClient(base.TestCase): FAKE_CERTIFICATE = { "certificate": { "data": "-----BEGIN----MIICyzCCAjSgAwI----END CERTIFICATE-----\n", "private_key": None } } def setUp(self): super(TestCertificatesClient, self).setUp() fake_auth = fake_auth_provider.FakeAuthProvider() self.client = certificates_client.CertificatesClient( fake_auth, 'compute', 'regionOne') def _test_show_certificate(self, bytes_body=False): serialized_body = json.dumps(self.FAKE_CERTIFICATE) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.get', return_value=mocked_resp)) resp = self.client.show_certificate("fake-id") self.assertEqual(self.FAKE_CERTIFICATE, resp) def test_show_certificate_with_str_body(self): self._test_show_certificate() def test_show_certificate_with_bytes_body(self): self._test_show_certificate(bytes_body=True) def _test_create_certificate(self, bytes_body=False): cert = copy.deepcopy(self.FAKE_CERTIFICATE) cert['certificate']['private_key'] = "my_private_key" serialized_body = json.dumps(cert) if bytes_body: serialized_body = serialized_body.encode('utf-8') mocked_resp = (httplib2.Response({'status': 200}), serialized_body) self.useFixture(mockpatch.Patch( 'tempest.common.service_client.ServiceClient.post', return_value=mocked_resp)) resp = self.client.create_certificate() self.assertEqual(cert, resp) def test_create_certificate_with_str_body(self): self._test_create_certificate() def test_create_certificate_with_bytes_body(self): self._test_create_certificate(bytes_body=True)
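The two tests above duplicate their mock setup; a sketch of factoring it into a single helper on the test class, reusing the exact fixture and response construction from the commit.

import httplib2
from oslo_serialization import jsonutils as json
from oslotest import mockpatch

def _mock_service_response(self, method, body, bytes_body=False, status=200):
    # 'method' is 'get' or 'post'; 'self' is assumed to be any test class
    # derived from base.TestCase, as in the tests above.
    serialized_body = json.dumps(body)
    if bytes_body:
        serialized_body = serialized_body.encode('utf-8')
    mocked_resp = (httplib2.Response({'status': status}), serialized_body)
    self.useFixture(mockpatch.Patch(
        'tempest.common.service_client.ServiceClient.%s' % method,
        return_value=mocked_resp))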
4580851cf8531ace086af6b2649d7467defa6cf7
tests/PluginRegistry/OldTestPlugin/__init__.py
tests/PluginRegistry/OldTestPlugin/__init__.py
# Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. def getMetaData(): return { "name": "OldTestPlugin" } def register(app): app.registerTestPlugin("OldTestPlugin")
Add missing OldTestPlugin for PluginRegistry test
Add missing OldTestPlugin for PluginRegistry test
Python
agpl-3.0
onitake/Uranium,onitake/Uranium
Add missing OldTestPlugin for PluginRegistry test
# Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. def getMetaData(): return { "name": "OldTestPlugin" } def register(app): app.registerTestPlugin("OldTestPlugin")
<commit_before><commit_msg>Add missing OldTestPlugin for PluginRegistry test<commit_after>
# Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. def getMetaData(): return { "name": "OldTestPlugin" } def register(app): app.registerTestPlugin("OldTestPlugin")
Add missing OldTestPlugin for PluginRegistry test# Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. def getMetaData(): return { "name": "OldTestPlugin" } def register(app): app.registerTestPlugin("OldTestPlugin")
<commit_before><commit_msg>Add missing OldTestPlugin for PluginRegistry test<commit_after># Copyright (c) 2015 Ultimaker B.V. # Uranium is released under the terms of the AGPLv3 or higher. def getMetaData(): return { "name": "OldTestPlugin" } def register(app): app.registerTestPlugin("OldTestPlugin")
3d937a2438a3ae7b417561031de2bfe632802720
brew/utilities/efficiency.py
brew/utilities/efficiency.py
# -*- coding: utf-8 -*- from .sugar import sg_to_gu __all__ = [ u'calculate_brew_house_yield', ] def calculate_brew_house_yield(wort_volume, sg, grain_additions): gravity_units = 0.0 for grain_add in grain_additions: gravity_units += grain_add.grain.ppg * grain_add.weight return sg_to_gu(sg) * wort_volume / gravity_units
Add a brew house yield calculator
Add a brew house yield calculator
Python
mit
chrisgilmerproj/brewday,chrisgilmerproj/brewday
Add a brew house yield calculator
# -*- coding: utf-8 -*- from .sugar import sg_to_gu __all__ = [ u'calculate_brew_house_yield', ] def calculate_brew_house_yield(wort_volume, sg, grain_additions): gravity_units = 0.0 for grain_add in grain_additions: gravity_units += grain_add.grain.ppg * grain_add.weight return sg_to_gu(sg) * wort_volume / gravity_units
<commit_before><commit_msg>Add a brew house yield calculator<commit_after>
# -*- coding: utf-8 -*- from .sugar import sg_to_gu __all__ = [ u'calculate_brew_house_yield', ] def calculate_brew_house_yield(wort_volume, sg, grain_additions): gravity_units = 0.0 for grain_add in grain_additions: gravity_units += grain_add.grain.ppg * grain_add.weight return sg_to_gu(sg) * wort_volume / gravity_units
Add a brew house yield calculator# -*- coding: utf-8 -*- from .sugar import sg_to_gu __all__ = [ u'calculate_brew_house_yield', ] def calculate_brew_house_yield(wort_volume, sg, grain_additions): gravity_units = 0.0 for grain_add in grain_additions: gravity_units += grain_add.grain.ppg * grain_add.weight return sg_to_gu(sg) * wort_volume / gravity_units
<commit_before><commit_msg>Add a brew house yield calculator<commit_after># -*- coding: utf-8 -*- from .sugar import sg_to_gu __all__ = [ u'calculate_brew_house_yield', ] def calculate_brew_house_yield(wort_volume, sg, grain_additions): gravity_units = 0.0 for grain_add in grain_additions: gravity_units += grain_add.grain.ppg * grain_add.weight return sg_to_gu(sg) * wort_volume / gravity_units
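A usage sketch for calculate_brew_house_yield. Per the code above a grain addition only needs .weight and .grain.ppg, so namedtuple stand-ins are used here and every number is illustrative.

from collections import namedtuple
from brew.utilities.efficiency import calculate_brew_house_yield

# Illustrative stand-ins; the real project presumably has richer types.
Grain = namedtuple('Grain', ['ppg'])
GrainAddition = namedtuple('GrainAddition', ['grain', 'weight'])

additions = [
    GrainAddition(grain=Grain(ppg=37.0), weight=10.0),  # illustrative base malt, lbs
    GrainAddition(grain=Grain(ppg=35.0), weight=1.0),   # illustrative specialty malt
]
# 5.5 gal of wort measured at SG 1.050 -> (50 GU * 5.5) / 405 GU ~ 0.68
print(calculate_brew_house_yield(5.5, 1.050, additions))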
653bf95527724393c8b18409fbf8addc20484072
mnist.py
mnist.py
# this file defines function for parsing the MNIST dataset as available to # download here: # http://yann.lecun.com/exdb/mnist/index.html # The extracted files are expected to reside in a subfolder called Data/MNIST/ import matplotlib.pyplot as plt import matplotlib.animation as animation import numpy as np import os path = "Data/MNIST/" #reads training and test labels as numpy.arrays def read_labels(): training_labels = read_labels_from_file(path + "train-labels.idx1-ubyte") test_labels = read_labels_from_file(path + "t10k-labels.idx1-ubyte") return training_labels, test_labels def read_labels_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_labels = np.fromfile(f, dtype='>i4', count=1) labels = np.fromfile(f, dtype='i1') return labels #reads training and test images as numpy.arrays def read_images(): training_images = read_images_from_file(path + "train-images.idx3-ubyte") test_images = read_images_from_file(path + "t10k-images.idx3-ubyte") return training_images, test_images def read_images_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_images = np.fromfile(f, dtype='>i4', count=1) n_rows = np.fromfile(f, dtype='>i4', count=1) n_columns = np.fromfile(f, dtype='>i4', count=1) pixels = np.fromfile(f, dtype='B').reshape(n_images, n_rows*n_columns) return pixels #shows all images in sequence def show_test_images(): tr, te = read_images() show_images(te, "MNIST test images") def show_training_images(): tr, re = read_images() show_images(tr, "MNIST training images") def show_images(images, title): im = plt.imshow(images[0].reshape(28,28), cmap=plt.cm.gray, interpolation='nearest') plt.title(title, fontsize=20) for i in range(images.shape[0]): im.set_data(images[i].reshape(28,28)) plt.pause(.1) plt.draw()
Add functions for reading MNIST dataset into numpy arrays.
Add functions for reading MNIST dataset into numpy arrays.
Python
mit
drsstein/PyRat
Add functions for reading MNIST dataset into numpy arrays.
# this file defines function for parsing the MNIST dataset as available to # download here: # http://yann.lecun.com/exdb/mnist/index.html # The extracted files are expected to reside in a subfolder called Data/MNIST/ import matplotlib.pyplot as plt import matplotlib.animation as animation import numpy as np import os path = "Data/MNIST/" #reads training and test labels as numpy.arrays def read_labels(): training_labels = read_labels_from_file(path + "train-labels.idx1-ubyte") test_labels = read_labels_from_file(path + "t10k-labels.idx1-ubyte") return training_labels, test_labels def read_labels_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_labels = np.fromfile(f, dtype='>i4', count=1) labels = np.fromfile(f, dtype='i1') return labels #reads training and test images as numpy.arrays def read_images(): training_images = read_images_from_file(path + "train-images.idx3-ubyte") test_images = read_images_from_file(path + "t10k-images.idx3-ubyte") return training_images, test_images def read_images_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_images = np.fromfile(f, dtype='>i4', count=1) n_rows = np.fromfile(f, dtype='>i4', count=1) n_columns = np.fromfile(f, dtype='>i4', count=1) pixels = np.fromfile(f, dtype='B').reshape(n_images, n_rows*n_columns) return pixels #shows all images in sequence def show_test_images(): tr, te = read_images() show_images(te, "MNIST test images") def show_training_images(): tr, re = read_images() show_images(tr, "MNIST training images") def show_images(images, title): im = plt.imshow(images[0].reshape(28,28), cmap=plt.cm.gray, interpolation='nearest') plt.title(title, fontsize=20) for i in range(images.shape[0]): im.set_data(images[i].reshape(28,28)) plt.pause(.1) plt.draw()
<commit_before><commit_msg>Add functions for reading MNIST dataset into numpy arrays.<commit_after>
# this file defines function for parsing the MNIST dataset as available to # download here: # http://yann.lecun.com/exdb/mnist/index.html # The extracted files are expected to reside in a subfolder called Data/MNIST/ import matplotlib.pyplot as plt import matplotlib.animation as animation import numpy as np import os path = "Data/MNIST/" #reads training and test labels as numpy.arrays def read_labels(): training_labels = read_labels_from_file(path + "train-labels.idx1-ubyte") test_labels = read_labels_from_file(path + "t10k-labels.idx1-ubyte") return training_labels, test_labels def read_labels_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_labels = np.fromfile(f, dtype='>i4', count=1) labels = np.fromfile(f, dtype='i1') return labels #reads training and test images as numpy.arrays def read_images(): training_images = read_images_from_file(path + "train-images.idx3-ubyte") test_images = read_images_from_file(path + "t10k-images.idx3-ubyte") return training_images, test_images def read_images_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_images = np.fromfile(f, dtype='>i4', count=1) n_rows = np.fromfile(f, dtype='>i4', count=1) n_columns = np.fromfile(f, dtype='>i4', count=1) pixels = np.fromfile(f, dtype='B').reshape(n_images, n_rows*n_columns) return pixels #shows all images in sequence def show_test_images(): tr, te = read_images() show_images(te, "MNIST test images") def show_training_images(): tr, re = read_images() show_images(tr, "MNIST training images") def show_images(images, title): im = plt.imshow(images[0].reshape(28,28), cmap=plt.cm.gray, interpolation='nearest') plt.title(title, fontsize=20) for i in range(images.shape[0]): im.set_data(images[i].reshape(28,28)) plt.pause(.1) plt.draw()
Add functions for reading MNIST dataset into numpy arrays.# this file defines function for parsing the MNIST dataset as available to # download here: # http://yann.lecun.com/exdb/mnist/index.html # The extracted files are expected to reside in a subfolder called Data/MNIST/ import matplotlib.pyplot as plt import matplotlib.animation as animation import numpy as np import os path = "Data/MNIST/" #reads training and test labels as numpy.arrays def read_labels(): training_labels = read_labels_from_file(path + "train-labels.idx1-ubyte") test_labels = read_labels_from_file(path + "t10k-labels.idx1-ubyte") return training_labels, test_labels def read_labels_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_labels = np.fromfile(f, dtype='>i4', count=1) labels = np.fromfile(f, dtype='i1') return labels #reads training and test images as numpy.arrays def read_images(): training_images = read_images_from_file(path + "train-images.idx3-ubyte") test_images = read_images_from_file(path + "t10k-images.idx3-ubyte") return training_images, test_images def read_images_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_images = np.fromfile(f, dtype='>i4', count=1) n_rows = np.fromfile(f, dtype='>i4', count=1) n_columns = np.fromfile(f, dtype='>i4', count=1) pixels = np.fromfile(f, dtype='B').reshape(n_images, n_rows*n_columns) return pixels #shows all images in sequence def show_test_images(): tr, te = read_images() show_images(te, "MNIST test images") def show_training_images(): tr, re = read_images() show_images(tr, "MNIST training images") def show_images(images, title): im = plt.imshow(images[0].reshape(28,28), cmap=plt.cm.gray, interpolation='nearest') plt.title(title, fontsize=20) for i in range(images.shape[0]): im.set_data(images[i].reshape(28,28)) plt.pause(.1) plt.draw()
<commit_before><commit_msg>Add functions for reading MNIST dataset into numpy arrays.<commit_after># this file defines function for parsing the MNIST dataset as available to # download here: # http://yann.lecun.com/exdb/mnist/index.html # The extracted files are expected to reside in a subfolder called Data/MNIST/ import matplotlib.pyplot as plt import matplotlib.animation as animation import numpy as np import os path = "Data/MNIST/" #reads training and test labels as numpy.arrays def read_labels(): training_labels = read_labels_from_file(path + "train-labels.idx1-ubyte") test_labels = read_labels_from_file(path + "t10k-labels.idx1-ubyte") return training_labels, test_labels def read_labels_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_labels = np.fromfile(f, dtype='>i4', count=1) labels = np.fromfile(f, dtype='i1') return labels #reads training and test images as numpy.arrays def read_images(): training_images = read_images_from_file(path + "train-images.idx3-ubyte") test_images = read_images_from_file(path + "t10k-images.idx3-ubyte") return training_images, test_images def read_images_from_file(filepath): f = open(filepath, "rb") magic = np.fromfile(f, dtype='>i4', count=1) n_images = np.fromfile(f, dtype='>i4', count=1) n_rows = np.fromfile(f, dtype='>i4', count=1) n_columns = np.fromfile(f, dtype='>i4', count=1) pixels = np.fromfile(f, dtype='B').reshape(n_images, n_rows*n_columns) return pixels #shows all images in sequence def show_test_images(): tr, te = read_images() show_images(te, "MNIST test images") def show_training_images(): tr, re = read_images() show_images(tr, "MNIST training images") def show_images(images, title): im = plt.imshow(images[0].reshape(28,28), cmap=plt.cm.gray, interpolation='nearest') plt.title(title, fontsize=20) for i in range(images.shape[0]): im.set_data(images[i].reshape(28,28)) plt.pause(.1) plt.draw()
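A short driver for the module above; the file is mnist.py, so it imports as mnist, and the shapes assume the standard MNIST files sit in Data/MNIST/.

import mnist

training_images, test_images = mnist.read_images()
training_labels, test_labels = mnist.read_labels()

print(training_images.shape)  # (60000, 784) for the standard training set
print(test_labels[:10])       # first ten digit labels
mnist.show_test_images()      # animates through the test images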
13480e3aeb9955d82db742030b85b1621a2ecb7c
propalyzer_site/propalyzer_app/pdf_render.py
propalyzer_site/propalyzer_app/pdf_render.py
from io import BytesIO from django.http import HttpResponse from django.template.loader import get_template import xhtml2pdf.pisa as pisa class Render: @staticmethod def render(path: str, params: dict): template = get_template(path) html = template.render(params) response = BytesIO() pdf = pisa.pisaDocument(BytesIO(html.encode("UTF-8")), response) if not pdf.err: return HttpResponse(response.getvalue(), content_type='application/pdf') else: return HttpResponse("Error Rendering PDF", status=400)
Add pdf render file to VCS
Add pdf render file to VCS
Python
mit
toms3t/Propalyzer,toms3t/Propalyzer,toms3t/Propalyzer
Add pdf render file to VCS
from io import BytesIO from django.http import HttpResponse from django.template.loader import get_template import xhtml2pdf.pisa as pisa class Render: @staticmethod def render(path: str, params: dict): template = get_template(path) html = template.render(params) response = BytesIO() pdf = pisa.pisaDocument(BytesIO(html.encode("UTF-8")), response) if not pdf.err: return HttpResponse(response.getvalue(), content_type='application/pdf') else: return HttpResponse("Error Rendering PDF", status=400)
<commit_before><commit_msg>Add pdf render file to VCS<commit_after>
from io import BytesIO from django.http import HttpResponse from django.template.loader import get_template import xhtml2pdf.pisa as pisa class Render: @staticmethod def render(path: str, params: dict): template = get_template(path) html = template.render(params) response = BytesIO() pdf = pisa.pisaDocument(BytesIO(html.encode("UTF-8")), response) if not pdf.err: return HttpResponse(response.getvalue(), content_type='application/pdf') else: return HttpResponse("Error Rendering PDF", status=400)
Add pdf render file to VCSfrom io import BytesIO from django.http import HttpResponse from django.template.loader import get_template import xhtml2pdf.pisa as pisa class Render: @staticmethod def render(path: str, params: dict): template = get_template(path) html = template.render(params) response = BytesIO() pdf = pisa.pisaDocument(BytesIO(html.encode("UTF-8")), response) if not pdf.err: return HttpResponse(response.getvalue(), content_type='application/pdf') else: return HttpResponse("Error Rendering PDF", status=400)
<commit_before><commit_msg>Add pdf render file to VCS<commit_after>from io import BytesIO from django.http import HttpResponse from django.template.loader import get_template import xhtml2pdf.pisa as pisa class Render: @staticmethod def render(path: str, params: dict): template = get_template(path) html = template.render(params) response = BytesIO() pdf = pisa.pisaDocument(BytesIO(html.encode("UTF-8")), response) if not pdf.err: return HttpResponse(response.getvalue(), content_type='application/pdf') else: return HttpResponse("Error Rendering PDF", status=400)
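A sketch of calling Render from a Django view. The view name, template, and context are placeholders, and the import path assumes the project directory propalyzer_site is on sys.path.

from propalyzer_app.pdf_render import Render

def property_report(request):
    # Hypothetical view: render the named template with this context and
    # return it as a PDF HttpResponse via the Render helper above.
    context = {'address': '123 Main St', 'monthly_cash_flow': 250}
    return Render.render('report_template.html', context)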
5fccb7cb7059847dbac5f78ee438a8f31ab78430
CodeFights/primarySchool.py
CodeFights/primarySchool.py
#!/usr/local/bin/python # Code Fights Primary School Problem class Rectangle(object): def __init__(self, height, width): self.height = height self.width = width def __str__(self): return '{} x {} = {}'.format(self.height, self.width, self.area) @property def area(self): return self.height * self.width @area.setter def area(self): self.area = self.height * self.width def primarySchool(height, width): return str(Rectangle(height, width)) def main(): tests = [ [7, 4, "7 x 4 = 28"], [1, 20, "1 x 20 = 20"], [3, 13, "3 x 13 = 39"], [10, 3, "10 x 3 = 30"], [16, 12, "16 x 12 = 192"] ] for t in tests: res = primarySchool(t[0], t[1]) ans = t[2] if ans == res: print("PASSED: primarySchool({}, {}) returned {}" .format(t[0], t[1], res)) else: print("FAILED: primarySchool({}, {}) returned {}, answer: {}" .format(t[0], t[1], res, ans)) if __name__ == '__main__': main()
Solve Code Fights primary school problem
Solve Code Fights primary school problem
Python
mit
HKuz/Test_Code
Solve Code Fights primary school problem
#!/usr/local/bin/python # Code Fights Primary School Problem class Rectangle(object): def __init__(self, height, width): self.height = height self.width = width def __str__(self): return '{} x {} = {}'.format(self.height, self.width, self.area) @property def area(self): return self.height * self.width @area.setter def area(self): self.area = self.height * self.width def primarySchool(height, width): return str(Rectangle(height, width)) def main(): tests = [ [7, 4, "7 x 4 = 28"], [1, 20, "1 x 20 = 20"], [3, 13, "3 x 13 = 39"], [10, 3, "10 x 3 = 30"], [16, 12, "16 x 12 = 192"] ] for t in tests: res = primarySchool(t[0], t[1]) ans = t[2] if ans == res: print("PASSED: primarySchool({}, {}) returned {}" .format(t[0], t[1], res)) else: print("FAILED: primarySchool({}, {}) returned {}, answer: {}" .format(t[0], t[1], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights primary school problem<commit_after>
#!/usr/local/bin/python # Code Fights Primary School Problem class Rectangle(object): def __init__(self, height, width): self.height = height self.width = width def __str__(self): return '{} x {} = {}'.format(self.height, self.width, self.area) @property def area(self): return self.height * self.width @area.setter def area(self): self.area = self.height * self.width def primarySchool(height, width): return str(Rectangle(height, width)) def main(): tests = [ [7, 4, "7 x 4 = 28"], [1, 20, "1 x 20 = 20"], [3, 13, "3 x 13 = 39"], [10, 3, "10 x 3 = 30"], [16, 12, "16 x 12 = 192"] ] for t in tests: res = primarySchool(t[0], t[1]) ans = t[2] if ans == res: print("PASSED: primarySchool({}, {}) returned {}" .format(t[0], t[1], res)) else: print("FAILED: primarySchool({}, {}) returned {}, answer: {}" .format(t[0], t[1], res, ans)) if __name__ == '__main__': main()
Solve Code Fights primary school problem#!/usr/local/bin/python # Code Fights Primary School Problem class Rectangle(object): def __init__(self, height, width): self.height = height self.width = width def __str__(self): return '{} x {} = {}'.format(self.height, self.width, self.area) @property def area(self): return self.height * self.width @area.setter def area(self): self.area = self.height * self.width def primarySchool(height, width): return str(Rectangle(height, width)) def main(): tests = [ [7, 4, "7 x 4 = 28"], [1, 20, "1 x 20 = 20"], [3, 13, "3 x 13 = 39"], [10, 3, "10 x 3 = 30"], [16, 12, "16 x 12 = 192"] ] for t in tests: res = primarySchool(t[0], t[1]) ans = t[2] if ans == res: print("PASSED: primarySchool({}, {}) returned {}" .format(t[0], t[1], res)) else: print("FAILED: primarySchool({}, {}) returned {}, answer: {}" .format(t[0], t[1], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights primary school problem<commit_after>#!/usr/local/bin/python # Code Fights Primary School Problem class Rectangle(object): def __init__(self, height, width): self.height = height self.width = width def __str__(self): return '{} x {} = {}'.format(self.height, self.width, self.area) @property def area(self): return self.height * self.width @area.setter def area(self): self.area = self.height * self.width def primarySchool(height, width): return str(Rectangle(height, width)) def main(): tests = [ [7, 4, "7 x 4 = 28"], [1, 20, "1 x 20 = 20"], [3, 13, "3 x 13 = 39"], [10, 3, "10 x 3 = 30"], [16, 12, "16 x 12 = 192"] ] for t in tests: res = primarySchool(t[0], t[1]) ans = t[2] if ans == res: print("PASSED: primarySchool({}, {}) returned {}" .format(t[0], t[1], res)) else: print("FAILED: primarySchool({}, {}) returned {}, answer: {}" .format(t[0], t[1], res, ans)) if __name__ == '__main__': main()
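One caveat worth flagging in the record above: the @area.setter takes no value argument and assigns to self.area, so any assignment to .area raises a TypeError (and would recurse even with the right signature); the tests pass only because nothing ever assigns to it. A corrected sketch keeps area as a read-only derived property.

class Rectangle(object):
    def __init__(self, height, width):
        self.height = height
        self.width = width

    @property
    def area(self):
        # Computed on read; no setter is needed for a derived value.
        return self.height * self.width

    def __str__(self):
        return '{} x {} = {}'.format(self.height, self.width, self.area)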
a3332387eea89daaecd9400c5555657132a2cd9c
IPython/terminal/ptshell.py
IPython/terminal/ptshell.py
raise DeprecationWarning("""DEPRECATED: After Popular request and decision from the BDFL: `IPython.terminal.ptshell` has been moved back to `IPython.terminal.interactiveshell` during the beta cycle (after IPython 5.0.beta3) Sorry about that. This file will be removed in 5.0 rc or final. """)
Add a warning things have been moved back.
Add a warning things have been moved back.
Python
bsd-3-clause
ipython/ipython,ipython/ipython
Add a warning things have been moved back.
raise DeprecationWarning("""DEPRECATED: After Popular request and decision from the BDFL: `IPython.terminal.ptshell` has been moved back to `IPython.terminal.interactiveshell` during the beta cycle (after IPython 5.0.beta3) Sorry about that. This file will be removed in 5.0 rc or final. """)
<commit_before><commit_msg>Add a warning things have been moved back.<commit_after>
raise DeprecationWarning("""DEPRECATED: After Popular request and decision from the BDFL: `IPython.terminal.ptshell` has been moved back to `IPython.terminal.interactiveshell` during the beta cycle (after IPython 5.0.beta3) Sorry about that. This file will be removed in 5.0 rc or final. """)
Add a warning things have been moved back.raise DeprecationWarning("""DEPRECATED: After Popular request and decision from the BDFL: `IPython.terminal.ptshell` has been moved back to `IPython.terminal.interactiveshell` during the beta cycle (after IPython 5.0.beta3) Sorry about that. This file will be removed in 5.0 rc or final. """)
<commit_before><commit_msg>Add a warning things have been moved back.<commit_after>raise DeprecationWarning("""DEPRECATED: After Popular request and decision from the BDFL: `IPython.terminal.ptshell` has been moved back to `IPython.terminal.interactiveshell` during the beta cycle (after IPython 5.0.beta3) Sorry about that. This file will be removed in 5.0 rc or final. """)
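For anyone following the warning above, the replacement import as of IPython 5 is:

# The shell class now lives in the restored module location.
from IPython.terminal.interactiveshell import TerminalInteractiveShell

shell = TerminalInteractiveShell.instance()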
9b730bf54c807fbdc71d56fb1758b27cee6a13a9
fuel_test/openstack_swift_compact/prepare_for_tempest.py
fuel_test/openstack_swift_compact/prepare_for_tempest.py
import unittest from devops.helpers import ssh from fuel_test.helpers import safety_revert_nodes, tempest_write_config, make_tempest_objects, tempest_build_config_essex from fuel_test.openstack_swift_compact.openstack_swift_compact_test_case import OpenStackSwiftCompactTestCase class PrepareOpenStackSwiftForTempest(OpenStackSwiftCompactTestCase): def setUp(self): self.environment = self.ci().get_environment() def prepare_for_tempest_if_swift(self): safety_revert_nodes(self.environment.nodes, 'openstack') auth_host = self.ci().get_public_virtual_ip() remote = ssh( self.ci().nodes().controllers[0].ip_address, username='root', password='r00tme').sudo.ssh image_ref, image_ref_alt = make_tempest_objects(auth_host, remote) tempest_write_config(tempest_build_config_essex(auth_host, image_ref, image_ref_alt)) if __name__ == '__main__': unittest.main()
Add tempest config for swift compact environment
Add tempest config for swift compact environment
Python
apache-2.0
eayunstack/fuel-library,ddepaoli3/fuel-library-dev,ddepaoli3/fuel-library-dev,SmartInfrastructures/fuel-library-dev,huntxu/fuel-library,Metaswitch/fuel-library,zhaochao/fuel-library,eayunstack/fuel-library,huntxu/fuel-library,xarses/fuel-library,huntxu/fuel-library,ddepaoli3/fuel-library-dev,slystopad/fuel-lib,ddepaoli3/fuel-library-dev,zhaochao/fuel-library,stackforge/fuel-library,SmartInfrastructures/fuel-library-dev,ddepaoli3/fuel-library-dev,SmartInfrastructures/fuel-library-dev,slystopad/fuel-lib,huntxu/fuel-library,huntxu/fuel-library,slystopad/fuel-lib,zhaochao/fuel-library,eayunstack/fuel-library,zhaochao/fuel-library,xarses/fuel-library,xarses/fuel-library,xarses/fuel-library,slystopad/fuel-lib,stackforge/fuel-library,stackforge/fuel-library,Metaswitch/fuel-library,Metaswitch/fuel-library,eayunstack/fuel-library,SmartInfrastructures/fuel-library-dev,stackforge/fuel-library,zhaochao/fuel-library,eayunstack/fuel-library,SmartInfrastructures/fuel-library-dev,Metaswitch/fuel-library
Add tempest config for swift compact environment
import unittest from devops.helpers import ssh from fuel_test.helpers import safety_revert_nodes, tempest_write_config, make_tempest_objects, tempest_build_config_essex from fuel_test.openstack_swift_compact.openstack_swift_compact_test_case import OpenStackSwiftCompactTestCase class PrepareOpenStackSwiftForTempest(OpenStackSwiftCompactTestCase): def setUp(self): self.environment = self.ci().get_environment() def prepare_for_tempest_if_swift(self): safety_revert_nodes(self.environment.nodes, 'openstack') auth_host = self.ci().get_public_virtual_ip() remote = ssh( self.ci().nodes().controllers[0].ip_address, username='root', password='r00tme').sudo.ssh image_ref, image_ref_alt = make_tempest_objects(auth_host, remote) tempest_write_config(tempest_build_config_essex(auth_host, image_ref, image_ref_alt)) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add tempest config for swift compact environment<commit_after>
import unittest from devops.helpers import ssh from fuel_test.helpers import safety_revert_nodes, tempest_write_config, make_tempest_objects, tempest_build_config_essex from fuel_test.openstack_swift_compact.openstack_swift_compact_test_case import OpenStackSwiftCompactTestCase class PrepareOpenStackSwiftForTempest(OpenStackSwiftCompactTestCase): def setUp(self): self.environment = self.ci().get_environment() def prepare_for_tempest_if_swift(self): safety_revert_nodes(self.environment.nodes, 'openstack') auth_host = self.ci().get_public_virtual_ip() remote = ssh( self.ci().nodes().controllers[0].ip_address, username='root', password='r00tme').sudo.ssh image_ref, image_ref_alt = make_tempest_objects(auth_host, remote) tempest_write_config(tempest_build_config_essex(auth_host, image_ref, image_ref_alt)) if __name__ == '__main__': unittest.main()
Add tempest config for swift compact environmentimport unittest from devops.helpers import ssh from fuel_test.helpers import safety_revert_nodes, tempest_write_config, make_tempest_objects, tempest_build_config_essex from fuel_test.openstack_swift_compact.openstack_swift_compact_test_case import OpenStackSwiftCompactTestCase class PrepareOpenStackSwiftForTempest(OpenStackSwiftCompactTestCase): def setUp(self): self.environment = self.ci().get_environment() def prepare_for_tempest_if_swift(self): safety_revert_nodes(self.environment.nodes, 'openstack') auth_host = self.ci().get_public_virtual_ip() remote = ssh( self.ci().nodes().controllers[0].ip_address, username='root', password='r00tme').sudo.ssh image_ref, image_ref_alt = make_tempest_objects(auth_host, remote) tempest_write_config(tempest_build_config_essex(auth_host, image_ref, image_ref_alt)) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add tempest config for swift compact environment<commit_after>import unittest from devops.helpers import ssh from fuel_test.helpers import safety_revert_nodes, tempest_write_config, make_tempest_objects, tempest_build_config_essex from fuel_test.openstack_swift_compact.openstack_swift_compact_test_case import OpenStackSwiftCompactTestCase class PrepareOpenStackSwiftForTempest(OpenStackSwiftCompactTestCase): def setUp(self): self.environment = self.ci().get_environment() def prepare_for_tempest_if_swift(self): safety_revert_nodes(self.environment.nodes, 'openstack') auth_host = self.ci().get_public_virtual_ip() remote = ssh( self.ci().nodes().controllers[0].ip_address, username='root', password='r00tme').sudo.ssh image_ref, image_ref_alt = make_tempest_objects(auth_host, remote) tempest_write_config(tempest_build_config_essex(auth_host, image_ref, image_ref_alt)) if __name__ == '__main__': unittest.main()
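Note that unittest discovery only collects methods named test_*, so prepare_for_tempest_if_swift will not run under plain discovery; a sketch of invoking it explicitly with stock unittest machinery (assuming the class above is in scope):

import unittest

# Build a one-item suite that runs the named method directly.
suite = unittest.TestSuite()
suite.addTest(PrepareOpenStackSwiftForTempest('prepare_for_tempest_if_swift'))
unittest.TextTestRunner().run(suite)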
450c79939e20f6d9d49729f6c941e7fc8e06950b
examples/ultracoldNeutralPlasma.py
examples/ultracoldNeutralPlasma.py
import ucilib.Sim as Sim import ucilib.BorisUpdater as BorisUpdater import numpy as np # Some helpful constants. fund_charge = 1.602176565e-19 # Mass of Be^+ ions. ion_mass = 8.9465 * 1.673e-27 # Create a simulation with n particles. n = 10000 s = Sim.Sim() s.ptcls.set_nptcls(n) # 1/e radius of cloud. s.ptcls.rmax = 2.0e-4 s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass) # Turn the first n/2 particles into electrons by setting their mass and # charge. s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2) s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2) # Finally we set the updater. s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
Add an example simulation setup for an ultracold neutral plasma.
Add an example simulation setup for an ultracold neutral plasma.
Python
mit
Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions,hosseinsadeghi/ultracold-ions,Tech-XCorp/ultracold-ions
Add an example simulation setup for an ultracold neutral plasma.
import ucilib.Sim as Sim import ucilib.BorisUpdater as BorisUpdater import numpy as np # Some helpful constants. fund_charge = 1.602176565e-19 # Mass of Be^+ ions. ion_mass = 8.9465 * 1.673e-27 # Create a simulation with n particles. n = 10000 s = Sim.Sim() s.ptcls.set_nptcls(n) # 1/e radius of cloud. s.ptcls.rmax = 2.0e-4 s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass) # Turn the first n/2 particles into electrons by setting their mass and # charge. s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2) s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2) # Finally we set the updater. s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
<commit_before><commit_msg>Add an example simulation setup for an ultracold neutral plasma.<commit_after>
import ucilib.Sim as Sim import ucilib.BorisUpdater as BorisUpdater import numpy as np # Some helpful constants. fund_charge = 1.602176565e-19 # Mass of Be^+ ions. ion_mass = 8.9465 * 1.673e-27 # Create a simulation with n particles. n = 10000 s = Sim.Sim() s.ptcls.set_nptcls(n) # 1/e radius of cloud. s.ptcls.rmax = 2.0e-4 s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass) # Turn the first n/2 particles into electrons by setting their mass and # charge. s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2) s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2) # Finally we set the updater. s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
Add an example simulation setup for an ultracold neutral plasma.import ucilib.Sim as Sim import ucilib.BorisUpdater as BorisUpdater import numpy as np # Some helpful constants. fund_charge = 1.602176565e-19 # Mass of Be^+ ions. ion_mass = 8.9465 * 1.673e-27 # Create a simulation with n particles. n = 10000 s = Sim.Sim() s.ptcls.set_nptcls(n) # 1/e radius of cloud. s.ptcls.rmax = 2.0e-4 s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass) # Turn the first n/2 particles into electrons by setting their mass and # charge. s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2) s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2) # Finally we set the updater. s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
<commit_before><commit_msg>Add an example simulation setup for an ultracold neutral plasma.<commit_after>import ucilib.Sim as Sim import ucilib.BorisUpdater as BorisUpdater import numpy as np # Some helpful constants. fund_charge = 1.602176565e-19 # Mass of Be^+ ions. ion_mass = 8.9465 * 1.673e-27 # Create a simulation with n particles. n = 10000 s = Sim.Sim() s.ptcls.set_nptcls(n) # 1/e radius of cloud. s.ptcls.rmax = 2.0e-4 s.ptcls.init_ptcls(charge = fund_charge, mass = ion_mass) # Turn the first n/2 particles into electrons by setting their mass and # charge. s.ptcls.q()[:(n/2)] = -fund_charge * np.ones(n/2) s.ptcls.m()[:(n/2)] = 9.1e-31 * np.ones(n/2) # Finally we set the updater. s.updater = BorisUpdater.BorisUpdater(s.ctx, s.queue)
d5347701e21bf13a2bca15978a8f5a81e2d2061a
go/base/management/commands/go_list_opt_outs.py
go/base/management/commands/go_list_opt_outs.py
from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User from go.base.utils import vumi_api_for_user from go.vumitools.opt_out import OptOutStore class Command(BaseCommand): help = "List opt-outs from a particular account" LOCAL_OPTIONS = ( make_option('--email-address', dest='email-address', help='Email address for the Vumi Go user'), ) option_list = BaseCommand.option_list + LOCAL_OPTIONS def handle(self, *args, **options): options = options.copy() self.handle_validated(*args, **options) def handle_validated(self, *args, **options): email_address = options['email-address'] try: user = User.objects.get(username=email_address) except User.DoesNotExist, e: raise CommandError(e) user_api = vumi_api_for_user(user) self.show_opt_outs(user_api, email_address) def show_opt_outs(self, user_api, email_address): opt_out_store = OptOutStore(user_api.manager, user_api.user_account_key) opt_outs = opt_out_store.list_opt_outs() print "Address Type, Address, Message ID, Timestamp" print "============================================" for key in opt_outs: addr_type, _colon, addr = key.partition(":") opt_out = opt_out_store.get_opt_out(addr_type, addr) print "%s, %s, %s, %s" % (addr_type, addr, opt_out.message, opt_out.created_at)
Add management command for listing opt outs.
Add management command for listing opt outs.
Python
bsd-3-clause
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
Add management command for listing opt outs.
from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User from go.base.utils import vumi_api_for_user from go.vumitools.opt_out import OptOutStore class Command(BaseCommand): help = "List opt-outs from a particular account" LOCAL_OPTIONS = ( make_option('--email-address', dest='email-address', help='Email address for the Vumi Go user'), ) option_list = BaseCommand.option_list + LOCAL_OPTIONS def handle(self, *args, **options): options = options.copy() self.handle_validated(*args, **options) def handle_validated(self, *args, **options): email_address = options['email-address'] try: user = User.objects.get(username=email_address) except User.DoesNotExist, e: raise CommandError(e) user_api = vumi_api_for_user(user) self.show_opt_outs(user_api, email_address) def show_opt_outs(self, user_api, email_address): opt_out_store = OptOutStore(user_api.manager, user_api.user_account_key) opt_outs = opt_out_store.list_opt_outs() print "Address Type, Address, Message ID, Timestamp" print "============================================" for key in opt_outs: addr_type, _colon, addr = key.partition(":") opt_out = opt_out_store.get_opt_out(addr_type, addr) print "%s, %s, %s, %s" % (addr_type, addr, opt_out.message, opt_out.created_at)
<commit_before><commit_msg>Add management command for listing opt outs.<commit_after>
from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User from go.base.utils import vumi_api_for_user from go.vumitools.opt_out import OptOutStore class Command(BaseCommand): help = "List opt-outs from a particular account" LOCAL_OPTIONS = ( make_option('--email-address', dest='email-address', help='Email address for the Vumi Go user'), ) option_list = BaseCommand.option_list + LOCAL_OPTIONS def handle(self, *args, **options): options = options.copy() self.handle_validated(*args, **options) def handle_validated(self, *args, **options): email_address = options['email-address'] try: user = User.objects.get(username=email_address) except User.DoesNotExist, e: raise CommandError(e) user_api = vumi_api_for_user(user) self.show_opt_outs(user_api, email_address) def show_opt_outs(self, user_api, email_address): opt_out_store = OptOutStore(user_api.manager, user_api.user_account_key) opt_outs = opt_out_store.list_opt_outs() print "Address Type, Address, Message ID, Timestamp" print "============================================" for key in opt_outs: addr_type, _colon, addr = key.partition(":") opt_out = opt_out_store.get_opt_out(addr_type, addr) print "%s, %s, %s, %s" % (addr_type, addr, opt_out.message, opt_out.created_at)
Add management command for listing opt outs.from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User from go.base.utils import vumi_api_for_user from go.vumitools.opt_out import OptOutStore class Command(BaseCommand): help = "List opt-outs from a particular account" LOCAL_OPTIONS = ( make_option('--email-address', dest='email-address', help='Email address for the Vumi Go user'), ) option_list = BaseCommand.option_list + LOCAL_OPTIONS def handle(self, *args, **options): options = options.copy() self.handle_validated(*args, **options) def handle_validated(self, *args, **options): email_address = options['email-address'] try: user = User.objects.get(username=email_address) except User.DoesNotExist, e: raise CommandError(e) user_api = vumi_api_for_user(user) self.show_opt_outs(user_api, email_address) def show_opt_outs(self, user_api, email_address): opt_out_store = OptOutStore(user_api.manager, user_api.user_account_key) opt_outs = opt_out_store.list_opt_outs() print "Address Type, Address, Message ID, Timestamp" print "============================================" for key in opt_outs: addr_type, _colon, addr = key.partition(":") opt_out = opt_out_store.get_opt_out(addr_type, addr) print "%s, %s, %s, %s" % (addr_type, addr, opt_out.message, opt_out.created_at)
<commit_before><commit_msg>Add management command for listing opt outs.<commit_after>from optparse import make_option from django.core.management.base import BaseCommand, CommandError from django.contrib.auth.models import User from go.base.utils import vumi_api_for_user from go.vumitools.opt_out import OptOutStore class Command(BaseCommand): help = "List opt-outs from a particular account" LOCAL_OPTIONS = ( make_option('--email-address', dest='email-address', help='Email address for the Vumi Go user'), ) option_list = BaseCommand.option_list + LOCAL_OPTIONS def handle(self, *args, **options): options = options.copy() self.handle_validated(*args, **options) def handle_validated(self, *args, **options): email_address = options['email-address'] try: user = User.objects.get(username=email_address) except User.DoesNotExist, e: raise CommandError(e) user_api = vumi_api_for_user(user) self.show_opt_outs(user_api, email_address) def show_opt_outs(self, user_api, email_address): opt_out_store = OptOutStore(user_api.manager, user_api.user_account_key) opt_outs = opt_out_store.list_opt_outs() print "Address Type, Address, Message ID, Timestamp" print "============================================" for key in opt_outs: addr_type, _colon, addr = key.partition(":") opt_out = opt_out_store.get_opt_out(addr_type, addr) print "%s, %s, %s, %s" % (addr_type, addr, opt_out.message, opt_out.created_at)
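Since go_list_opt_outs.py is a standard Django management command, the listing above is normally driven through manage.py. A minimal sketch of exercising it programmatically; the option key mirrors the dest declared in LOCAL_OPTIONS, and the email address is a placeholder:

from django.core.management import call_command

# Equivalent to: python manage.py go_list_opt_outs --email-address=user@example.com
# Dict expansion is needed because the declared dest, 'email-address',
# is not a valid Python identifier.
call_command('go_list_opt_outs', **{'email-address': 'user@example.com'})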
1e6d657611eeb3b4bb025954bd1478c6888e38d1
python/examples/submit_survey.py
python/examples/submit_survey.py
#!/usr/bin/env python import sys sys.path += ['..'] try: import json except ImportError: import simplejson as json from epidb.client import EpiDBClient api_key = 'your-epidb-api-key-here' data = { 'user_id': '1c66bb91-33fd-4c6c-9c11-8ddd94164ae8', 'date': '2009-09-09 09:09:09', 'answers': { 'q1': 1, 'q2': True, 'q3': [ 1, 2, 3 ], 'q4': 'Jakarta' } } param = json.dumps(data) client = EpiDBClient(api_key) res = client.survey_submit(param) result = json.loads(res) status = result['stat'] print "status:", status if status == 'ok': print "id:", result['id'] else: print "error code:", result['code'] print " msg:", result['msg']
Add example: submit survey result.
[python] Add example: submit survey result.
Python
agpl-3.0
ISIFoundation/influenzanet-epidb-client
[python] Add example: submit survey result.
#!/usr/bin/env python import sys sys.path += ['..'] try: import json except ImportError: import simplejson as json from epidb.client import EpiDBClient api_key = 'your-epidb-api-key-here' data = { 'user_id': '1c66bb91-33fd-4c6c-9c11-8ddd94164ae8', 'date': '2009-09-09 09:09:09', 'answers': { 'q1': 1, 'q2': True, 'q3': [ 1, 2, 3 ], 'q4': 'Jakarta' } } param = json.dumps(data) client = EpiDBClient(api_key) res = client.survey_submit(param) result = json.loads(res) status = result['stat'] print "status:", status if status == 'ok': print "id:", result['id'] else: print "error code:", result['code'] print " msg:", result['msg']
<commit_before><commit_msg>[python] Add example: submit survey result.<commit_after>
#!/usr/bin/env python import sys sys.path += ['..'] try: import json except ImportError: import simplejson as json from epidb.client import EpiDBClient api_key = 'your-epidb-api-key-here' data = { 'user_id': '1c66bb91-33fd-4c6c-9c11-8ddd94164ae8', 'date': '2009-09-09 09:09:09', 'answers': { 'q1': 1, 'q2': True, 'q3': [ 1, 2, 3 ], 'q4': 'Jakarta' } } param = json.dumps(data) client = EpiDBClient(api_key) res = client.survey_submit(param) result = json.loads(res) status = result['stat'] print "status:", status if status == 'ok': print "id:", result['id'] else: print "error code:", result['code'] print " msg:", result['msg']
[python] Add example: submit survey result.#!/usr/bin/env python import sys sys.path += ['..'] try: import json except ImportError: import simplejson as json from epidb.client import EpiDBClient api_key = 'your-epidb-api-key-here' data = { 'user_id': '1c66bb91-33fd-4c6c-9c11-8ddd94164ae8', 'date': '2009-09-09 09:09:09', 'answers': { 'q1': 1, 'q2': True, 'q3': [ 1, 2, 3 ], 'q4': 'Jakarta' } } param = json.dumps(data) client = EpiDBClient(api_key) res = client.survey_submit(param) result = json.loads(res) status = result['stat'] print "status:", status if status == 'ok': print "id:", result['id'] else: print "error code:", result['code'] print " msg:", result['msg']
<commit_before><commit_msg>[python] Add example: submit survey result.<commit_after>#!/usr/bin/env python import sys sys.path += ['..'] try: import json except ImportError: import simplejson as json from epidb.client import EpiDBClient api_key = 'your-epidb-api-key-here' data = { 'user_id': '1c66bb91-33fd-4c6c-9c11-8ddd94164ae8', 'date': '2009-09-09 09:09:09', 'answers': { 'q1': 1, 'q2': True, 'q3': [ 1, 2, 3 ], 'q4': 'Jakarta' } } param = json.dumps(data) client = EpiDBClient(api_key) res = client.survey_submit(param) result = json.loads(res) status = result['stat'] print "status:", status if status == 'ok': print "id:", result['id'] else: print "error code:", result['code'] print " msg:", result['msg']
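The branching at the end of the script above implies a small response contract for the EpiDB survey endpoint. The payload shapes below are inferred purely from the key accesses in the code (stat, id, code, msg) and are illustrative, not documented API output:

# Illustrative only -- shapes inferred from the result['...'] accesses above.
success = {"stat": "ok", "id": "generated-survey-id"}
failure = {"stat": "fail", "code": 401, "msg": "invalid api key"}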
b452683c2324b2ca7a1b43dbc8dc83354530d04a
scripts/migrate_zendeley_provider_names.py
scripts/migrate_zendeley_provider_names.py
# -*- coding: utf-8 -*- """Migration to add the correct provider_name for Zotero and Mendeley ExternalAccounts that are missing it. """ import sys import logging from scripts import utils as scripts_utils from modularodm import Q from website.app import init_app from website.models import ExternalAccount logger = logging.getLogger(__name__) def get_targets(): return ExternalAccount.find(Q('provider', 'eq', 'zotero') | Q('provider', 'eq', 'mendeley')) name_map = { 'zotero': 'Zotero', 'mendeley': 'Mendeley', } def migrate_extaccount(acct, dry=True): if not acct.provider_name: logger.info('Missing provider name for ExternalAccount {}'.format(acct._id)) provider_name = name_map[acct.provider] logger.info('setting to {}'.format(acct._id)) if not dry: acct.provider_name = provider_name acct.save() return True return False def main(dry=True): count = 0 for each in get_targets(): migrated = migrate_extaccount(each, dry=dry) if migrated: count += 1 logger.info('Migrated {} ExternalAccounts'.format(count)) if __name__ == '__main__': dry = 'dry' in sys.argv # Log to file if not dry: scripts_utils.add_file_logger(logger, __file__) init_app(routes=False, mfr=False, set_backend=True) main(dry=dry)
Add migration to add correct provider_name
Add migration to add correct provider_name
Python
apache-2.0
DanielSBrown/osf.io,jeffreyliu3230/osf.io,bdyetton/prettychart,mfraezz/osf.io,lamdnhan/osf.io,kch8qx/osf.io,GaryKriebel/osf.io,MerlinZhang/osf.io,jinluyuan/osf.io,lyndsysimon/osf.io,emetsger/osf.io,cldershem/osf.io,caneruguz/osf.io,SSJohns/osf.io,felliott/osf.io,GageGaskins/osf.io,cldershem/osf.io,TomHeatwole/osf.io,zamattiac/osf.io,Ghalko/osf.io,cwisecarver/osf.io,KAsante95/osf.io,brianjgeiger/osf.io,binoculars/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,brandonPurvis/osf.io,TomHeatwole/osf.io,zamattiac/osf.io,mfraezz/osf.io,lyndsysimon/osf.io,petermalcolm/osf.io,chrisseto/osf.io,cslzchen/osf.io,himanshuo/osf.io,himanshuo/osf.io,cslzchen/osf.io,chennan47/osf.io,samchrisinger/osf.io,zachjanicki/osf.io,chennan47/osf.io,samanehsan/osf.io,CenterForOpenScience/osf.io,mluo613/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,reinaH/osf.io,alexschiller/osf.io,amyshi188/osf.io,jeffreyliu3230/osf.io,laurenrevere/osf.io,zamattiac/osf.io,GageGaskins/osf.io,cosenal/osf.io,aaxelb/osf.io,lyndsysimon/osf.io,danielneis/osf.io,jmcarp/osf.io,kwierman/osf.io,HarryRybacki/osf.io,cslzchen/osf.io,abought/osf.io,jolene-esposito/osf.io,GaryKriebel/osf.io,lamdnhan/osf.io,billyhunt/osf.io,zachjanicki/osf.io,cosenal/osf.io,RomanZWang/osf.io,cosenal/osf.io,jnayak1/osf.io,cldershem/osf.io,sbt9uc/osf.io,caseyrygt/osf.io,jeffreyliu3230/osf.io,jnayak1/osf.io,adlius/osf.io,monikagrabowska/osf.io,haoyuchen1992/osf.io,asanfilippo7/osf.io,lyndsysimon/osf.io,billyhunt/osf.io,brianjgeiger/osf.io,Ghalko/osf.io,cldershem/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,Johnetordoff/osf.io,HarryRybacki/osf.io,cslzchen/osf.io,alexschiller/osf.io,icereval/osf.io,dplorimer/osf,TomBaxter/osf.io,Johnetordoff/osf.io,RomanZWang/osf.io,kwierman/osf.io,samanehsan/osf.io,amyshi188/osf.io,jmcarp/osf.io,arpitar/osf.io,dplorimer/osf,danielneis/osf.io,brandonPurvis/osf.io,revanthkolli/osf.io,jmcarp/osf.io,brianjgeiger/osf.io,wearpants/osf.io,erinspace/osf.io,jnayak1/osf.io,njantrania/osf.io,fabianvf/osf.io,caseyrollins/osf.io,RomanZWang/osf.io,samchrisinger/osf.io,mluke93/osf.io,GaryKriebel/osf.io,baylee-d/osf.io,caseyrygt/osf.io,jolene-esposito/osf.io,GaryKriebel/osf.io,rdhyee/osf.io,DanielSBrown/osf.io,ckc6cz/osf.io,cwisecarver/osf.io,baylee-d/osf.io,arpitar/osf.io,bdyetton/prettychart,felliott/osf.io,petermalcolm/osf.io,mattclark/osf.io,felliott/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,kch8qx/osf.io,himanshuo/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,jnayak1/osf.io,amyshi188/osf.io,alexschiller/osf.io,rdhyee/osf.io,kwierman/osf.io,dplorimer/osf,njantrania/osf.io,fabianvf/osf.io,monikagrabowska/osf.io,sbt9uc/osf.io,monikagrabowska/osf.io,jolene-esposito/osf.io,Ghalko/osf.io,cosenal/osf.io,samchrisinger/osf.io,leb2dg/osf.io,saradbowman/osf.io,danielneis/osf.io,CenterForOpenScience/osf.io,billyhunt/osf.io,wearpants/osf.io,jinluyuan/osf.io,GageGaskins/osf.io,jinluyuan/osf.io,acshi/osf.io,alexschiller/osf.io,HarryRybacki/osf.io,zkraime/osf.io,HalcyonChimera/osf.io,binoculars/osf.io,acshi/osf.io,adlius/osf.io,doublebits/osf.io,pattisdr/osf.io,hmoco/osf.io,lyndsysimon/osf.io,wearpants/osf.io,doublebits/osf.io,barbour-em/osf.io,chrisseto/osf.io,SSJohns/osf.io,chrisseto/osf.io,pattisdr/osf.io,mluke93/osf.io,abought/osf.io,kch8qx/osf.io,mattclark/osf.io,mfraezz/osf.io,aaxelb/osf.io,emetsger/osf.io,rdhyee/osf.io,reinaH/osf.io,emetsger/osf.io,mluke93/osf.io,mluo613/osf.io,crcresearch/osf.io,ZobairAlijan/osf.io,Nesiehr/osf.io,ticklemepierce/osf.io,ZobairAlijan/osf.io,laurenrevere/osf.io,ticklemepierce/osf.io,caneruguz/osf.io,felliott/osf.io,HarryRybacki/osf.io,arpitar/osf.io,zachjanicki/osf.io,crcresearch/osf.io,MerlinZhang/osf.io,aaxelb/osf.io,hmoco/osf.io,adlius/osf.io,icereval/osf.io,Nesiehr/osf.io,petermalcolm/osf.io,HalcyonChimera/osf.io,KAsante95/osf.io,bdyetton/prettychart,brandonPurvis/osf.io,samanehsan/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,reinaH/osf.io,ticklemepierce/osf.io,binoculars/osf.io,zkraime/osf.io,revanthkolli/osf.io,Nesiehr/osf.io,brandonPurvis/osf.io,caneruguz/osf.io,ckc6cz/osf.io,DanielSBrown/osf.io,brandonPurvis/osf.io,doublebits/osf.io,leb2dg/osf.io,ticklemepierce/osf.io,TomBaxter/osf.io,ckc6cz/osf.io,caseyrygt/osf.io,caseyrollins/osf.io,fabianvf/osf.io,erinspace/osf.io,caseyrygt/osf.io
Add migration to add correct provider_name
# -*- coding: utf-8 -*- """Migration to add the correct provider_name for Zotero and Mendeley ExternalAccounts that are missing it. """ import sys import logging from scripts import utils as scripts_utils from modularodm import Q from website.app import init_app from website.models import ExternalAccount logger = logging.getLogger(__name__) def get_targets(): return ExternalAccount.find(Q('provider', 'eq', 'zotero') | Q('provider', 'eq', 'mendeley')) name_map = { 'zotero': 'Zotero', 'mendeley': 'Mendeley', } def migrate_extaccount(acct, dry=True): if not acct.provider_name: logger.info('Missing provider name for ExternalAccount {}'.format(acct._id)) provider_name = name_map[acct.provider] logger.info('setting to {}'.format(acct._id)) if not dry: acct.provider_name = provider_name acct.save() return True return False def main(dry=True): count = 0 for each in get_targets(): migrated = migrate_extaccount(each, dry=dry) if migrated: count += 1 logger.info('Migrated {} ExternalAccounts'.format(count)) if __name__ == '__main__': dry = 'dry' in sys.argv # Log to file if not dry: scripts_utils.add_file_logger(logger, __file__) init_app(routes=False, mfr=False, set_backend=True) main(dry=dry)
<commit_before><commit_msg>Add migration to add correct provider_name<commit_after>
# -*- coding: utf-8 -*- """Migration to add the correct provider_name for Zotero and Mendeley ExternalAccounts that are missing it. """ import sys import logging from scripts import utils as scripts_utils from modularodm import Q from website.app import init_app from website.models import ExternalAccount logger = logging.getLogger(__name__) def get_targets(): return ExternalAccount.find(Q('provider', 'eq', 'zotero') | Q('provider', 'eq', 'mendeley')) name_map = { 'zotero': 'Zotero', 'mendeley': 'Mendeley', } def migrate_extaccount(acct, dry=True): if not acct.provider_name: logger.info('Missing provider name for ExternalAccount {}'.format(acct._id)) provider_name = name_map[acct.provider] logger.info('setting to {}'.format(acct._id)) if not dry: acct.provider_name = provider_name acct.save() return True return False def main(dry=True): count = 0 for each in get_targets(): migrated = migrate_extaccount(each, dry=dry) if migrated: count += 1 logger.info('Migrated {} ExternalAccounts'.format(count)) if __name__ == '__main__': dry = 'dry' in sys.argv # Log to file if not dry: scripts_utils.add_file_logger(logger, __file__) init_app(routes=False, mfr=False, set_backend=True) main(dry=dry)
Add migration to add correct provider_name# -*- coding: utf-8 -*- """Migration to add the correct provider_name for Zotero and Mendeley ExternalAccounts that are missing it. """ import sys import logging from scripts import utils as scripts_utils from modularodm import Q from website.app import init_app from website.models import ExternalAccount logger = logging.getLogger(__name__) def get_targets(): return ExternalAccount.find(Q('provider', 'eq', 'zotero') | Q('provider', 'eq', 'mendeley')) name_map = { 'zotero': 'Zotero', 'mendeley': 'Mendeley', } def migrate_extaccount(acct, dry=True): if not acct.provider_name: logger.info('Missing provider name for ExternalAccount {}'.format(acct._id)) provider_name = name_map[acct.provider] logger.info('setting to {}'.format(acct._id)) if not dry: acct.provider_name = provider_name acct.save() return True return False def main(dry=True): count = 0 for each in get_targets(): migrated = migrate_extaccount(each, dry=dry) if migrated: count += 1 logger.info('Migrated {} ExternalAccounts'.format(count)) if __name__ == '__main__': dry = 'dry' in sys.argv # Log to file if not dry: scripts_utils.add_file_logger(logger, __file__) init_app(routes=False, mfr=False, set_backend=True) main(dry=dry)
<commit_before><commit_msg>Add migration to add correct provider_name<commit_after># -*- coding: utf-8 -*- """Migration to add the correct provider_name for Zotero and Mendeley ExternalAccounts that are missing it. """ import sys import logging from scripts import utils as scripts_utils from modularodm import Q from website.app import init_app from website.models import ExternalAccount logger = logging.getLogger(__name__) def get_targets(): return ExternalAccount.find(Q('provider', 'eq', 'zotero') | Q('provider', 'eq', 'mendeley')) name_map = { 'zotero': 'Zotero', 'mendeley': 'Mendeley', } def migrate_extaccount(acct, dry=True): if not acct.provider_name: logger.info('Missing provider name for ExternalAccount {}'.format(acct._id)) provider_name = name_map[acct.provider] logger.info('setting to {}'.format(acct._id)) if not dry: acct.provider_name = provider_name acct.save() return True return False def main(dry=True): count = 0 for each in get_targets(): migrated = migrate_extaccount(each, dry=dry) if migrated: count += 1 logger.info('Migrated {} ExternalAccounts'.format(count)) if __name__ == '__main__': dry = 'dry' in sys.argv # Log to file if not dry: scripts_utils.add_file_logger(logger, __file__) init_app(routes=False, mfr=False, set_backend=True) main(dry=dry)
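One easily missed detail in the record above: the script's only flag parsing is the literal test 'dry' in sys.argv, and the file logger is attached only for real runs. A sketch of both invocation modes, using the script path from the record (the surrounding layout is an assumption):

import subprocess

# Dry run: log to stdout, write nothing ('dry' is matched anywhere in argv).
subprocess.check_call(['python', 'scripts/migrate_zendeley_provider_names.py', 'dry'])
# Real run: performs the migration and adds the file logger.
subprocess.check_call(['python', 'scripts/migrate_zendeley_provider_names.py'])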
ac9d7c4af5221aac94b0d599e77abd8738b63611
distarray/apps/dacluster.py
distarray/apps/dacluster.py
#!/usr/bin/env python import argparse import sys import ipcluster_tools import purge_cluster class ArgumentParser(argparse.ArgumentParser): def error(self, message): # We failed parsing the args, pass them directly to ipcluster # to see if it can handle them. ipcluster_tools.run_ipcluster(sys.argv[1:]) description = """ Start, stop and manage a IPython.parallel cluster. `dacluster` can take all the commands IPython's `ipcluster` can, and a few extras that are distarray specific. """ parser = ArgumentParser(description=description) # Print help if no command line args are supplied if len(sys.argv) == 1: parser.print_help() sys.exit(1) subparsers = parser.add_subparsers(help='subparsers') # subparses for all our commands parser_start = subparsers.add_parser('start') parser_stop = subparsers.add_parser('stop') parser_restart = subparsers.add_parser('restart') parser_clear = subparsers.add_parser('clear') parser_purge = subparsers.add_parser('purge') # set the functions each command should use parser_start.set_defaults(func=ipcluster_tools.start) parser_stop.set_defaults(func=ipcluster_tools.stop) parser_restart.set_defaults(func=ipcluster_tools.restart) parser_clear.set_defaults(func=ipcluster_tools.clear) parser_purge.set_defaults(func=purge_cluster.purge) def main(): args = parser.parse_args() args.func()
Add CLI entry point for cluster management.
Add CLI entry point for cluster management.
Python
bsd-3-clause
RaoUmer/distarray,enthought/distarray,enthought/distarray,RaoUmer/distarray
Add CLI entry point for cluster management.
#!/usr/bin/env python import argparse import sys import ipcluster_tools import purge_cluster class ArgumentParser(argparse.ArgumentParser): def error(self, message): # We failed parsing the args, pass them directly to ipcluster # to see if it can handle them. ipcluster_tools.run_ipcluster(sys.argv[1:]) description = """ Start, stop and manage a IPython.parallel cluster. `dacluster` can take all the commands IPython's `ipcluster` can, and a few extras that are distarray specific. """ parser = ArgumentParser(description=description) # Print help if no command line args are supplied if len(sys.argv) == 1: parser.print_help() sys.exit(1) subparsers = parser.add_subparsers(help='subparsers') # subparses for all our commands parser_start = subparsers.add_parser('start') parser_stop = subparsers.add_parser('stop') parser_restart = subparsers.add_parser('restart') parser_clear = subparsers.add_parser('clear') parser_purge = subparsers.add_parser('purge') # set the functions each command should use parser_start.set_defaults(func=ipcluster_tools.start) parser_stop.set_defaults(func=ipcluster_tools.stop) parser_restart.set_defaults(func=ipcluster_tools.restart) parser_clear.set_defaults(func=ipcluster_tools.clear) parser_purge.set_defaults(func=purge_cluster.purge) def main(): args = parser.parse_args() args.func()
<commit_before><commit_msg>Add CLI entry point for cluster management.<commit_after>
#!/usr/bin/env python import argparse import sys import ipcluster_tools import purge_cluster class ArgumentParser(argparse.ArgumentParser): def error(self, message): # We failed parsing the args, pass them directly to ipcluster # to see if it can handle them. ipcluster_tools.run_ipcluster(sys.argv[1:]) description = """ Start, stop and manage a IPython.parallel cluster. `dacluster` can take all the commands IPython's `ipcluster` can, and a few extras that are distarray specific. """ parser = ArgumentParser(description=description) # Print help if no command line args are supplied if len(sys.argv) == 1: parser.print_help() sys.exit(1) subparsers = parser.add_subparsers(help='subparsers') # subparses for all our commands parser_start = subparsers.add_parser('start') parser_stop = subparsers.add_parser('stop') parser_restart = subparsers.add_parser('restart') parser_clear = subparsers.add_parser('clear') parser_purge = subparsers.add_parser('purge') # set the functions each command should use parser_start.set_defaults(func=ipcluster_tools.start) parser_stop.set_defaults(func=ipcluster_tools.stop) parser_restart.set_defaults(func=ipcluster_tools.restart) parser_clear.set_defaults(func=ipcluster_tools.clear) parser_purge.set_defaults(func=purge_cluster.purge) def main(): args = parser.parse_args() args.func()
Add CLI entry point for cluster management.#!/usr/bin/env python import argparse import sys import ipcluster_tools import purge_cluster class ArgumentParser(argparse.ArgumentParser): def error(self, message): # We failed parsing the args, pass them directly to ipcluster # to see if it can handle them. ipcluster_tools.run_ipcluster(sys.argv[1:]) description = """ Start, stop and manage a IPython.parallel cluster. `dacluster` can take all the commands IPython's `ipcluster` can, and a few extras that are distarray specific. """ parser = ArgumentParser(description=description) # Print help if no command line args are supplied if len(sys.argv) == 1: parser.print_help() sys.exit(1) subparsers = parser.add_subparsers(help='subparsers') # subparses for all our commands parser_start = subparsers.add_parser('start') parser_stop = subparsers.add_parser('stop') parser_restart = subparsers.add_parser('restart') parser_clear = subparsers.add_parser('clear') parser_purge = subparsers.add_parser('purge') # set the functions each command should use parser_start.set_defaults(func=ipcluster_tools.start) parser_stop.set_defaults(func=ipcluster_tools.stop) parser_restart.set_defaults(func=ipcluster_tools.restart) parser_clear.set_defaults(func=ipcluster_tools.clear) parser_purge.set_defaults(func=purge_cluster.purge) def main(): args = parser.parse_args() args.func()
<commit_before><commit_msg>Add CLI entry point for cluster management.<commit_after>#!/usr/bin/env python import argparse import sys import ipcluster_tools import purge_cluster class ArgumentParser(argparse.ArgumentParser): def error(self, message): # We failed parsing the args, pass them directly to ipcluster # to see if it can handle them. ipcluster_tools.run_ipcluster(sys.argv[1:]) description = """ Start, stop and manage a IPython.parallel cluster. `dacluster` can take all the commands IPython's `ipcluster` can, and a few extras that are distarray specific. """ parser = ArgumentParser(description=description) # Print help if no command line args are supplied if len(sys.argv) == 1: parser.print_help() sys.exit(1) subparsers = parser.add_subparsers(help='subparsers') # subparses for all our commands parser_start = subparsers.add_parser('start') parser_stop = subparsers.add_parser('stop') parser_restart = subparsers.add_parser('restart') parser_clear = subparsers.add_parser('clear') parser_purge = subparsers.add_parser('purge') # set the functions each command should use parser_start.set_defaults(func=ipcluster_tools.start) parser_stop.set_defaults(func=ipcluster_tools.stop) parser_restart.set_defaults(func=ipcluster_tools.restart) parser_clear.set_defaults(func=ipcluster_tools.clear) parser_purge.set_defaults(func=purge_cluster.purge) def main(): args = parser.parse_args() args.func()
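The dacluster record above leans on a non-obvious argparse trick: overriding ArgumentParser.error() so that arguments this parser does not recognize fall through to ipcluster instead of aborting with exit code 2. A stripped-down sketch of the same pattern, with a stand-in fallback function:

import argparse
import sys

def fallback(argv):
    # Stand-in for ipcluster_tools.run_ipcluster in the record above.
    print('delegating unrecognized arguments:', argv)

class ForgivingParser(argparse.ArgumentParser):
    def error(self, message):
        # argparse calls error() on any parse failure; route the raw
        # argv to the fallback tool instead of printing usage and exiting.
        fallback(sys.argv[1:])
        sys.exit(0)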
1d9746cc1fc6b0885aa721748c0dbc97cea88b2d
scripts/create_blitzermi_pivots.py
scripts/create_blitzermi_pivots.py
#!/usr/bin/env python import sys from sklearn.datasets import load_svmlight_file from sklearn.feature_selection import mutual_info_classif as mi import numpy as np import scipy.sparse from os.path import dirname, join from uda_common import read_feature_groups def main(args): if len(args) < 2: sys.stderr.write("Error: Two required arguments: <reduced training data> <0|1 (which domain is source/target)\n") sys.exit(-1) num_pivots = 100 data_file = args[0] direction = int(args[1]) data_dir = dirname(data_file) groups_file = join(data_dir, 'reduced-feature-groups.txt') ## Find the feature index that tells us what domain we're in: group_map = read_feature_groups(groups_file) domain_indices = group_map["Domain"] if direction == 0: source_ind, target_ind = domain_indices else: target_ind, source_ind = domain_indices ## load the data: all_X, all_y = load_svmlight_file(data_file) num_instances, num_feats = all_X.shape source_inds = np.where(all_X[:,source_ind].toarray() != 0)[0] target_inds = np.where(all_X[:,target_ind].toarray() != 0)[0] source_X = all_X[source_inds,:] target_X = all_X[target_inds,:] freq_mask = np.asarray( ((source_X.sum(0) > 10) & (target_X.sum(0) > 10)).astype('int') )[0] mi_label = mi(source_X, all_y[source_inds]) ## mi is between 0 (no information) and 1 (perfect information) mi_joint = mi_label * freq_mask ## I want high values, so I reverse the list and sort ranked_inds = np.argsort(1 - mi_joint) pivots = np.sort(ranked_inds[:num_pivots]) for pivot in pivots: print(pivot) if __name__ == '__main__': args = sys.argv[1:] main(args)
Add script for doing MI pivots the original blitzer way.
Add script for doing MI pivots the original blitzer way.
Python
apache-2.0
tmills/uda,tmills/uda
Add script for doing MI pivots the original blitzer way.
#!/usr/bin/env python import sys from sklearn.datasets import load_svmlight_file from sklearn.feature_selection import mutual_info_classif as mi import numpy as np import scipy.sparse from os.path import dirname, join from uda_common import read_feature_groups def main(args): if len(args) < 2: sys.stderr.write("Error: Two required arguments: <reduced training data> <0|1 (which domain is source/target)\n") sys.exit(-1) num_pivots = 100 data_file = args[0] direction = int(args[1]) data_dir = dirname(data_file) groups_file = join(data_dir, 'reduced-feature-groups.txt') ## Find the feature index that tells us what domain we're in: group_map = read_feature_groups(groups_file) domain_indices = group_map["Domain"] if direction == 0: source_ind, target_ind = domain_indices else: target_ind, source_ind = domain_indices ## load the data: all_X, all_y = load_svmlight_file(data_file) num_instances, num_feats = all_X.shape source_inds = np.where(all_X[:,source_ind].toarray() != 0)[0] target_inds = np.where(all_X[:,target_ind].toarray() != 0)[0] source_X = all_X[source_inds,:] target_X = all_X[target_inds,:] freq_mask = np.asarray( ((source_X.sum(0) > 10) & (target_X.sum(0) > 10)).astype('int') )[0] mi_label = mi(source_X, all_y[source_inds]) ## mi is between 0 (no information) and 1 (perfect information) mi_joint = mi_label * freq_mask ## I want high values, so I reverse the list and sort ranked_inds = np.argsort(1 - mi_joint) pivots = np.sort(ranked_inds[:num_pivots]) for pivot in pivots: print(pivot) if __name__ == '__main__': args = sys.argv[1:] main(args)
<commit_before><commit_msg>Add script for doing MI pivots the original blitzer way.<commit_after>
#!/usr/bin/env python import sys from sklearn.datasets import load_svmlight_file from sklearn.feature_selection import mutual_info_classif as mi import numpy as np import scipy.sparse from os.path import dirname, join from uda_common import read_feature_groups def main(args): if len(args) < 2: sys.stderr.write("Error: Two required arguments: <reduced training data> <0|1 (which domain is source/target)\n") sys.exit(-1) num_pivots = 100 data_file = args[0] direction = int(args[1]) data_dir = dirname(data_file) groups_file = join(data_dir, 'reduced-feature-groups.txt') ## Find the feature index that tells us what domain we're in: group_map = read_feature_groups(groups_file) domain_indices = group_map["Domain"] if direction == 0: source_ind, target_ind = domain_indices else: target_ind, source_ind = domain_indices ## load the data: all_X, all_y = load_svmlight_file(data_file) num_instances, num_feats = all_X.shape source_inds = np.where(all_X[:,source_ind].toarray() != 0)[0] target_inds = np.where(all_X[:,target_ind].toarray() != 0)[0] source_X = all_X[source_inds,:] target_X = all_X[target_inds,:] freq_mask = np.asarray( ((source_X.sum(0) > 10) & (target_X.sum(0) > 10)).astype('int') )[0] mi_label = mi(source_X, all_y[source_inds]) ## mi is between 0 (no information) and 1 (perfect information) mi_joint = mi_label * freq_mask ## I want high values, so I reverse the list and sort ranked_inds = np.argsort(1 - mi_joint) pivots = np.sort(ranked_inds[:num_pivots]) for pivot in pivots: print(pivot) if __name__ == '__main__': args = sys.argv[1:] main(args)
Add script for doing MI pivots the original blitzer way.#!/usr/bin/env python import sys from sklearn.datasets import load_svmlight_file from sklearn.feature_selection import mutual_info_classif as mi import numpy as np import scipy.sparse from os.path import dirname, join from uda_common import read_feature_groups def main(args): if len(args) < 2: sys.stderr.write("Error: Two required arguments: <reduced training data> <0|1 (which domain is source/target)\n") sys.exit(-1) num_pivots = 100 data_file = args[0] direction = int(args[1]) data_dir = dirname(data_file) groups_file = join(data_dir, 'reduced-feature-groups.txt') ## Find the feature index that tells us what domain we're in: group_map = read_feature_groups(groups_file) domain_indices = group_map["Domain"] if direction == 0: source_ind, target_ind = domain_indices else: target_ind, source_ind = domain_indices ## load the data: all_X, all_y = load_svmlight_file(data_file) num_instances, num_feats = all_X.shape source_inds = np.where(all_X[:,source_ind].toarray() != 0)[0] target_inds = np.where(all_X[:,target_ind].toarray() != 0)[0] source_X = all_X[source_inds,:] target_X = all_X[target_inds,:] freq_mask = np.asarray( ((source_X.sum(0) > 10) & (target_X.sum(0) > 10)).astype('int') )[0] mi_label = mi(source_X, all_y[source_inds]) ## mi is between 0 (no information) and 1 (perfect information) mi_joint = mi_label * freq_mask ## I want high values, so I reverse the list and sort ranked_inds = np.argsort(1 - mi_joint) pivots = np.sort(ranked_inds[:num_pivots]) for pivot in pivots: print(pivot) if __name__ == '__main__': args = sys.argv[1:] main(args)
<commit_before><commit_msg>Add script for doing MI pivots the original blitzer way.<commit_after>#!/usr/bin/env python import sys from sklearn.datasets import load_svmlight_file from sklearn.feature_selection import mutual_info_classif as mi import numpy as np import scipy.sparse from os.path import dirname, join from uda_common import read_feature_groups def main(args): if len(args) < 2: sys.stderr.write("Error: Two required arguments: <reduced training data> <0|1 (which domain is source/target)\n") sys.exit(-1) num_pivots = 100 data_file = args[0] direction = int(args[1]) data_dir = dirname(data_file) groups_file = join(data_dir, 'reduced-feature-groups.txt') ## Find the feature index that tells us what domain we're in: group_map = read_feature_groups(groups_file) domain_indices = group_map["Domain"] if direction == 0: source_ind, target_ind = domain_indices else: target_ind, source_ind = domain_indices ## load the data: all_X, all_y = load_svmlight_file(data_file) num_instances, num_feats = all_X.shape source_inds = np.where(all_X[:,source_ind].toarray() != 0)[0] target_inds = np.where(all_X[:,target_ind].toarray() != 0)[0] source_X = all_X[source_inds,:] target_X = all_X[target_inds,:] freq_mask = np.asarray( ((source_X.sum(0) > 10) & (target_X.sum(0) > 10)).astype('int') )[0] mi_label = mi(source_X, all_y[source_inds]) ## mi is between 0 (no information) and 1 (perfect information) mi_joint = mi_label * freq_mask ## I want high values, so I reverse the list and sort ranked_inds = np.argsort(1 - mi_joint) pivots = np.sort(ranked_inds[:num_pivots]) for pivot in pivots: print(pivot) if __name__ == '__main__': args = sys.argv[1:] main(args)
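The pivot selection in the record above compresses into one sentence: keep features that are frequent in both domains, score them by mutual information against the source labels, and take the top-ranked indices. A dense-array restatement with toy data (the counts and the lowered threshold are invented to keep it tiny):

import numpy as np
from sklearn.feature_selection import mutual_info_classif as mi

X_src = np.array([[1, 0, 3], [0, 0, 4], [2, 0, 5],
                  [1, 0, 6], [3, 0, 2], [0, 0, 7]])
X_tgt = np.array([[4, 9, 0], [5, 8, 0], [6, 7, 0],
                  [7, 6, 0], [8, 5, 0], [9, 4, 0]])
y_src = np.array([0, 0, 0, 1, 1, 1])

# A feature must be reasonably frequent in *both* domains to survive.
freq_mask = ((X_src.sum(0) > 3) & (X_tgt.sum(0) > 3)).astype(int)
scores = mi(X_src, y_src) * freq_mask          # joint criterion
pivots = np.sort(np.argsort(1 - scores)[:2])   # top-scoring feature indices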
fc6c7a743d4f177e76caa282c31c4cf11f2fb0bc
edgedb/lang/schema/quote.py
edgedb/lang/schema/quote.py
## # Copyright (c) 2016 MagicStack Inc. # All rights reserved. # # See LICENSE for details. ## import re from .parser.grammar import keywords _re_ident = re.compile(r'(?:[^\W\d]|\$)(?:\w|\$)*') def quote_literal(text): return "'" + text.replace("'", R"\'") + "'" def dollar_quote_literal(text): quote = '$$' qq = 0 while quote in text: if qq % 16 < 10: qq += 10 - qq % 16 quote = '${:x}$'.format(qq)[::-1] qq += 1 return quote + text + quote def disambiguate_identifier(text): if (keywords.edge_schema_keywords.get(text) or not _re_ident.fullmatch(text)): return '`{}`'.format(text) else: return text
Fix missing quoting module for schema.
Fix missing quoting module for schema.
Python
apache-2.0
edgedb/edgedb,edgedb/edgedb,edgedb/edgedb
Fix missing quoting module for schema.
## # Copyright (c) 2016 MagicStack Inc. # All rights reserved. # # See LICENSE for details. ## import re from .parser.grammar import keywords _re_ident = re.compile(r'(?:[^\W\d]|\$)(?:\w|\$)*') def quote_literal(text): return "'" + text.replace("'", R"\'") + "'" def dollar_quote_literal(text): quote = '$$' qq = 0 while quote in text: if qq % 16 < 10: qq += 10 - qq % 16 quote = '${:x}$'.format(qq)[::-1] qq += 1 return quote + text + quote def disambiguate_identifier(text): if (keywords.edge_schema_keywords.get(text) or not _re_ident.fullmatch(text)): return '`{}`'.format(text) else: return text
<commit_before><commit_msg>Fix missing quoting module for schema.<commit_after>
## # Copyright (c) 2016 MagicStack Inc. # All rights reserved. # # See LICENSE for details. ## import re from .parser.grammar import keywords _re_ident = re.compile(r'(?:[^\W\d]|\$)(?:\w|\$)*') def quote_literal(text): return "'" + text.replace("'", R"\'") + "'" def dollar_quote_literal(text): quote = '$$' qq = 0 while quote in text: if qq % 16 < 10: qq += 10 - qq % 16 quote = '${:x}$'.format(qq)[::-1] qq += 1 return quote + text + quote def disambiguate_identifier(text): if (keywords.edge_schema_keywords.get(text) or not _re_ident.fullmatch(text)): return '`{}`'.format(text) else: return text
Fix missing quoting module for schema.## # Copyright (c) 2016 MagicStack Inc. # All rights reserved. # # See LICENSE for details. ## import re from .parser.grammar import keywords _re_ident = re.compile(r'(?:[^\W\d]|\$)(?:\w|\$)*') def quote_literal(text): return "'" + text.replace("'", R"\'") + "'" def dollar_quote_literal(text): quote = '$$' qq = 0 while quote in text: if qq % 16 < 10: qq += 10 - qq % 16 quote = '${:x}$'.format(qq)[::-1] qq += 1 return quote + text + quote def disambiguate_identifier(text): if (keywords.edge_schema_keywords.get(text) or not _re_ident.fullmatch(text)): return '`{}`'.format(text) else: return text
<commit_before><commit_msg>Fix missing quoting module for schema.<commit_after>## # Copyright (c) 2016 MagicStack Inc. # All rights reserved. # # See LICENSE for details. ## import re from .parser.grammar import keywords _re_ident = re.compile(r'(?:[^\W\d]|\$)(?:\w|\$)*') def quote_literal(text): return "'" + text.replace("'", R"\'") + "'" def dollar_quote_literal(text): quote = '$$' qq = 0 while quote in text: if qq % 16 < 10: qq += 10 - qq % 16 quote = '${:x}$'.format(qq)[::-1] qq += 1 return quote + text + quote def disambiguate_identifier(text): if (keywords.edge_schema_keywords.get(text) or not _re_ident.fullmatch(text)): return '`{}`'.format(text) else: return text
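The subtle piece of the record above is dollar_quote_literal: when the text already contains the candidate tag, it steps a counter in hex and reverses the formatted string until it finds a collision-free tag. Hand-traced outputs, assuming the definitions above (worth re-checking before relying on them):

# Hand-traced against the functions above.
quote_literal("don't")        # "'don\\'t'"   -- the inner quote is backslash-escaped
dollar_quote_literal('abc')   # '$$abc$$'     -- default tag, no collision
dollar_quote_literal('a$$b')  # '$a$a$$b$a$'  -- '$$' collides, tag escalates to '$a$'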
3e364fa31d7659692ed3c6a2c4bd3387a336524e
nettests/examples/example_dnst.py
nettests/examples/example_dnst.py
from ooni.templates.dnst import DNSTest class ExampleDNSTest(DNSTest): def test_a_lookup(self): def gotResult(result): # Result is an array containing all the A record lookup results print result d = self.performALookup('torproject.org', ('8.8.8.8', 53)) d.addCallback(gotResult) return d
Add example usage of DNS Test Template
Add example usage of DNS Test Template
Python
bsd-2-clause
Karthikeyan-kkk/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe
Add example usage of DNS Test Template
from ooni.templates.dnst import DNSTest class ExampleDNSTest(DNSTest): def test_a_lookup(self): def gotResult(result): # Result is an array containing all the A record lookup results print result d = self.performALookup('torproject.org', ('8.8.8.8', 53)) d.addCallback(gotResult) return d
<commit_before><commit_msg>Add example usage of DNS Test Template<commit_after>
from ooni.templates.dnst import DNSTest class ExampleDNSTest(DNSTest): def test_a_lookup(self): def gotResult(result): # Result is an array containing all the A record lookup results print result d = self.performALookup('torproject.org', ('8.8.8.8', 53)) d.addCallback(gotResult) return d
Add example usage of DNS Test Templatefrom ooni.templates.dnst import DNSTest class ExampleDNSTest(DNSTest): def test_a_lookup(self): def gotResult(result): # Result is an array containing all the A record lookup results print result d = self.performALookup('torproject.org', ('8.8.8.8', 53)) d.addCallback(gotResult) return d
<commit_before><commit_msg>Add example usage of DNS Test Template<commit_after>from ooni.templates.dnst import DNSTest class ExampleDNSTest(DNSTest): def test_a_lookup(self): def gotResult(result): # Result is an array containing all the A record lookup results print result d = self.performALookup('torproject.org', ('8.8.8.8', 53)) d.addCallback(gotResult) return d
4428fb8dc8e81d9b3ff32c5d93e79c431434e4d3
utils/nflc-get-categories.py
utils/nflc-get-categories.py
#!/usr/bin/env python3 import argparse import json from urllib.request import urlopen def get_data(domain): response = urlopen('http://{}/media/nflc-playlist-video.json'.format(domain)).read() return json.loads(response.decode('utf-8')) def main(): parser = argparse.ArgumentParser(description='Get the category names and IDs for a NFLC site') parser.add_argument('domain', type=str, nargs=1, help='Domain name to query for') args = parser.parse_args() data = get_data(args.domain[0]) result = {} strip_left = [ 'Podcast - ', 'Video - Show - ', 'Video - Shows - ', 'Video - ', 'Videos - Show - ', 'Videos - ', ] for category_id, category in data.items(): name = category['name'] for strip in strip_left: if name.startswith(strip): name = name[(len(strip)):] result[name.strip()] = category_id for category_name in sorted(result): print('({}, "{}"),'.format(result[category_name], category_name)) if __name__ == '__main__': main()
Add utility to get categories and their IDs from NFLC sites
Add utility to get categories and their IDs from NFLC sites
Python
mit
Tenzer/plugin.video.nfl-teams
Add utility to get categories and their IDs from NFLC sites
#!/usr/bin/env python3 import argparse import json from urllib.request import urlopen def get_data(domain): response = urlopen('http://{}/media/nflc-playlist-video.json'.format(domain)).read() return json.loads(response.decode('utf-8')) def main(): parser = argparse.ArgumentParser(description='Get the category names and IDs for a NFLC site') parser.add_argument('domain', type=str, nargs=1, help='Domain name to query for') args = parser.parse_args() data = get_data(args.domain[0]) result = {} strip_left = [ 'Podcast - ', 'Video - Show - ', 'Video - Shows - ', 'Video - ', 'Videos - Show - ', 'Videos - ', ] for category_id, category in data.items(): name = category['name'] for strip in strip_left: if name.startswith(strip): name = name[(len(strip)):] result[name.strip()] = category_id for category_name in sorted(result): print('({}, "{}"),'.format(result[category_name], category_name)) if __name__ == '__main__': main()
<commit_before><commit_msg>Add utility to get categories and their IDs from NFLC sites<commit_after>
#!/usr/bin/env python3 import argparse import json from urllib.request import urlopen def get_data(domain): response = urlopen('http://{}/media/nflc-playlist-video.json'.format(domain)).read() return json.loads(response.decode('utf-8')) def main(): parser = argparse.ArgumentParser(description='Get the category names and IDs for a NFLC site') parser.add_argument('domain', type=str, nargs=1, help='Domain name to query for') args = parser.parse_args() data = get_data(args.domain[0]) result = {} strip_left = [ 'Podcast - ', 'Video - Show - ', 'Video - Shows - ', 'Video - ', 'Videos - Show - ', 'Videos - ', ] for category_id, category in data.items(): name = category['name'] for strip in strip_left: if name.startswith(strip): name = name[(len(strip)):] result[name.strip()] = category_id for category_name in sorted(result): print('({}, "{}"),'.format(result[category_name], category_name)) if __name__ == '__main__': main()
Add utility to get categories and their IDs from NFLC sites#!/usr/bin/env python3 import argparse import json from urllib.request import urlopen def get_data(domain): response = urlopen('http://{}/media/nflc-playlist-video.json'.format(domain)).read() return json.loads(response.decode('utf-8')) def main(): parser = argparse.ArgumentParser(description='Get the category names and IDs for a NFLC site') parser.add_argument('domain', type=str, nargs=1, help='Domain name to query for') args = parser.parse_args() data = get_data(args.domain[0]) result = {} strip_left = [ 'Podcast - ', 'Video - Show - ', 'Video - Shows - ', 'Video - ', 'Videos - Show - ', 'Videos - ', ] for category_id, category in data.items(): name = category['name'] for strip in strip_left: if name.startswith(strip): name = name[(len(strip)):] result[name.strip()] = category_id for category_name in sorted(result): print('({}, "{}"),'.format(result[category_name], category_name)) if __name__ == '__main__': main()
<commit_before><commit_msg>Add utility to get categories and their IDs from NFLC sites<commit_after>#!/usr/bin/env python3 import argparse import json from urllib.request import urlopen def get_data(domain): response = urlopen('http://{}/media/nflc-playlist-video.json'.format(domain)).read() return json.loads(response.decode('utf-8')) def main(): parser = argparse.ArgumentParser(description='Get the category names and IDs for a NFLC site') parser.add_argument('domain', type=str, nargs=1, help='Domain name to query for') args = parser.parse_args() data = get_data(args.domain[0]) result = {} strip_left = [ 'Podcast - ', 'Video - Show - ', 'Video - Shows - ', 'Video - ', 'Videos - Show - ', 'Videos - ', ] for category_id, category in data.items(): name = category['name'] for strip in strip_left: if name.startswith(strip): name = name[(len(strip)):] result[name.strip()] = category_id for category_name in sorted(result): print('({}, "{}"),'.format(result[category_name], category_name)) if __name__ == '__main__': main()
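A convenient way to sanity-check the prefix-stripping loop in the record above is to run it on a synthetic payload instead of a live NFLC domain. A self-contained sketch (the category data is made up):

# Invented payload in the shape get_data() returns above.
data = {
    '10': {'name': 'Video - Show - Total Access'},
    '11': {'name': 'Podcast - Around the League'},
}
strip_left = ['Podcast - ', 'Video - Show - ']
result = {}
for category_id, category in data.items():
    name = category['name']
    for strip in strip_left:
        if name.startswith(strip):
            name = name[len(strip):]
    result[name.strip()] = category_id
assert result == {'Total Access': '10', 'Around the League': '11'}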
7d52ef0136a3984da8d14db133220c1d4d1ab16e
zmq_io/in_pifacedigitalio.py
zmq_io/in_pifacedigitalio.py
import argparse import json from datetime import datetime import pifacedigitalio as pfdio from twisted.internet import reactor from txzmq import ZmqEndpoint, ZmqFactory, ZmqPullConnection gpio = pfdio.PiFaceDigital() def parse_args(): """ Specify and parse command line arguments. """ p = argparse.ArgumentParser() p.add_argument("pull_uri") return p.parse_args() def handle_input(msg): """ Called when a ZeroMQ PULL message is received. """ print "Received input at %s" % datetime.now() try: msg = json.loads(msg[0]) except ValueError as e: print "Message was not JSON formatted, discarding: %s" % e return for pin, value in msg.items(): print "Setting output pin %s to %s" % (pin, value) try: gpio.output_pins[int(pin)].value = int(value) except KeyError: print "No output pin with index of %s" % pin except ValueError: print "Output pin values must evaluate to integers, not %s" % value if __name__ == "__main__": args = parse_args() zf = ZmqFactory() e = ZmqEndpoint("bind", args.pull_uri) s = ZmqPullConnection(zf, e) s.onPull = handle_input reactor.run()
Add inbound pifacedigitalio module which uses twisted
Add inbound pifacedigitalio module which uses twisted
Python
unlicense
flyte/zmq-io-modules,flyte/zmq-io-modules
Add inbound pifacedigitalio module which uses twisted
import argparse import json from datetime import datetime import pifacedigitalio as pfdio from twisted.internet import reactor from txzmq import ZmqEndpoint, ZmqFactory, ZmqPullConnection gpio = pfdio.PiFaceDigital() def parse_args(): """ Specify and parse command line arguments. """ p = argparse.ArgumentParser() p.add_argument("pull_uri") return p.parse_args() def handle_input(msg): """ Called when a ZeroMQ PULL message is received. """ print "Received input at %s" % datetime.now() try: msg = json.loads(msg[0]) except ValueError as e: print "Message was not JSON formatted, discarding: %s" % e return for pin, value in msg.items(): print "Setting output pin %s to %s" % (pin, value) try: gpio.output_pins[int(pin)].value = int(value) except KeyError: print "No output pin with index of %s" % pin except ValueError: print "Output pin values must evaluate to integers, not %s" % value if __name__ == "__main__": args = parse_args() zf = ZmqFactory() e = ZmqEndpoint("bind", args.pull_uri) s = ZmqPullConnection(zf, e) s.onPull = handle_input reactor.run()
<commit_before><commit_msg>Add inbound pifacedigitalio module which uses twisted<commit_after>
import argparse import json from datetime import datetime import pifacedigitalio as pfdio from twisted.internet import reactor from txzmq import ZmqEndpoint, ZmqFactory, ZmqPullConnection gpio = pfdio.PiFaceDigital() def parse_args(): """ Specify and parse command line arguments. """ p = argparse.ArgumentParser() p.add_argument("pull_uri") return p.parse_args() def handle_input(msg): """ Called when a ZeroMQ PULL message is received. """ print "Received input at %s" % datetime.now() try: msg = json.loads(msg[0]) except ValueError as e: print "Message was not JSON formatted, discarding: %s" % e return for pin, value in msg.items(): print "Setting output pin %s to %s" % (pin, value) try: gpio.output_pins[int(pin)].value = int(value) except KeyError: print "No output pin with index of %s" % pin except ValueError: print "Output pin values must evaluate to integers, not %s" % value if __name__ == "__main__": args = parse_args() zf = ZmqFactory() e = ZmqEndpoint("bind", args.pull_uri) s = ZmqPullConnection(zf, e) s.onPull = handle_input reactor.run()
Add inbound pifacedigitalio module which uses twistedimport argparse import json from datetime import datetime import pifacedigitalio as pfdio from twisted.internet import reactor from txzmq import ZmqEndpoint, ZmqFactory, ZmqPullConnection gpio = pfdio.PiFaceDigital() def parse_args(): """ Specify and parse command line arguments. """ p = argparse.ArgumentParser() p.add_argument("pull_uri") return p.parse_args() def handle_input(msg): """ Called when a ZeroMQ PULL message is received. """ print "Received input at %s" % datetime.now() try: msg = json.loads(msg[0]) except ValueError as e: print "Message was not JSON formatted, discarding: %s" % e return for pin, value in msg.items(): print "Setting output pin %s to %s" % (pin, value) try: gpio.output_pins[int(pin)].value = int(value) except KeyError: print "No output pin with index of %s" % pin except ValueError: print "Output pin values must evaluate to integers, not %s" % value if __name__ == "__main__": args = parse_args() zf = ZmqFactory() e = ZmqEndpoint("bind", args.pull_uri) s = ZmqPullConnection(zf, e) s.onPull = handle_input reactor.run()
<commit_before><commit_msg>Add inbound pifacedigitalio module which uses twisted<commit_after>import argparse import json from datetime import datetime import pifacedigitalio as pfdio from twisted.internet import reactor from txzmq import ZmqEndpoint, ZmqFactory, ZmqPullConnection gpio = pfdio.PiFaceDigital() def parse_args(): """ Specify and parse command line arguments. """ p = argparse.ArgumentParser() p.add_argument("pull_uri") return p.parse_args() def handle_input(msg): """ Called when a ZeroMQ PULL message is received. """ print "Received input at %s" % datetime.now() try: msg = json.loads(msg[0]) except ValueError as e: print "Message was not JSON formatted, discarding: %s" % e return for pin, value in msg.items(): print "Setting output pin %s to %s" % (pin, value) try: gpio.output_pins[int(pin)].value = int(value) except KeyError: print "No output pin with index of %s" % pin except ValueError: print "Output pin values must evaluate to integers, not %s" % value if __name__ == "__main__": args = parse_args() zf = ZmqFactory() e = ZmqEndpoint("bind", args.pull_uri) s = ZmqPullConnection(zf, e) s.onPull = handle_input reactor.run()
30dbded69a535d4730fe8bb9c8706f809f841e9c
src/keybar/tests/web/test_views.py
src/keybar/tests/web/test_views.py
import pytest @pytest.mark.django_db class TestIndexView(object): def test_index(self, client): response = client.get('/') assert response.template_name == ['keybar/web/index.html']
Add test for index view
Add test for index view
Python
bsd-3-clause
keybar/keybar
Add test for index view
import pytest @pytest.mark.django_db class TestIndexView(object): def test_index(self, client): response = client.get('/') assert response.template_name == ['keybar/web/index.html']
<commit_before><commit_msg>Add test for index view<commit_after>
import pytest @pytest.mark.django_db class TestIndexView(object): def test_index(self, client): response = client.get('/') assert response.template_name == ['keybar/web/index.html']
Add test for index viewimport pytest @pytest.mark.django_db class TestIndexView(object): def test_index(self, client): response = client.get('/') assert response.template_name == ['keybar/web/index.html']
<commit_before><commit_msg>Add test for index view<commit_after>import pytest @pytest.mark.django_db class TestIndexView(object): def test_index(self, client): response = client.get('/') assert response.template_name == ['keybar/web/index.html']
017ed0c2e8edf0599fa27dcc281b2c93e3ddd67a
tests/test_utils.py
tests/test_utils.py
import unittest class TestCoordsByParent(unittest.TestCase): def test_empty(self): from tilequeue.utils import CoordsByParent cbp = CoordsByParent(10) count = 0 for key, coords in cbp: count += 1 self.assertEquals(0, count) def test_lower_zooms_not_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) low_zoom_coords = [(9, 0, 0), (9, 0, 1), (9, 1, 0), (9, 1, 1)] for z, x, y in low_zoom_coords: coord = Coordinate(zoom=z, column=x, row=y) cbp.add(coord) count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) count += 1 self.assertEquals(len(low_zoom_coords), count) def test_higher_zooms_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) def _c(z, x, y): return Coordinate(zoom=z, column=x, row=y) groups = { _c(10, 0, 0): [_c(10, 0, 0), _c(11, 0, 0), _c(11, 0, 1)], _c(10, 1, 1): [_c(11, 2, 2), _c(11, 3, 3), _c(12, 4, 4)], } for coords in groups.itervalues(): for coord in coords: cbp.add(coord) count = 0 for key, coords in cbp: self.assertIn(key, groups) self.assertEquals(set(groups[key]), set(coords)) count += 1 self.assertEquals(len(groups), count) def test_with_extra_data(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) coord = Coordinate(zoom=10, column=0, row=0) cbp.add(coord, 'foo', 'bar') count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) self.assertEquals((coord, 'foo', 'bar'), coords[0]) count += 1 self.assertEquals(1, count)
Add test for parent grouping function.
Add test for parent grouping function.
Python
mit
tilezen/tilequeue,mapzen/tilequeue
Add test for parent grouping function.
import unittest class TestCoordsByParent(unittest.TestCase): def test_empty(self): from tilequeue.utils import CoordsByParent cbp = CoordsByParent(10) count = 0 for key, coords in cbp: count += 1 self.assertEquals(0, count) def test_lower_zooms_not_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) low_zoom_coords = [(9, 0, 0), (9, 0, 1), (9, 1, 0), (9, 1, 1)] for z, x, y in low_zoom_coords: coord = Coordinate(zoom=z, column=x, row=y) cbp.add(coord) count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) count += 1 self.assertEquals(len(low_zoom_coords), count) def test_higher_zooms_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) def _c(z, x, y): return Coordinate(zoom=z, column=x, row=y) groups = { _c(10, 0, 0): [_c(10, 0, 0), _c(11, 0, 0), _c(11, 0, 1)], _c(10, 1, 1): [_c(11, 2, 2), _c(11, 3, 3), _c(12, 4, 4)], } for coords in groups.itervalues(): for coord in coords: cbp.add(coord) count = 0 for key, coords in cbp: self.assertIn(key, groups) self.assertEquals(set(groups[key]), set(coords)) count += 1 self.assertEquals(len(groups), count) def test_with_extra_data(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) coord = Coordinate(zoom=10, column=0, row=0) cbp.add(coord, 'foo', 'bar') count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) self.assertEquals((coord, 'foo', 'bar'), coords[0]) count += 1 self.assertEquals(1, count)
<commit_before><commit_msg>Add test for parent grouping function.<commit_after>
import unittest class TestCoordsByParent(unittest.TestCase): def test_empty(self): from tilequeue.utils import CoordsByParent cbp = CoordsByParent(10) count = 0 for key, coords in cbp: count += 1 self.assertEquals(0, count) def test_lower_zooms_not_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) low_zoom_coords = [(9, 0, 0), (9, 0, 1), (9, 1, 0), (9, 1, 1)] for z, x, y in low_zoom_coords: coord = Coordinate(zoom=z, column=x, row=y) cbp.add(coord) count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) count += 1 self.assertEquals(len(low_zoom_coords), count) def test_higher_zooms_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) def _c(z, x, y): return Coordinate(zoom=z, column=x, row=y) groups = { _c(10, 0, 0): [_c(10, 0, 0), _c(11, 0, 0), _c(11, 0, 1)], _c(10, 1, 1): [_c(11, 2, 2), _c(11, 3, 3), _c(12, 4, 4)], } for coords in groups.itervalues(): for coord in coords: cbp.add(coord) count = 0 for key, coords in cbp: self.assertIn(key, groups) self.assertEquals(set(groups[key]), set(coords)) count += 1 self.assertEquals(len(groups), count) def test_with_extra_data(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) coord = Coordinate(zoom=10, column=0, row=0) cbp.add(coord, 'foo', 'bar') count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) self.assertEquals((coord, 'foo', 'bar'), coords[0]) count += 1 self.assertEquals(1, count)
Add test for parent grouping function.import unittest class TestCoordsByParent(unittest.TestCase): def test_empty(self): from tilequeue.utils import CoordsByParent cbp = CoordsByParent(10) count = 0 for key, coords in cbp: count += 1 self.assertEquals(0, count) def test_lower_zooms_not_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) low_zoom_coords = [(9, 0, 0), (9, 0, 1), (9, 1, 0), (9, 1, 1)] for z, x, y in low_zoom_coords: coord = Coordinate(zoom=z, column=x, row=y) cbp.add(coord) count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) count += 1 self.assertEquals(len(low_zoom_coords), count) def test_higher_zooms_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) def _c(z, x, y): return Coordinate(zoom=z, column=x, row=y) groups = { _c(10, 0, 0): [_c(10, 0, 0), _c(11, 0, 0), _c(11, 0, 1)], _c(10, 1, 1): [_c(11, 2, 2), _c(11, 3, 3), _c(12, 4, 4)], } for coords in groups.itervalues(): for coord in coords: cbp.add(coord) count = 0 for key, coords in cbp: self.assertIn(key, groups) self.assertEquals(set(groups[key]), set(coords)) count += 1 self.assertEquals(len(groups), count) def test_with_extra_data(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) coord = Coordinate(zoom=10, column=0, row=0) cbp.add(coord, 'foo', 'bar') count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) self.assertEquals((coord, 'foo', 'bar'), coords[0]) count += 1 self.assertEquals(1, count)
<commit_before><commit_msg>Add test for parent grouping function.<commit_after>import unittest class TestCoordsByParent(unittest.TestCase): def test_empty(self): from tilequeue.utils import CoordsByParent cbp = CoordsByParent(10) count = 0 for key, coords in cbp: count += 1 self.assertEquals(0, count) def test_lower_zooms_not_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) low_zoom_coords = [(9, 0, 0), (9, 0, 1), (9, 1, 0), (9, 1, 1)] for z, x, y in low_zoom_coords: coord = Coordinate(zoom=z, column=x, row=y) cbp.add(coord) count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) count += 1 self.assertEquals(len(low_zoom_coords), count) def test_higher_zooms_grouped(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) def _c(z, x, y): return Coordinate(zoom=z, column=x, row=y) groups = { _c(10, 0, 0): [_c(10, 0, 0), _c(11, 0, 0), _c(11, 0, 1)], _c(10, 1, 1): [_c(11, 2, 2), _c(11, 3, 3), _c(12, 4, 4)], } for coords in groups.itervalues(): for coord in coords: cbp.add(coord) count = 0 for key, coords in cbp: self.assertIn(key, groups) self.assertEquals(set(groups[key]), set(coords)) count += 1 self.assertEquals(len(groups), count) def test_with_extra_data(self): from tilequeue.utils import CoordsByParent from ModestMaps.Core import Coordinate cbp = CoordsByParent(10) coord = Coordinate(zoom=10, column=0, row=0) cbp.add(coord, 'foo', 'bar') count = 0 for key, coords in cbp: self.assertEquals(1, len(coords)) self.assertEquals((coord, 'foo', 'bar'), coords[0]) count += 1 self.assertEquals(1, count)
0deb3f006746d86ed4ed56ccfa2a93ac53d0d968
centerpoints/iterated_tverberg.py
centerpoints/iterated_tverberg.py
# -*- coding: utf-8 -*- import numpy as np from .interfaces import CenterpointAlgo class IteratedTverberg(CenterpointAlgo): def centerpoint(self, points): pass def _prune(alphas, hull): # @see http://www.math.cornell.edu/~eranevo/homepage/ConvNote.pdf # http://en.wikipedia.org/wiki/Carath%C3%A9odory's_theorem_(convex_hull) n, d = hull.shape # Anchor: d + 1 hull points can't be reduced any further if n <= d + 1: return alphas, hull # Choose d + 2 hull points _hull = hull[:d + 2] _alphas = alphas[:d + 2] # Create linearly dependent vectors ld = _hull[1:] - _hull[1] # Solve β * ld = 0 _, _, V = np.linalg.svd(ld.T) _betas = V.T[:, -1] # Calculate β_1 in a way to assure Sum β_i = 0 _beta1 = - np.sum(_betas) betas = np.hstack((_beta1, _betas)) # Calculate the adjusted alphas and determine the minimum. lambdas = _alphas / betas lambda_min_idx = np.argmin(lambdas) # Adjust the α's of the original point alphas = alphas[:] alphas[:d + 2] = _alphas - (lambdas[lambda_min_idx] * betas) idx = np.arange(n) != lambda_min_idx hull = hull[idx] alphas = alphas[idx] return _prune(alphas, hull) # def _convex_combination(point, hull): # n, d = hull.shape # # a = np.vstack((hull.T, np.ones(n))) # b = np.hstack((point, np.ones(1))) # x, residues, rank, s = np.linalg.lstsq(a,b) # # return x
Add incomplete _pruning function for IteratedTverberg.
Add incomplete _pruning function for IteratedTverberg.
Python
mit
fu-berlin-swp-2014/center-points,fu-berlin-swp-2014/center-points
Add incomplete _pruning function for IteratedTverberg.
# -*- coding: utf-8 -*- import numpy as np from .interfaces import CenterpointAlgo class IteratedTverberg(CenterpointAlgo): def centerpoint(self, points): pass def _prune(alphas, hull): # @see http://www.math.cornell.edu/~eranevo/homepage/ConvNote.pdf # http://en.wikipedia.org/wiki/Carath%C3%A9odory's_theorem_(convex_hull) n, d = hull.shape # Anchor: d + 1 hull points can't be reduced any further if n <= d + 1: return alphas, hull # Choose d + 2 hull points _hull = hull[:d + 2] _alphas = alphas[:d + 2] # Create linearly dependent vectors ld = _hull[1:] - _hull[1] # Solve β * ld = 0 _, _, V = np.linalg.svd(ld.T) _betas = V.T[:, -1] # Calculate β_1 in a way to assure Sum β_i = 0 _beta1 = - np.sum(_betas) betas = np.hstack((_beta1, _betas)) # Calculate the adjusted alphas and determine the minimum. lambdas = _alphas / betas lambda_min_idx = np.argmin(lambdas) # Adjust the α's of the original point alphas = alphas[:] alphas[:d + 2] = _alphas - (lambdas[lambda_min_idx] * betas) idx = np.arange(n) != lambda_min_idx hull = hull[idx] alphas = alphas[idx] return _prune(alphas, hull) # def _convex_combination(point, hull): # n, d = hull.shape # # a = np.vstack((hull.T, np.ones(n))) # b = np.hstack((point, np.ones(1))) # x, residues, rank, s = np.linalg.lstsq(a,b) # # return x
<commit_before><commit_msg>Add incomplete _pruning function for IteratedTverberg.<commit_after>
# -*- coding: utf-8 -*- import numpy as np from .interfaces import CenterpointAlgo class IteratedTverberg(CenterpointAlgo): def centerpoint(self, points): pass def _prune(alphas, hull): # @see http://www.math.cornell.edu/~eranevo/homepage/ConvNote.pdf # http://en.wikipedia.org/wiki/Carath%C3%A9odory's_theorem_(convex_hull) n, d = hull.shape # Anchor: d + 1 hull points can't be reduced any further if n <= d + 1: return alphas, hull # Choose d + 2 hull points _hull = hull[:d + 2] _alphas = alphas[:d + 2] # Create linearly dependent vectors ld = _hull[1:] - _hull[1] # Solve β * ld = 0 _, _, V = np.linalg.svd(ld.T) _betas = V.T[:, -1] # Calculate β_1 in a way to assure Sum β_i = 0 _beta1 = - np.sum(_betas) betas = np.hstack((_beta1, _betas)) # Calculate the adjusted alphas and determine the minimum. lambdas = _alphas / betas lambda_min_idx = np.argmin(lambdas) # Adjust the α's of the original point alphas = alphas[:] alphas[:d + 2] = _alphas - (lambdas[lambda_min_idx] * betas) idx = np.arange(n) != lambda_min_idx hull = hull[idx] alphas = alphas[idx] return _prune(alphas, hull) # def _convex_combination(point, hull): # n, d = hull.shape # # a = np.vstack((hull.T, np.ones(n))) # b = np.hstack((point, np.ones(1))) # x, residues, rank, s = np.linalg.lstsq(a,b) # # return x
Add incomplete _pruning function for IteratedTverberg.# -*- coding: utf-8 -*- import numpy as np from .interfaces import CenterpointAlgo class IteratedTverberg(CenterpointAlgo): def centerpoint(self, points): pass def _prune(alphas, hull): # @see http://www.math.cornell.edu/~eranevo/homepage/ConvNote.pdf # http://en.wikipedia.org/wiki/Carath%C3%A9odory's_theorem_(convex_hull) n, d = hull.shape # Anchor: d + 1 hull points can't be reduced any further if n <= d + 1: return alphas, hull # Choose d + 2 hull points _hull = hull[:d + 2] _alphas = alphas[:d + 2] # Create linearly dependent vectors ld = _hull[1:] - _hull[1] # Solve β * ld = 0 _, _, V = np.linalg.svd(ld.T) _betas = V.T[:, -1] # Calculate β_1 in a way to assure Sum β_i = 0 _beta1 = - np.sum(_betas) betas = np.hstack((_beta1, _betas)) # Calculate the adjusted alphas and determine the minimum. lambdas = _alphas / betas lambda_min_idx = np.argmin(lambdas) # Adjust the α's of the original point alphas = alphas[:] alphas[:d + 2] = _alphas - (lambdas[lambda_min_idx] * betas) idx = np.arange(n) != lambda_min_idx hull = hull[idx] alphas = alphas[idx] return _prune(alphas, hull) # def _convex_combination(point, hull): # n, d = hull.shape # # a = np.vstack((hull.T, np.ones(n))) # b = np.hstack((point, np.ones(1))) # x, residues, rank, s = np.linalg.lstsq(a,b) # # return x
<commit_before><commit_msg>Add incomplete _pruning function for IteratedTverberg.<commit_after># -*- coding: utf-8 -*- import numpy as np from .interfaces import CenterpointAlgo class IteratedTverberg(CenterpointAlgo): def centerpoint(self, points): pass def _prune(alphas, hull): # @see http://www.math.cornell.edu/~eranevo/homepage/ConvNote.pdf # http://en.wikipedia.org/wiki/Carath%C3%A9odory's_theorem_(convex_hull) n, d = hull.shape # Anchor: d + 1 hull points can't be reduced any further if n <= d + 1: return alphas, hull # Choose d + 2 hull points _hull = hull[:d + 2] _alphas = alphas[:d + 2] # Create linearly dependent vectors ld = _hull[1:] - _hull[1] # Solve β * ld = 0 _, _, V = np.linalg.svd(ld.T) _betas = V.T[:, -1] # Calculate β_1 in a way to assure Sum β_i = 0 _beta1 = - np.sum(_betas) betas = np.hstack((_beta1, _betas)) # Calculate the adjusted alphas and determine the minimum. lambdas = _alphas / betas lambda_min_idx = np.argmin(lambdas) # Adjust the α's of the original point alphas = alphas[:] alphas[:d + 2] = _alphas - (lambdas[lambda_min_idx] * betas) idx = np.arange(n) != lambda_min_idx hull = hull[idx] alphas = alphas[idx] return _prune(alphas, hull) # def _convex_combination(point, hull): # n, d = hull.shape # # a = np.vstack((hull.T, np.ones(n))) # b = np.hstack((point, np.ones(1))) # x, residues, rank, s = np.linalg.lstsq(a,b) # # return x
0f0a23268368d5313f041e649fe58e3d123ccc64
pyslang/tests/test_extractcomments.py
pyslang/tests/test_extractcomments.py
from pyslang import * testfile = """ //! Module description //! ***this is code*** sample //! | Tables | Are | Cool | //! |----------|:------------:|------:| //! | col 1 is | left-aligned | $1600 | module gray_counter ( out , // counter out clk , //! clock clk1 , //! clock sample rst //! **active high reset** ); input clk, clk1, rst; output [7:0] out; wire [7:0] out; reg [7:0] count; endmodule """ def test_extractcomments(): tree = SyntaxTree.fromText(testfile) assert tree.root.kind == SyntaxKind.ModuleDeclaration moduleComments = [] for t in tree.root.getFirstToken().trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): moduleComments.append(comment[3:].strip()) portComments = {} portList = tree.root.header.ports lastPort = None def getLeadingComments(tok): if lastPort is not None: for t in tok.trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): portComments[lastPort].append(comment[3:].strip()) elif t.kind == TriviaKind.EndOfLine: break if portList is not None: for port in portList.ports: if isinstance(port, ImplicitNonAnsiPortSyntax): tok = port.getFirstToken() getLeadingComments(tok) portName = tok.value portComments[portName] = [] lastPort = portName getLeadingComments(portList.closeParen) assert len(moduleComments) == 5 assert moduleComments[4] == "| col 1 is | left-aligned | $1600 |" for k, _ in portComments.copy().items(): if len(portComments[k]) == 0: del portComments[k] assert len(portComments) == 3 assert portComments["rst"][0] == "**active high reset**"
Add simple example for extracting comments via pyslang
Add simple example for extracting comments via pyslang
Python
mit
MikePopoloski/slang,MikePopoloski/slang
Add simple example for extracting comments via pyslang
from pyslang import * testfile = """ //! Module description //! ***this is code*** sample //! | Tables | Are | Cool | //! |----------|:------------:|------:| //! | col 1 is | left-aligned | $1600 | module gray_counter ( out , // counter out clk , //! clock clk1 , //! clock sample rst //! **active high reset** ); input clk, clk1, rst; output [7:0] out; wire [7:0] out; reg [7:0] count; endmodule """ def test_extractcomments(): tree = SyntaxTree.fromText(testfile) assert tree.root.kind == SyntaxKind.ModuleDeclaration moduleComments = [] for t in tree.root.getFirstToken().trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): moduleComments.append(comment[3:].strip()) portComments = {} portList = tree.root.header.ports lastPort = None def getLeadingComments(tok): if lastPort is not None: for t in tok.trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): portComments[lastPort].append(comment[3:].strip()) elif t.kind == TriviaKind.EndOfLine: break if portList is not None: for port in portList.ports: if isinstance(port, ImplicitNonAnsiPortSyntax): tok = port.getFirstToken() getLeadingComments(tok) portName = tok.value portComments[portName] = [] lastPort = portName getLeadingComments(portList.closeParen) assert len(moduleComments) == 5 assert moduleComments[4] == "| col 1 is | left-aligned | $1600 |" for k, _ in portComments.copy().items(): if len(portComments[k]) == 0: del portComments[k] assert len(portComments) == 3 assert portComments["rst"][0] == "**active high reset**"
<commit_before><commit_msg>Add simple example for extracting comments via pyslang<commit_after>
from pyslang import * testfile = """ //! Module description //! ***this is code*** sample //! | Tables | Are | Cool | //! |----------|:------------:|------:| //! | col 1 is | left-aligned | $1600 | module gray_counter ( out , // counter out clk , //! clock clk1 , //! clock sample rst //! **active high reset** ); input clk, clk1, rst; output [7:0] out; wire [7:0] out; reg [7:0] count; endmodule """ def test_extractcomments(): tree = SyntaxTree.fromText(testfile) assert tree.root.kind == SyntaxKind.ModuleDeclaration moduleComments = [] for t in tree.root.getFirstToken().trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): moduleComments.append(comment[3:].strip()) portComments = {} portList = tree.root.header.ports lastPort = None def getLeadingComments(tok): if lastPort is not None: for t in tok.trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): portComments[lastPort].append(comment[3:].strip()) elif t.kind == TriviaKind.EndOfLine: break if portList is not None: for port in portList.ports: if isinstance(port, ImplicitNonAnsiPortSyntax): tok = port.getFirstToken() getLeadingComments(tok) portName = tok.value portComments[portName] = [] lastPort = portName getLeadingComments(portList.closeParen) assert len(moduleComments) == 5 assert moduleComments[4] == "| col 1 is | left-aligned | $1600 |" for k, _ in portComments.copy().items(): if len(portComments[k]) == 0: del portComments[k] assert len(portComments) == 3 assert portComments["rst"][0] == "**active high reset**"
Add simple example for extracting comments via pyslangfrom pyslang import * testfile = """ //! Module description //! ***this is code*** sample //! | Tables | Are | Cool | //! |----------|:------------:|------:| //! | col 1 is | left-aligned | $1600 | module gray_counter ( out , // counter out clk , //! clock clk1 , //! clock sample rst //! **active high reset** ); input clk, clk1, rst; output [7:0] out; wire [7:0] out; reg [7:0] count; endmodule """ def test_extractcomments(): tree = SyntaxTree.fromText(testfile) assert tree.root.kind == SyntaxKind.ModuleDeclaration moduleComments = [] for t in tree.root.getFirstToken().trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): moduleComments.append(comment[3:].strip()) portComments = {} portList = tree.root.header.ports lastPort = None def getLeadingComments(tok): if lastPort is not None: for t in tok.trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): portComments[lastPort].append(comment[3:].strip()) elif t.kind == TriviaKind.EndOfLine: break if portList is not None: for port in portList.ports: if isinstance(port, ImplicitNonAnsiPortSyntax): tok = port.getFirstToken() getLeadingComments(tok) portName = tok.value portComments[portName] = [] lastPort = portName getLeadingComments(portList.closeParen) assert len(moduleComments) == 5 assert moduleComments[4] == "| col 1 is | left-aligned | $1600 |" for k, _ in portComments.copy().items(): if len(portComments[k]) == 0: del portComments[k] assert len(portComments) == 3 assert portComments["rst"][0] == "**active high reset**"
<commit_before><commit_msg>Add simple example for extracting comments via pyslang<commit_after>from pyslang import * testfile = """ //! Module description //! ***this is code*** sample //! | Tables | Are | Cool | //! |----------|:------------:|------:| //! | col 1 is | left-aligned | $1600 | module gray_counter ( out , // counter out clk , //! clock clk1 , //! clock sample rst //! **active high reset** ); input clk, clk1, rst; output [7:0] out; wire [7:0] out; reg [7:0] count; endmodule """ def test_extractcomments(): tree = SyntaxTree.fromText(testfile) assert tree.root.kind == SyntaxKind.ModuleDeclaration moduleComments = [] for t in tree.root.getFirstToken().trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): moduleComments.append(comment[3:].strip()) portComments = {} portList = tree.root.header.ports lastPort = None def getLeadingComments(tok): if lastPort is not None: for t in tok.trivia: if t.kind == TriviaKind.LineComment: comment = t.getRawText() if comment.startswith("//!"): portComments[lastPort].append(comment[3:].strip()) elif t.kind == TriviaKind.EndOfLine: break if portList is not None: for port in portList.ports: if isinstance(port, ImplicitNonAnsiPortSyntax): tok = port.getFirstToken() getLeadingComments(tok) portName = tok.value portComments[portName] = [] lastPort = portName getLeadingComments(portList.closeParen) assert len(moduleComments) == 5 assert moduleComments[4] == "| col 1 is | left-aligned | $1600 |" for k, _ in portComments.copy().items(): if len(portComments[k]) == 0: del portComments[k] assert len(portComments) == 3 assert portComments["rst"][0] == "**active high reset**"
22a5515b9bf3e684706d3dab98f19402ee651c4c
src/nyc_trees/apps/core/migrations/0018_auto_20150318_1233.py
src/nyc_trees/apps/core/migrations/0018_auto_20150318_1233.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import apps.core.models class Migration(migrations.Migration): dependencies = [ ('core', '0017_group_affiliation'), ] operations = [ migrations.AlterField( model_name='group', name='image', field=models.ImageField(null=True, upload_to=apps.core.models._generate_image_filename, blank=True), preserve_default=True, ), ]
Add missing migrations for changed image field
Add missing migrations for changed image field Commit fda90cf modified the image field of Group, but neglected to include migrations.
Python
agpl-3.0
kdeloach/nyc-trees,RickMohr/nyc-trees,azavea/nyc-trees,RickMohr/nyc-trees,maurizi/nyc-trees,kdeloach/nyc-trees,azavea/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees,kdeloach/nyc-trees,maurizi/nyc-trees,kdeloach/nyc-trees,RickMohr/nyc-trees,maurizi/nyc-trees,azavea/nyc-trees,kdeloach/nyc-trees,RickMohr/nyc-trees,azavea/nyc-trees
Add missing migrations for changed image field Commit fda90cf modified the image field of Group, but neglected to include migrations.
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import apps.core.models class Migration(migrations.Migration): dependencies = [ ('core', '0017_group_affiliation'), ] operations = [ migrations.AlterField( model_name='group', name='image', field=models.ImageField(null=True, upload_to=apps.core.models._generate_image_filename, blank=True), preserve_default=True, ), ]
<commit_before><commit_msg>Add missing migrations for changed image field Commit fda90cf modified the image field of Group, but neglected to include migrations.<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import apps.core.models class Migration(migrations.Migration): dependencies = [ ('core', '0017_group_affiliation'), ] operations = [ migrations.AlterField( model_name='group', name='image', field=models.ImageField(null=True, upload_to=apps.core.models._generate_image_filename, blank=True), preserve_default=True, ), ]
Add missing migrations for changed image field Commit fda90cf modified the image field of Group, but neglected to include migrations.# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import apps.core.models class Migration(migrations.Migration): dependencies = [ ('core', '0017_group_affiliation'), ] operations = [ migrations.AlterField( model_name='group', name='image', field=models.ImageField(null=True, upload_to=apps.core.models._generate_image_filename, blank=True), preserve_default=True, ), ]
<commit_before><commit_msg>Add missing migrations for changed image field Commit fda90cf modified the image field of Group, but neglected to include migrations.<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import apps.core.models class Migration(migrations.Migration): dependencies = [ ('core', '0017_group_affiliation'), ] operations = [ migrations.AlterField( model_name='group', name='image', field=models.ImageField(null=True, upload_to=apps.core.models._generate_image_filename, blank=True), preserve_default=True, ), ]
9f82e6b96bf4702901f86374e8a05c3d550091e7
app/soc/logic/helper/convert_db.py
app/soc/logic/helper/convert_db.py
#!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converts the DB from an old scheme to a new one. """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', ] from google.appengine.api import users from django import http from soc.models import user as user_model from soc.logic import accounts from soc.logic.models.user import logic as user_logic def convert_user_accounts(*args, **kwargs): """Converts all current user accounts to normalized form. """ data = user_logic.getAll(user_model.User.all()) for user in data: normalized = accounts.normalizeAccount(user.account) if user.account != normalized: user.account = normalized user.put() return http.HttpResponse('Done')
Add a script to normalize user accounts
Add a script to normalize user accounts Patch by: Sverre Rabbelier
Python
apache-2.0
MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging,MatthewWilkes/mw4068-packaging
Add a script to normalize user accounts Patch by: Sverre Rabbelier
#!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converts the DB from an old scheme to a new one. """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', ] from google.appengine.api import users from django import http from soc.models import user as user_model from soc.logic import accounts from soc.logic.models.user import logic as user_logic def convert_user_accounts(*args, **kwargs): """Converts all current user accounts to normalized form. """ data = user_logic.getAll(user_model.User.all()) for user in data: normalized = accounts.normalizeAccount(user.account) if user.account != normalized: user.account = normalized user.put() return http.HttpResponse('Done')
<commit_before><commit_msg>Add a script to normalize user accounts Patch by: Sverre Rabbelier<commit_after>
#!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converts the DB from an old scheme to a new one. """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', ] from google.appengine.api import users from django import http from soc.models import user as user_model from soc.logic import accounts from soc.logic.models.user import logic as user_logic def convert_user_accounts(*args, **kwargs): """Converts all current user accounts to normalized form. """ data = user_logic.getAll(user_model.User.all()) for user in data: normalized = accounts.normalizeAccount(user.account) if user.account != normalized: user.account = normalized user.put() return http.HttpResponse('Done')
Add a script to normalize user accounts Patch by: Sverre Rabbelier#!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converts the DB from an old scheme to a new one. """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', ] from google.appengine.api import users from django import http from soc.models import user as user_model from soc.logic import accounts from soc.logic.models.user import logic as user_logic def convert_user_accounts(*args, **kwargs): """Converts all current user accounts to normalized form. """ data = user_logic.getAll(user_model.User.all()) for user in data: normalized = accounts.normalizeAccount(user.account) if user.account != normalized: user.account = normalized user.put() return http.HttpResponse('Done')
<commit_before><commit_msg>Add a script to normalize user accounts Patch by: Sverre Rabbelier<commit_after>#!/usr/bin/python2.5 # # Copyright 2008 the Melange authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converts the DB from an old scheme to a new one. """ __authors__ = [ '"Sverre Rabbelier" <sverre@rabbelier.nl>', ] from google.appengine.api import users from django import http from soc.models import user as user_model from soc.logic import accounts from soc.logic.models.user import logic as user_logic def convert_user_accounts(*args, **kwargs): """Converts all current user accounts to normalized form. """ data = user_logic.getAll(user_model.User.all()) for user in data: normalized = accounts.normalizeAccount(user.account) if user.account != normalized: user.account = normalized user.put() return http.HttpResponse('Done')
736412dc51711b41ea45f993d80d5ae3895306e9
tests/sentry/digests/test_notifications.py
tests/sentry/digests/test_notifications.py
from __future__ import absolute_import from collections import OrderedDict from exam import fixture from sentry.digests import Record from sentry.digests.notifications import ( Notification, event_to_record, rewrite_record, group_records, sort_groups, ) from sentry.testutils import TestCase class RewriteRecordTestCase(TestCase): @fixture def rule(self): return self.event.project.rule_set.all()[0] @fixture def record(self): return event_to_record(self.event, (self.rule,)) def test_success(self): assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={ self.rule.id: self.rule, }, ) == Record( self.record.key, Notification( self.event, [self.rule], ), self.record.timestamp, ) def test_without_group(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={}, rules={ self.rule.id: self.rule, }, ) is None def test_filters_invalid_rules(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={}, ) == Record( self.record.key, Notification(self.event, []), self.record.timestamp, ) class GroupRecordsTestCase(TestCase): @fixture def rule(self): return self.project.rule_set.all()[0] def test_success(self): events = [self.create_event(group=self.group) for _ in xrange(3)] records = [Record(event.id, Notification(event, [self.rule]), event.datetime) for event in events] assert group_records(records) == { self.rule: { self.group: records, }, } class SortRecordsTestCase(TestCase): def test_success(self): rules = list(self.project.rule_set.all()) groups = [self.create_group() for _ in xrange(3)] groups[0].event_count = 10 groups[0].user_count = 4 groups[1].event_count = 5 groups[1].user_count = 2 groups[2].event_count = 5 groups[2].user_count = 1 grouped = { rules[0]: { groups[0]: [], }, rules[1]: { groups[1]: [], groups[2]: [], }, } assert sort_groups(grouped) == OrderedDict(( (rules[1], OrderedDict(( (groups[1], []), (groups[2], []), ))), (rules[0], OrderedDict(( (groups[0], []), ))), ))
Test some of the digest building functions.
Test some of the digest building functions.
Python
bsd-3-clause
zenefits/sentry,daevaorn/sentry,BuildingLink/sentry,beeftornado/sentry,jean/sentry,BuildingLink/sentry,zenefits/sentry,nicholasserra/sentry,JackDanger/sentry,JamesMura/sentry,fotinakis/sentry,zenefits/sentry,daevaorn/sentry,gencer/sentry,alexm92/sentry,ifduyue/sentry,gencer/sentry,ifduyue/sentry,BuildingLink/sentry,mvaled/sentry,imankulov/sentry,nicholasserra/sentry,mvaled/sentry,JamesMura/sentry,mvaled/sentry,mvaled/sentry,JamesMura/sentry,daevaorn/sentry,looker/sentry,jean/sentry,alexm92/sentry,zenefits/sentry,BuildingLink/sentry,zenefits/sentry,JackDanger/sentry,BuildingLink/sentry,fotinakis/sentry,JamesMura/sentry,fotinakis/sentry,gencer/sentry,beeftornado/sentry,imankulov/sentry,mitsuhiko/sentry,ifduyue/sentry,JackDanger/sentry,daevaorn/sentry,beeftornado/sentry,fotinakis/sentry,gencer/sentry,alexm92/sentry,gencer/sentry,mitsuhiko/sentry,mvaled/sentry,JamesMura/sentry,looker/sentry,jean/sentry,ifduyue/sentry,looker/sentry,ifduyue/sentry,jean/sentry,looker/sentry,jean/sentry,nicholasserra/sentry,mvaled/sentry,looker/sentry,imankulov/sentry
Test some of the digest building functions.
from __future__ import absolute_import from collections import OrderedDict from exam import fixture from sentry.digests import Record from sentry.digests.notifications import ( Notification, event_to_record, rewrite_record, group_records, sort_groups, ) from sentry.testutils import TestCase class RewriteRecordTestCase(TestCase): @fixture def rule(self): return self.event.project.rule_set.all()[0] @fixture def record(self): return event_to_record(self.event, (self.rule,)) def test_success(self): assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={ self.rule.id: self.rule, }, ) == Record( self.record.key, Notification( self.event, [self.rule], ), self.record.timestamp, ) def test_without_group(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={}, rules={ self.rule.id: self.rule, }, ) is None def test_filters_invalid_rules(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={}, ) == Record( self.record.key, Notification(self.event, []), self.record.timestamp, ) class GroupRecordsTestCase(TestCase): @fixture def rule(self): return self.project.rule_set.all()[0] def test_success(self): events = [self.create_event(group=self.group) for _ in xrange(3)] records = [Record(event.id, Notification(event, [self.rule]), event.datetime) for event in events] assert group_records(records) == { self.rule: { self.group: records, }, } class SortRecordsTestCase(TestCase): def test_success(self): rules = list(self.project.rule_set.all()) groups = [self.create_group() for _ in xrange(3)] groups[0].event_count = 10 groups[0].user_count = 4 groups[1].event_count = 5 groups[1].user_count = 2 groups[2].event_count = 5 groups[2].user_count = 1 grouped = { rules[0]: { groups[0]: [], }, rules[1]: { groups[1]: [], groups[2]: [], }, } assert sort_groups(grouped) == OrderedDict(( (rules[1], OrderedDict(( (groups[1], []), (groups[2], []), ))), (rules[0], OrderedDict(( (groups[0], []), ))), ))
<commit_before><commit_msg>Test some of the digest building functions.<commit_after>
from __future__ import absolute_import from collections import OrderedDict from exam import fixture from sentry.digests import Record from sentry.digests.notifications import ( Notification, event_to_record, rewrite_record, group_records, sort_groups, ) from sentry.testutils import TestCase class RewriteRecordTestCase(TestCase): @fixture def rule(self): return self.event.project.rule_set.all()[0] @fixture def record(self): return event_to_record(self.event, (self.rule,)) def test_success(self): assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={ self.rule.id: self.rule, }, ) == Record( self.record.key, Notification( self.event, [self.rule], ), self.record.timestamp, ) def test_without_group(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={}, rules={ self.rule.id: self.rule, }, ) is None def test_filters_invalid_rules(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={}, ) == Record( self.record.key, Notification(self.event, []), self.record.timestamp, ) class GroupRecordsTestCase(TestCase): @fixture def rule(self): return self.project.rule_set.all()[0] def test_success(self): events = [self.create_event(group=self.group) for _ in xrange(3)] records = [Record(event.id, Notification(event, [self.rule]), event.datetime) for event in events] assert group_records(records) == { self.rule: { self.group: records, }, } class SortRecordsTestCase(TestCase): def test_success(self): rules = list(self.project.rule_set.all()) groups = [self.create_group() for _ in xrange(3)] groups[0].event_count = 10 groups[0].user_count = 4 groups[1].event_count = 5 groups[1].user_count = 2 groups[2].event_count = 5 groups[2].user_count = 1 grouped = { rules[0]: { groups[0]: [], }, rules[1]: { groups[1]: [], groups[2]: [], }, } assert sort_groups(grouped) == OrderedDict(( (rules[1], OrderedDict(( (groups[1], []), (groups[2], []), ))), (rules[0], OrderedDict(( (groups[0], []), ))), ))
Test some of the digest building functions.from __future__ import absolute_import from collections import OrderedDict from exam import fixture from sentry.digests import Record from sentry.digests.notifications import ( Notification, event_to_record, rewrite_record, group_records, sort_groups, ) from sentry.testutils import TestCase class RewriteRecordTestCase(TestCase): @fixture def rule(self): return self.event.project.rule_set.all()[0] @fixture def record(self): return event_to_record(self.event, (self.rule,)) def test_success(self): assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={ self.rule.id: self.rule, }, ) == Record( self.record.key, Notification( self.event, [self.rule], ), self.record.timestamp, ) def test_without_group(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={}, rules={ self.rule.id: self.rule, }, ) is None def test_filters_invalid_rules(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={}, ) == Record( self.record.key, Notification(self.event, []), self.record.timestamp, ) class GroupRecordsTestCase(TestCase): @fixture def rule(self): return self.project.rule_set.all()[0] def test_success(self): events = [self.create_event(group=self.group) for _ in xrange(3)] records = [Record(event.id, Notification(event, [self.rule]), event.datetime) for event in events] assert group_records(records) == { self.rule: { self.group: records, }, } class SortRecordsTestCase(TestCase): def test_success(self): rules = list(self.project.rule_set.all()) groups = [self.create_group() for _ in xrange(3)] groups[0].event_count = 10 groups[0].user_count = 4 groups[1].event_count = 5 groups[1].user_count = 2 groups[2].event_count = 5 groups[2].user_count = 1 grouped = { rules[0]: { groups[0]: [], }, rules[1]: { groups[1]: [], groups[2]: [], }, } assert sort_groups(grouped) == OrderedDict(( (rules[1], OrderedDict(( (groups[1], []), (groups[2], []), ))), (rules[0], OrderedDict(( (groups[0], []), ))), ))
<commit_before><commit_msg>Test some of the digest building functions.<commit_after>from __future__ import absolute_import from collections import OrderedDict from exam import fixture from sentry.digests import Record from sentry.digests.notifications import ( Notification, event_to_record, rewrite_record, group_records, sort_groups, ) from sentry.testutils import TestCase class RewriteRecordTestCase(TestCase): @fixture def rule(self): return self.event.project.rule_set.all()[0] @fixture def record(self): return event_to_record(self.event, (self.rule,)) def test_success(self): assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={ self.rule.id: self.rule, }, ) == Record( self.record.key, Notification( self.event, [self.rule], ), self.record.timestamp, ) def test_without_group(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={}, rules={ self.rule.id: self.rule, }, ) is None def test_filters_invalid_rules(self): # If the record can't be associated with a group, it should be returned as None. assert rewrite_record( self.record, project=self.event.project, groups={ self.event.group.id: self.event.group, }, rules={}, ) == Record( self.record.key, Notification(self.event, []), self.record.timestamp, ) class GroupRecordsTestCase(TestCase): @fixture def rule(self): return self.project.rule_set.all()[0] def test_success(self): events = [self.create_event(group=self.group) for _ in xrange(3)] records = [Record(event.id, Notification(event, [self.rule]), event.datetime) for event in events] assert group_records(records) == { self.rule: { self.group: records, }, } class SortRecordsTestCase(TestCase): def test_success(self): rules = list(self.project.rule_set.all()) groups = [self.create_group() for _ in xrange(3)] groups[0].event_count = 10 groups[0].user_count = 4 groups[1].event_count = 5 groups[1].user_count = 2 groups[2].event_count = 5 groups[2].user_count = 1 grouped = { rules[0]: { groups[0]: [], }, rules[1]: { groups[1]: [], groups[2]: [], }, } assert sort_groups(grouped) == OrderedDict(( (rules[1], OrderedDict(( (groups[1], []), (groups[2], []), ))), (rules[0], OrderedDict(( (groups[0], []), ))), ))
210695ab755a9c1d1d863eec0fedb4ac63931fda
utest/resources/robotdata/datagenerator.py
utest/resources/robotdata/datagenerator.py
#!/usr/bin/env python from getopt import getopt, GetoptError from random import randint import os SUITE=\ """*** Settings *** Resource resource.txt *** Test Cases *** %TESTCASES% *** Keywords *** %KEYWORDS% """ RESOURCE=\ """*** Variables *** @{Resource Var} MOI *** Keywords *** %KEYWORDS% """ KEYWORD_TEMPLATE=\ """My Keyword %KW_ID% No Operation""" TEST_CASE_TEMPLATE=\ """My Test %TEST_ID% My Keyword %KW_ID% Log moi""" def generate_tests(number_of_tests, number_of_keywords): return '\n'.join(TEST_CASE_TEMPLATE.replace('%TEST_ID%', str(test_id))\ .replace('%KW_ID%', str(randint(0,number_of_keywords-1)))\ for test_id in xrange(number_of_tests)) def generate_keywords(number_of_keywords): return '\n'.join(KEYWORD_TEMPLATE.replace('%KW_ID%', str(i)) for i in xrange(number_of_keywords)) def generate_suite(number_of_tests, number_of_keywords): return SUITE.replace('%TESTCASES%', generate_tests(number_of_tests, number_of_keywords))\ .replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate_resource(number_of_keywords): return RESOURCE.replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate(directory, suites, tests, keywords): os.mkdir(directory) for suite_index in xrange(suites): f = open(os.path.join('.', directory, 'suite%s.txt' % suite_index), 'w') f.write(generate_suite(tests, keywords)) f.close() r = open(os.path.join('.', directory, 'resource.txt'), 'w') r.write(generate_resource(keywords)) r.close() def usage(): print 'datagenerator.py -d [directory] -s [NUMBER OF SUITES] -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]' def main(args): try: opts, args = getopt(args, 'd:s:t:k:', []) except GetoptError, e: print e usage() sys.exit(2) if len(opts) != 4: print opts usage() sys.exit(2) for opt, arg in opts: if opt == '-d': directory = arg if opt == '-s': suites = int(arg) if opt == '-t': tests = int(arg) if opt == '-k': keywords = int(arg) generate(directory, suites, tests, keywords) if __name__ == '__main__': import sys main(sys.argv[1:])
Add test data generator tool
Add test data generator tool
Python
apache-2.0
HelioGuilherme66/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,fingeronthebutton/RIDE,HelioGuilherme66/RIDE,robotframework/RIDE,robotframework/RIDE,caio2k/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,fingeronthebutton/RIDE,robotframework/RIDE
Add test data generator tool
#!/usr/bin/env python from getopt import getopt, GetoptError from random import randint import os SUITE=\ """*** Settings *** Resource resource.txt *** Test Cases *** %TESTCASES% *** Keywords *** %KEYWORDS% """ RESOURCE=\ """*** Variables *** @{Resource Var} MOI *** Keywords *** %KEYWORDS% """ KEYWORD_TEMPLATE=\ """My Keyword %KW_ID% No Operation""" TEST_CASE_TEMPLATE=\ """My Test %TEST_ID% My Keyword %KW_ID% Log moi""" def generate_tests(number_of_tests, number_of_keywords): return '\n'.join(TEST_CASE_TEMPLATE.replace('%TEST_ID%', str(test_id))\ .replace('%KW_ID%', str(randint(0,number_of_keywords-1)))\ for test_id in xrange(number_of_tests)) def generate_keywords(number_of_keywords): return '\n'.join(KEYWORD_TEMPLATE.replace('%KW_ID%', str(i)) for i in xrange(number_of_keywords)) def generate_suite(number_of_tests, number_of_keywords): return SUITE.replace('%TESTCASES%', generate_tests(number_of_tests, number_of_keywords))\ .replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate_resource(number_of_keywords): return RESOURCE.replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate(directory, suites, tests, keywords): os.mkdir(directory) for suite_index in xrange(suites): f = open(os.path.join('.', directory, 'suite%s.txt' % suite_index), 'w') f.write(generate_suite(tests, keywords)) f.close() r = open(os.path.join('.', directory, 'resource.txt'), 'w') r.write(generate_resource(keywords)) r.close() def usage(): print 'datagenerator.py -d [directory] -s [NUMBER OF SUITES] -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]' def main(args): try: opts, args = getopt(args, 'd:s:t:k:', []) except GetoptError, e: print e usage() sys.exit(2) if len(opts) != 4: print opts usage() sys.exit(2) for opt, arg in opts: if opt == '-d': directory = arg if opt == '-s': suites = int(arg) if opt == '-t': tests = int(arg) if opt == '-k': keywords = int(arg) generate(directory, suites, tests, keywords) if __name__ == '__main__': import sys main(sys.argv[1:])
<commit_before><commit_msg>Add test data generator tool<commit_after>
#!/usr/bin/env python from getopt import getopt, GetoptError from random import randint import os SUITE=\ """*** Settings *** Resource resource.txt *** Test Cases *** %TESTCASES% *** Keywords *** %KEYWORDS% """ RESOURCE=\ """*** Variables *** @{Resource Var} MOI *** Keywords *** %KEYWORDS% """ KEYWORD_TEMPLATE=\ """My Keyword %KW_ID% No Operation""" TEST_CASE_TEMPLATE=\ """My Test %TEST_ID% My Keyword %KW_ID% Log moi""" def generate_tests(number_of_tests, number_of_keywords): return '\n'.join(TEST_CASE_TEMPLATE.replace('%TEST_ID%', str(test_id))\ .replace('%KW_ID%', str(randint(0,number_of_keywords-1)))\ for test_id in xrange(number_of_tests)) def generate_keywords(number_of_keywords): return '\n'.join(KEYWORD_TEMPLATE.replace('%KW_ID%', str(i)) for i in xrange(number_of_keywords)) def generate_suite(number_of_tests, number_of_keywords): return SUITE.replace('%TESTCASES%', generate_tests(number_of_tests, number_of_keywords))\ .replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate_resource(number_of_keywords): return RESOURCE.replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate(directory, suites, tests, keywords): os.mkdir(directory) for suite_index in xrange(suites): f = open(os.path.join('.', directory, 'suite%s.txt' % suite_index), 'w') f.write(generate_suite(tests, keywords)) f.close() r = open(os.path.join('.', directory, 'resource.txt'), 'w') r.write(generate_resource(keywords)) r.close() def usage(): print 'datagenerator.py -d [directory] -s [NUMBER OF SUITES] -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]' def main(args): try: opts, args = getopt(args, 'd:s:t:k:', []) except GetoptError, e: print e usage() sys.exit(2) if len(opts) != 4: print opts usage() sys.exit(2) for opt, arg in opts: if opt == '-d': directory = arg if opt == '-s': suites = int(arg) if opt == '-t': tests = int(arg) if opt == '-k': keywords = int(arg) generate(directory, suites, tests, keywords) if __name__ == '__main__': import sys main(sys.argv[1:])
Add test data generator tool#!/usr/bin/env python from getopt import getopt, GetoptError from random import randint import os SUITE=\ """*** Settings *** Resource resource.txt *** Test Cases *** %TESTCASES% *** Keywords *** %KEYWORDS% """ RESOURCE=\ """*** Variables *** @{Resource Var} MOI *** Keywords *** %KEYWORDS% """ KEYWORD_TEMPLATE=\ """My Keyword %KW_ID% No Operation""" TEST_CASE_TEMPLATE=\ """My Test %TEST_ID% My Keyword %KW_ID% Log moi""" def generate_tests(number_of_tests, number_of_keywords): return '\n'.join(TEST_CASE_TEMPLATE.replace('%TEST_ID%', str(test_id))\ .replace('%KW_ID%', str(randint(0,number_of_keywords-1)))\ for test_id in xrange(number_of_tests)) def generate_keywords(number_of_keywords): return '\n'.join(KEYWORD_TEMPLATE.replace('%KW_ID%', str(i)) for i in xrange(number_of_keywords)) def generate_suite(number_of_tests, number_of_keywords): return SUITE.replace('%TESTCASES%', generate_tests(number_of_tests, number_of_keywords))\ .replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate_resource(number_of_keywords): return RESOURCE.replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate(directory, suites, tests, keywords): os.mkdir(directory) for suite_index in xrange(suites): f = open(os.path.join('.', directory, 'suite%s.txt' % suite_index), 'w') f.write(generate_suite(tests, keywords)) f.close() r = open(os.path.join('.', directory, 'resource.txt'), 'w') r.write(generate_resource(keywords)) r.close() def usage(): print 'datagenerator.py -d [directory] -s [NUMBER OF SUITES] -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]' def main(args): try: opts, args = getopt(args, 'd:s:t:k:', []) except GetoptError, e: print e usage() sys.exit(2) if len(opts) != 4: print opts usage() sys.exit(2) for opt, arg in opts: if opt == '-d': directory = arg if opt == '-s': suites = int(arg) if opt == '-t': tests = int(arg) if opt == '-k': keywords = int(arg) generate(directory, suites, tests, keywords) if __name__ == '__main__': import sys main(sys.argv[1:])
<commit_before><commit_msg>Add test data generator tool<commit_after>#!/usr/bin/env python from getopt import getopt, GetoptError from random import randint import os SUITE=\ """*** Settings *** Resource resource.txt *** Test Cases *** %TESTCASES% *** Keywords *** %KEYWORDS% """ RESOURCE=\ """*** Variables *** @{Resource Var} MOI *** Keywords *** %KEYWORDS% """ KEYWORD_TEMPLATE=\ """My Keyword %KW_ID% No Operation""" TEST_CASE_TEMPLATE=\ """My Test %TEST_ID% My Keyword %KW_ID% Log moi""" def generate_tests(number_of_tests, number_of_keywords): return '\n'.join(TEST_CASE_TEMPLATE.replace('%TEST_ID%', str(test_id))\ .replace('%KW_ID%', str(randint(0,number_of_keywords-1)))\ for test_id in xrange(number_of_tests)) def generate_keywords(number_of_keywords): return '\n'.join(KEYWORD_TEMPLATE.replace('%KW_ID%', str(i)) for i in xrange(number_of_keywords)) def generate_suite(number_of_tests, number_of_keywords): return SUITE.replace('%TESTCASES%', generate_tests(number_of_tests, number_of_keywords))\ .replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate_resource(number_of_keywords): return RESOURCE.replace('%KEYWORDS%', generate_keywords(number_of_keywords)) def generate(directory, suites, tests, keywords): os.mkdir(directory) for suite_index in xrange(suites): f = open(os.path.join('.', directory, 'suite%s.txt' % suite_index), 'w') f.write(generate_suite(tests, keywords)) f.close() r = open(os.path.join('.', directory, 'resource.txt'), 'w') r.write(generate_resource(keywords)) r.close() def usage(): print 'datagenerator.py -d [directory] -s [NUMBER OF SUITES] -t [NUMBER OF TESTS IN SUITE] -k [NUMBER OF KEYWORDS]' def main(args): try: opts, args = getopt(args, 'd:s:t:k:', []) except GetoptError, e: print e usage() sys.exit(2) if len(opts) != 4: print opts usage() sys.exit(2) for opt, arg in opts: if opt == '-d': directory = arg if opt == '-s': suites = int(arg) if opt == '-t': tests = int(arg) if opt == '-k': keywords = int(arg) generate(directory, suites, tests, keywords) if __name__ == '__main__': import sys main(sys.argv[1:])
9f68956abafd93c109f7304e393dd7424916b8bb
scripts/tools/morphlength_from_annotations.py
scripts/tools/morphlength_from_annotations.py
from __future__ import division import fileinput def main(): tot_morph_count = 0 tot_length = 0 for line in fileinput.input(): word, segm = line.strip().split(None, 1) segmentations = segm.split(',') num_morphs = [len([x for x in s.split(None) if x.strip().strip("~") != ""]) for s in segmentations] tot_morph_count += sum(num_morphs) / len(num_morphs) tot_length += len(word) print(tot_length / tot_morph_count) if __name__ == "__main__": main()
Create tool to count morph length from annotation file
Create tool to count morph length from annotation file

Still a bit of a quick hack at the moment, should be transformed into a real script maybe?
Python
bsd-2-clause
aalto-speech/morfessor
Create tool to count morph length from annotation file

Still a bit of a quick hack at the moment, should be transformed into a real script maybe?
from __future__ import division import fileinput def main(): tot_morph_count = 0 tot_length = 0 for line in fileinput.input(): word, segm = line.strip().split(None, 1) segmentations = segm.split(',') num_morphs = [len([x for x in s.split(None) if x.strip().strip("~") != ""]) for s in segmentations] tot_morph_count += sum(num_morphs) / len(num_morphs) tot_length += len(word) print(tot_length / tot_morph_count) if __name__ == "__main__": main()
<commit_before><commit_msg>Create tool to count morph length from annotation file

Still a bit of a quick hack at the moment, should be transformed into a real script maybe?<commit_after>
from __future__ import division import fileinput def main(): tot_morph_count = 0 tot_length = 0 for line in fileinput.input(): word, segm = line.strip().split(None, 1) segmentations = segm.split(',') num_morphs = [len([x for x in s.split(None) if x.strip().strip("~") != ""]) for s in segmentations] tot_morph_count += sum(num_morphs) / len(num_morphs) tot_length += len(word) print(tot_length / tot_morph_count) if __name__ == "__main__": main()
Create tool to count morph length from annotation file

Still a bit of a quick hack at the moment, should be transformed into a real script maybe?from __future__ import division
import fileinput


def main():
    tot_morph_count = 0
    tot_length = 0
    for line in fileinput.input():
        word, segm = line.strip().split(None, 1)
        segmentations = segm.split(',')
        num_morphs = [len([x for x in s.split(None) if x.strip().strip("~") != ""]) for s in segmentations]
        tot_morph_count += sum(num_morphs) / len(num_morphs)
        tot_length += len(word)

    print(tot_length / tot_morph_count)

if __name__ == "__main__":
    main()
<commit_before><commit_msg>Create tool to count morph length from annotation file

Still a bit of a quick hack at the moment, should be transformed into a real script maybe?<commit_after>from __future__ import division
import fileinput


def main():
    tot_morph_count = 0
    tot_length = 0
    for line in fileinput.input():
        word, segm = line.strip().split(None, 1)
        segmentations = segm.split(',')
        num_morphs = [len([x for x in s.split(None) if x.strip().strip("~") != ""]) for s in segmentations]
        tot_morph_count += sum(num_morphs) / len(num_morphs)
        tot_length += len(word)

    print(tot_length / tot_morph_count)

if __name__ == "__main__":
    main()
c7c2be0f2edc47e88d1dcc6169098b8018c8b108
tests/formats_test/device_test.py
tests/formats_test/device_test.py
#!/usr/bin/python import unittest import blivet class DeviceFormatTestCase(unittest.TestCase): def testFormats(self): absolute_path = "/abs/path" host_path = "host:path" garbage = "abc#<def>" for fclass in blivet.formats.device_formats.values(): an_fs = fclass() # all formats accept None for device try: an_fs.device = None except ValueError: raise self.failureException("ValueError raised") # NoDevFS accepts anything if isinstance(an_fs, blivet.formats.fs.NoDevFS): try: an_fs.device = absolute_path an_fs.device = host_path an_fs.device = garbage an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") elif isinstance(an_fs, blivet.formats.fs.NFS): try: an_fs.device = host_path except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = absolute_path with self.assertRaises(ValueError): an_fs.device = garbage with self.assertRaises(ValueError): an_fs.device = "" else: try: an_fs.device = absolute_path an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = host_path with self.assertRaises(ValueError): an_fs.device = garbage class DeviceValueTestCase(unittest.TestCase): def testValue(self): for fclass in blivet.formats.device_formats.values(): an_fs = fclass() if isinstance(an_fs, blivet.formats.fs.TmpFS): # type == device == _type == _device == "tmpfs" always vals = [an_fs.type, an_fs.device, an_fs._type, an_fs._device] self.assertTrue(all(x == "tmpfs" for x in vals)) an_fs.device = "new" self.assertTrue(all(x == "tmpfs" for x in vals)) elif isinstance(an_fs, blivet.formats.fs.NoDevFS): # type == device == _type == _device vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == an_fs._type for x in vals)) an_fs.device = "new" # _type is unchanged, but type, device, _device have new value self.assertNotEqual(an_fs._type, "new") vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == "new" for x in vals)) else: # other formats are straightforward typ = an_fs.type an_fs.device = "/abc:/def" self.assertEqual(an_fs.type, typ) self.assertEqual(an_fs.device, "/abc:/def") if __name__ == "__main__": unittest.main()
Add a test to check properties of device paths assigned to formats.
Add a test to check properties of device paths assigned to formats. Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
Python
lgpl-2.1
AdamWill/blivet,AdamWill/blivet,rvykydal/blivet,vojtechtrefny/blivet,vpodzime/blivet,rhinstaller/blivet,vpodzime/blivet,jkonecny12/blivet,vojtechtrefny/blivet,dwlehman/blivet,rvykydal/blivet,jkonecny12/blivet,rhinstaller/blivet,dwlehman/blivet
Add a test to check properties of device paths assigned to formats. Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
#!/usr/bin/python import unittest import blivet class DeviceFormatTestCase(unittest.TestCase): def testFormats(self): absolute_path = "/abs/path" host_path = "host:path" garbage = "abc#<def>" for fclass in blivet.formats.device_formats.values(): an_fs = fclass() # all formats accept None for device try: an_fs.device = None except ValueError: raise self.failureException("ValueError raised") # NoDevFS accepts anything if isinstance(an_fs, blivet.formats.fs.NoDevFS): try: an_fs.device = absolute_path an_fs.device = host_path an_fs.device = garbage an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") elif isinstance(an_fs, blivet.formats.fs.NFS): try: an_fs.device = host_path except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = absolute_path with self.assertRaises(ValueError): an_fs.device = garbage with self.assertRaises(ValueError): an_fs.device = "" else: try: an_fs.device = absolute_path an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = host_path with self.assertRaises(ValueError): an_fs.device = garbage class DeviceValueTestCase(unittest.TestCase): def testValue(self): for fclass in blivet.formats.device_formats.values(): an_fs = fclass() if isinstance(an_fs, blivet.formats.fs.TmpFS): # type == device == _type == _device == "tmpfs" always vals = [an_fs.type, an_fs.device, an_fs._type, an_fs._device] self.assertTrue(all(x == "tmpfs" for x in vals)) an_fs.device = "new" self.assertTrue(all(x == "tmpfs" for x in vals)) elif isinstance(an_fs, blivet.formats.fs.NoDevFS): # type == device == _type == _device vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == an_fs._type for x in vals)) an_fs.device = "new" # _type is unchanged, but type, device, _device have new value self.assertNotEqual(an_fs._type, "new") vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == "new" for x in vals)) else: # other formats are straightforward typ = an_fs.type an_fs.device = "/abc:/def" self.assertEqual(an_fs.type, typ) self.assertEqual(an_fs.device, "/abc:/def") if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add a test to check properties of device paths assigned to formats. Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>
#!/usr/bin/python import unittest import blivet class DeviceFormatTestCase(unittest.TestCase): def testFormats(self): absolute_path = "/abs/path" host_path = "host:path" garbage = "abc#<def>" for fclass in blivet.formats.device_formats.values(): an_fs = fclass() # all formats accept None for device try: an_fs.device = None except ValueError: raise self.failureException("ValueError raised") # NoDevFS accepts anything if isinstance(an_fs, blivet.formats.fs.NoDevFS): try: an_fs.device = absolute_path an_fs.device = host_path an_fs.device = garbage an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") elif isinstance(an_fs, blivet.formats.fs.NFS): try: an_fs.device = host_path except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = absolute_path with self.assertRaises(ValueError): an_fs.device = garbage with self.assertRaises(ValueError): an_fs.device = "" else: try: an_fs.device = absolute_path an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = host_path with self.assertRaises(ValueError): an_fs.device = garbage class DeviceValueTestCase(unittest.TestCase): def testValue(self): for fclass in blivet.formats.device_formats.values(): an_fs = fclass() if isinstance(an_fs, blivet.formats.fs.TmpFS): # type == device == _type == _device == "tmpfs" always vals = [an_fs.type, an_fs.device, an_fs._type, an_fs._device] self.assertTrue(all(x == "tmpfs" for x in vals)) an_fs.device = "new" self.assertTrue(all(x == "tmpfs" for x in vals)) elif isinstance(an_fs, blivet.formats.fs.NoDevFS): # type == device == _type == _device vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == an_fs._type for x in vals)) an_fs.device = "new" # _type is unchanged, but type, device, _device have new value self.assertNotEqual(an_fs._type, "new") vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == "new" for x in vals)) else: # other formats are straightforward typ = an_fs.type an_fs.device = "/abc:/def" self.assertEqual(an_fs.type, typ) self.assertEqual(an_fs.device, "/abc:/def") if __name__ == "__main__": unittest.main()
Add a test to check properties of device paths assigned to formats. Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>#!/usr/bin/python import unittest import blivet class DeviceFormatTestCase(unittest.TestCase): def testFormats(self): absolute_path = "/abs/path" host_path = "host:path" garbage = "abc#<def>" for fclass in blivet.formats.device_formats.values(): an_fs = fclass() # all formats accept None for device try: an_fs.device = None except ValueError: raise self.failureException("ValueError raised") # NoDevFS accepts anything if isinstance(an_fs, blivet.formats.fs.NoDevFS): try: an_fs.device = absolute_path an_fs.device = host_path an_fs.device = garbage an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") elif isinstance(an_fs, blivet.formats.fs.NFS): try: an_fs.device = host_path except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = absolute_path with self.assertRaises(ValueError): an_fs.device = garbage with self.assertRaises(ValueError): an_fs.device = "" else: try: an_fs.device = absolute_path an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = host_path with self.assertRaises(ValueError): an_fs.device = garbage class DeviceValueTestCase(unittest.TestCase): def testValue(self): for fclass in blivet.formats.device_formats.values(): an_fs = fclass() if isinstance(an_fs, blivet.formats.fs.TmpFS): # type == device == _type == _device == "tmpfs" always vals = [an_fs.type, an_fs.device, an_fs._type, an_fs._device] self.assertTrue(all(x == "tmpfs" for x in vals)) an_fs.device = "new" self.assertTrue(all(x == "tmpfs" for x in vals)) elif isinstance(an_fs, blivet.formats.fs.NoDevFS): # type == device == _type == _device vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == an_fs._type for x in vals)) an_fs.device = "new" # _type is unchanged, but type, device, _device have new value self.assertNotEqual(an_fs._type, "new") vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == "new" for x in vals)) else: # other formats are straightforward typ = an_fs.type an_fs.device = "/abc:/def" self.assertEqual(an_fs.type, typ) self.assertEqual(an_fs.device, "/abc:/def") if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add a test to check properties of device paths assigned to formats. Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com><commit_after>#!/usr/bin/python import unittest import blivet class DeviceFormatTestCase(unittest.TestCase): def testFormats(self): absolute_path = "/abs/path" host_path = "host:path" garbage = "abc#<def>" for fclass in blivet.formats.device_formats.values(): an_fs = fclass() # all formats accept None for device try: an_fs.device = None except ValueError: raise self.failureException("ValueError raised") # NoDevFS accepts anything if isinstance(an_fs, blivet.formats.fs.NoDevFS): try: an_fs.device = absolute_path an_fs.device = host_path an_fs.device = garbage an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") elif isinstance(an_fs, blivet.formats.fs.NFS): try: an_fs.device = host_path except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = absolute_path with self.assertRaises(ValueError): an_fs.device = garbage with self.assertRaises(ValueError): an_fs.device = "" else: try: an_fs.device = absolute_path an_fs.device = "" except ValueError: raise self.failureException("ValueError raised") with self.assertRaises(ValueError): an_fs.device = host_path with self.assertRaises(ValueError): an_fs.device = garbage class DeviceValueTestCase(unittest.TestCase): def testValue(self): for fclass in blivet.formats.device_formats.values(): an_fs = fclass() if isinstance(an_fs, blivet.formats.fs.TmpFS): # type == device == _type == _device == "tmpfs" always vals = [an_fs.type, an_fs.device, an_fs._type, an_fs._device] self.assertTrue(all(x == "tmpfs" for x in vals)) an_fs.device = "new" self.assertTrue(all(x == "tmpfs" for x in vals)) elif isinstance(an_fs, blivet.formats.fs.NoDevFS): # type == device == _type == _device vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == an_fs._type for x in vals)) an_fs.device = "new" # _type is unchanged, but type, device, _device have new value self.assertNotEqual(an_fs._type, "new") vals = [an_fs.type, an_fs.device, an_fs._device] self.assertTrue(all(x == "new" for x in vals)) else: # other formats are straightforward typ = an_fs.type an_fs.device = "/abc:/def" self.assertEqual(an_fs.type, typ) self.assertEqual(an_fs.device, "/abc:/def") if __name__ == "__main__": unittest.main()
611852c39f23bc3117c17df5ba911617605fa60f
pyglab/apirequest.py
pyglab/apirequest.py
import enum

import requests

_defaults = {
    'page': 1,
    'per_page': 20,
}

@enum.unique
class RequestType(enum.Enum):
    GET = 1
    POST = 2
    PUT = 3
    DELETE = 4

class ApiRequest:
    request_creators = {
        RequestType.GET: requests.get,
        RequestType.POST: requests.post,
        RequestType.PUT: requests.put,
        RequestType.DELETE: requests.delete,
    }

    def __init__(self, request_type, url, token, params={}, sudo=None,
                 page=None, per_page=None):
        # Build header
        header = {'PRIVATE-TOKEN': token}
        if sudo is not None:
            header['SUDO'] = sudo
        # Build parameters
        if page is not None:
            params['page'] = page
        if per_page is not None:
            params['per_page'] = per_page
        r = self.request_creators[request_type](url, params=params,
                                                headers=header)
Add initial (non-working) ApiRequest class.
Add initial (non-working) ApiRequest class.
Python
mit
sloede/pyglab,sloede/pyglab
Add initial (non-working) ApiRequest class.
import enum

import requests

_defaults = {
    'page': 1,
    'per_page': 20,
}

@enum.unique
class RequestType(enum.Enum):
    GET = 1
    POST = 2
    PUT = 3
    DELETE = 4

class ApiRequest:
    request_creators = {
        RequestType.GET: requests.get,
        RequestType.POST: requests.post,
        RequestType.PUT: requests.put,
        RequestType.DELETE: requests.delete,
    }

    def __init__(self, request_type, url, token, params={}, sudo=None,
                 page=None, per_page=None):
        # Build header
        header = {'PRIVATE-TOKEN': token}
        if sudo is not None:
            header['SUDO'] = sudo
        # Build parameters
        if page is not None:
            params['page'] = page
        if per_page is not None:
            params['per_page'] = per_page
        r = self.request_creators[request_type](url, params=params,
                                                headers=header)
<commit_before><commit_msg>Add initial (non-working) ApiRequest class.<commit_after>
import enum

import requests

_defaults = {
    'page': 1,
    'per_page': 20,
}

@enum.unique
class RequestType(enum.Enum):
    GET = 1
    POST = 2
    PUT = 3
    DELETE = 4

class ApiRequest:
    request_creators = {
        RequestType.GET: requests.get,
        RequestType.POST: requests.post,
        RequestType.PUT: requests.put,
        RequestType.DELETE: requests.delete,
    }

    def __init__(self, request_type, url, token, params={}, sudo=None,
                 page=None, per_page=None):
        # Build header
        header = {'PRIVATE-TOKEN': token}
        if sudo is not None:
            header['SUDO'] = sudo
        # Build parameters
        if page is not None:
            params['page'] = page
        if per_page is not None:
            params['per_page'] = per_page
        r = self.request_creators[request_type](url, params=params,
                                                headers=header)
Add initial (non-working) ApiRequest class.import enum

import requests

_defaults = {
    'page': 1,
    'per_page': 20,
}

@enum.unique
class RequestType(enum.Enum):
    GET = 1
    POST = 2
    PUT = 3
    DELETE = 4

class ApiRequest:
    request_creators = {
        RequestType.GET: requests.get,
        RequestType.POST: requests.post,
        RequestType.PUT: requests.put,
        RequestType.DELETE: requests.delete,
    }

    def __init__(self, request_type, url, token, params={}, sudo=None,
                 page=None, per_page=None):
        # Build header
        header = {'PRIVATE-TOKEN': token}
        if sudo is not None:
            header['SUDO'] = sudo
        # Build parameters
        if page is not None:
            params['page'] = page
        if per_page is not None:
            params['per_page'] = per_page
        r = self.request_creators[request_type](url, params=params,
                                                headers=header)
<commit_before><commit_msg>Add initial (non-working) ApiRequest class.<commit_after>import enum

import requests

_defaults = {
    'page': 1,
    'per_page': 20,
}

@enum.unique
class RequestType(enum.Enum):
    GET = 1
    POST = 2
    PUT = 3
    DELETE = 4

class ApiRequest:
    request_creators = {
        RequestType.GET: requests.get,
        RequestType.POST: requests.post,
        RequestType.PUT: requests.put,
        RequestType.DELETE: requests.delete,
    }

    def __init__(self, request_type, url, token, params={}, sudo=None,
                 page=None, per_page=None):
        # Build header
        header = {'PRIVATE-TOKEN': token}
        if sudo is not None:
            header['SUDO'] = sudo
        # Build parameters
        if page is not None:
            params['page'] = page
        if per_page is not None:
            params['per_page'] = per_page
        r = self.request_creators[request_type](url, params=params,
                                                headers=header)
ff61653ff66123eb6e445f400855825e0aeb5882
examples/test_contains_selector.py
examples/test_contains_selector.py
from seleniumbase import BaseCase class MyTestClass(BaseCase): def test_contains_selector(self): self.open("https://xkcd.com/2207/") self.assert_text("Math Work", "#ctitle") self.click('a:contains("Next")') self.assert_text("Drone Fishing", "#ctitle")
Add an example test for the ":contains()" selector
Add an example test for the ":contains()" selector
Python
mit
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
Add an example test for the ":contains()" selector
from seleniumbase import BaseCase class MyTestClass(BaseCase): def test_contains_selector(self): self.open("https://xkcd.com/2207/") self.assert_text("Math Work", "#ctitle") self.click('a:contains("Next")') self.assert_text("Drone Fishing", "#ctitle")
<commit_before><commit_msg>Add an example test for the ":contains()" selector<commit_after>
from seleniumbase import BaseCase class MyTestClass(BaseCase): def test_contains_selector(self): self.open("https://xkcd.com/2207/") self.assert_text("Math Work", "#ctitle") self.click('a:contains("Next")') self.assert_text("Drone Fishing", "#ctitle")
Add an example test for the ":contains()" selectorfrom seleniumbase import BaseCase class MyTestClass(BaseCase): def test_contains_selector(self): self.open("https://xkcd.com/2207/") self.assert_text("Math Work", "#ctitle") self.click('a:contains("Next")') self.assert_text("Drone Fishing", "#ctitle")
<commit_before><commit_msg>Add an example test for the ":contains()" selector<commit_after>from seleniumbase import BaseCase class MyTestClass(BaseCase): def test_contains_selector(self): self.open("https://xkcd.com/2207/") self.assert_text("Math Work", "#ctitle") self.click('a:contains("Next")') self.assert_text("Drone Fishing", "#ctitle")
9e975d97a45022d8e18cb67c33af2f8f03ce94de
exponent/test/auth/test_session.py
exponent/test/auth/test_session.py
""" Tests for session-based authentication. """ from axiom import store from exponent.auth import errors, session, user from twisted.trial import unittest from txampext import commandtests class RequestSesssionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the AMP command to request a session identifier. """ command = session.RequestSession argumentObjects = argumentStrings = {} responseObjects = responseStrings = {"sessionIdentifier": "sid"} class LoginSessionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the session-based login AMP command. """ command = session.LoginSession argumentObjects = argumentStrings = { "userIdentifier": "uid", "sessionIdentifier": "sid" } responseObjects = responseStrings = { "sessionIdentifier": "newsid" } class LoginTests(unittest.TestCase): def setUp(self): self.store = store.Store() self.user = user.User(store=store.Store(), uid="uid") session._Session(store=self.user.store, identifier="sid") self._locator = session.Locator(self.store) def _login(self, userIdentifier=None, sessionIdentifier=None): if userIdentifier is None: userIdentifier = self.user.uid if sessionIdentifier is None: sessionIdentifier = self._getCurrentIdentifier() return self._locator.login(userIdentifier, sessionIdentifier) def _getCurrentIdentifier(self): """ Returns the current identifier, as a string. """ return self.user.store.findUnique(session._Session).identifier def test_cantLoginWithInvalidSessionIdentifier(self): """ Tests that users can not log in with an invalid session identifier. """ d = self._login(self.user.uid, "BOGUS") self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginWithInvalidUserIdentifier(self): """ Tests that users can not log in with an invalid user identifier. """ d = self._login("BOGUS", self._getCurrentIdentifier()) self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginTwiceWithSameSessionIdentfiier(self): """ Tests that session identifiers are invalidated after single use. """ oldIdentifier = self._getCurrentIdentifier() d = self._login(self.user.uid, oldIdentifier) return d
Add some tests for session authentication
Add some tests for session authentication
Python
isc
lvh/exponent
Add some tests for session authentication
""" Tests for session-based authentication. """ from axiom import store from exponent.auth import errors, session, user from twisted.trial import unittest from txampext import commandtests class RequestSesssionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the AMP command to request a session identifier. """ command = session.RequestSession argumentObjects = argumentStrings = {} responseObjects = responseStrings = {"sessionIdentifier": "sid"} class LoginSessionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the session-based login AMP command. """ command = session.LoginSession argumentObjects = argumentStrings = { "userIdentifier": "uid", "sessionIdentifier": "sid" } responseObjects = responseStrings = { "sessionIdentifier": "newsid" } class LoginTests(unittest.TestCase): def setUp(self): self.store = store.Store() self.user = user.User(store=store.Store(), uid="uid") session._Session(store=self.user.store, identifier="sid") self._locator = session.Locator(self.store) def _login(self, userIdentifier=None, sessionIdentifier=None): if userIdentifier is None: userIdentifier = self.user.uid if sessionIdentifier is None: sessionIdentifier = self._getCurrentIdentifier() return self._locator.login(userIdentifier, sessionIdentifier) def _getCurrentIdentifier(self): """ Returns the current identifier, as a string. """ return self.user.store.findUnique(session._Session).identifier def test_cantLoginWithInvalidSessionIdentifier(self): """ Tests that users can not log in with an invalid session identifier. """ d = self._login(self.user.uid, "BOGUS") self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginWithInvalidUserIdentifier(self): """ Tests that users can not log in with an invalid user identifier. """ d = self._login("BOGUS", self._getCurrentIdentifier()) self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginTwiceWithSameSessionIdentfiier(self): """ Tests that session identifiers are invalidated after single use. """ oldIdentifier = self._getCurrentIdentifier() d = self._login(self.user.uid, oldIdentifier) return d
<commit_before><commit_msg>Add some tests for session authentication<commit_after>
""" Tests for session-based authentication. """ from axiom import store from exponent.auth import errors, session, user from twisted.trial import unittest from txampext import commandtests class RequestSesssionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the AMP command to request a session identifier. """ command = session.RequestSession argumentObjects = argumentStrings = {} responseObjects = responseStrings = {"sessionIdentifier": "sid"} class LoginSessionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the session-based login AMP command. """ command = session.LoginSession argumentObjects = argumentStrings = { "userIdentifier": "uid", "sessionIdentifier": "sid" } responseObjects = responseStrings = { "sessionIdentifier": "newsid" } class LoginTests(unittest.TestCase): def setUp(self): self.store = store.Store() self.user = user.User(store=store.Store(), uid="uid") session._Session(store=self.user.store, identifier="sid") self._locator = session.Locator(self.store) def _login(self, userIdentifier=None, sessionIdentifier=None): if userIdentifier is None: userIdentifier = self.user.uid if sessionIdentifier is None: sessionIdentifier = self._getCurrentIdentifier() return self._locator.login(userIdentifier, sessionIdentifier) def _getCurrentIdentifier(self): """ Returns the current identifier, as a string. """ return self.user.store.findUnique(session._Session).identifier def test_cantLoginWithInvalidSessionIdentifier(self): """ Tests that users can not log in with an invalid session identifier. """ d = self._login(self.user.uid, "BOGUS") self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginWithInvalidUserIdentifier(self): """ Tests that users can not log in with an invalid user identifier. """ d = self._login("BOGUS", self._getCurrentIdentifier()) self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginTwiceWithSameSessionIdentfiier(self): """ Tests that session identifiers are invalidated after single use. """ oldIdentifier = self._getCurrentIdentifier() d = self._login(self.user.uid, oldIdentifier) return d
Add some tests for session authentication""" Tests for session-based authentication. """ from axiom import store from exponent.auth import errors, session, user from twisted.trial import unittest from txampext import commandtests class RequestSesssionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the AMP command to request a session identifier. """ command = session.RequestSession argumentObjects = argumentStrings = {} responseObjects = responseStrings = {"sessionIdentifier": "sid"} class LoginSessionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the session-based login AMP command. """ command = session.LoginSession argumentObjects = argumentStrings = { "userIdentifier": "uid", "sessionIdentifier": "sid" } responseObjects = responseStrings = { "sessionIdentifier": "newsid" } class LoginTests(unittest.TestCase): def setUp(self): self.store = store.Store() self.user = user.User(store=store.Store(), uid="uid") session._Session(store=self.user.store, identifier="sid") self._locator = session.Locator(self.store) def _login(self, userIdentifier=None, sessionIdentifier=None): if userIdentifier is None: userIdentifier = self.user.uid if sessionIdentifier is None: sessionIdentifier = self._getCurrentIdentifier() return self._locator.login(userIdentifier, sessionIdentifier) def _getCurrentIdentifier(self): """ Returns the current identifier, as a string. """ return self.user.store.findUnique(session._Session).identifier def test_cantLoginWithInvalidSessionIdentifier(self): """ Tests that users can not log in with an invalid session identifier. """ d = self._login(self.user.uid, "BOGUS") self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginWithInvalidUserIdentifier(self): """ Tests that users can not log in with an invalid user identifier. """ d = self._login("BOGUS", self._getCurrentIdentifier()) self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginTwiceWithSameSessionIdentfiier(self): """ Tests that session identifiers are invalidated after single use. """ oldIdentifier = self._getCurrentIdentifier() d = self._login(self.user.uid, oldIdentifier) return d
<commit_before><commit_msg>Add some tests for session authentication<commit_after>""" Tests for session-based authentication. """ from axiom import store from exponent.auth import errors, session, user from twisted.trial import unittest from txampext import commandtests class RequestSesssionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the AMP command to request a session identifier. """ command = session.RequestSession argumentObjects = argumentStrings = {} responseObjects = responseStrings = {"sessionIdentifier": "sid"} class LoginSessionTests(unittest.TestCase, commandtests.CommandTestMixin): """ Tests for the session-based login AMP command. """ command = session.LoginSession argumentObjects = argumentStrings = { "userIdentifier": "uid", "sessionIdentifier": "sid" } responseObjects = responseStrings = { "sessionIdentifier": "newsid" } class LoginTests(unittest.TestCase): def setUp(self): self.store = store.Store() self.user = user.User(store=store.Store(), uid="uid") session._Session(store=self.user.store, identifier="sid") self._locator = session.Locator(self.store) def _login(self, userIdentifier=None, sessionIdentifier=None): if userIdentifier is None: userIdentifier = self.user.uid if sessionIdentifier is None: sessionIdentifier = self._getCurrentIdentifier() return self._locator.login(userIdentifier, sessionIdentifier) def _getCurrentIdentifier(self): """ Returns the current identifier, as a string. """ return self.user.store.findUnique(session._Session).identifier def test_cantLoginWithInvalidSessionIdentifier(self): """ Tests that users can not log in with an invalid session identifier. """ d = self._login(self.user.uid, "BOGUS") self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginWithInvalidUserIdentifier(self): """ Tests that users can not log in with an invalid user identifier. """ d = self._login("BOGUS", self._getCurrentIdentifier()) self.assertFailure(d, errors.BadCredentials) return d def test_cantLoginTwiceWithSameSessionIdentfiier(self): """ Tests that session identifiers are invalidated after single use. """ oldIdentifier = self._getCurrentIdentifier() d = self._login(self.user.uid, oldIdentifier) return d
cf8897e28024171e94e0d83f2026166ab9537f0b
salt/cli/__init__.py
salt/cli/__init__.py
'''
The management of salt command line utilities is stored in here
'''
# Import python libs
import optparse
import os
import sys

# Import salt components
import salt.client

class SaltCMD(object):
    '''
    The execution of a salt command happens here
    '''
    def __init__(self):
        '''
        Create a SaltCMD object
        '''
        self.opts = self.__parse()

    def __parse(self):
        '''
        Parse the command line
        '''
        parser = optparse.OptionParser()
        parser.add_option('-t',
                '--timeout',
                default=5,
                dest='timeout',
                help='Set the return timeout for batch jobs')

        options, args = parser.parse_args()

        opts = {}

        opts['timeout'] = options.timeout
        opts['tgt'] = args[1]
        opts['fun'] = args[2]
        opts['arg'] = args[3:]

        return opts

    def run(self):
        '''
        Execute the salt command line
        '''
        local = salt.client.LocalClient()
        print local.cmd(self.opts['tgt'],
                self.opts['fun'],
                self.opts['arg'],
                self.opts['timeout'])
Set up cli executor for salt command line
Set up cli executor for salt command line
Python
apache-2.0
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
Set up cli executor for salt command line
'''
The management of salt command line utilities is stored in here
'''
# Import python libs
import optparse
import os
import sys

# Import salt components
import salt.client

class SaltCMD(object):
    '''
    The execution of a salt command happens here
    '''
    def __init__(self):
        '''
        Create a SaltCMD object
        '''
        self.opts = self.__parse()

    def __parse(self):
        '''
        Parse the command line
        '''
        parser = optparse.OptionParser()
        parser.add_option('-t',
                '--timeout',
                default=5,
                dest='timeout',
                help='Set the return timeout for batch jobs')

        options, args = parser.parse_args()

        opts = {}

        opts['timeout'] = options.timeout
        opts['tgt'] = args[1]
        opts['fun'] = args[2]
        opts['arg'] = args[3:]

        return opts

    def run(self):
        '''
        Execute the salt command line
        '''
        local = salt.client.LocalClient()
        print local.cmd(self.opts['tgt'],
                self.opts['fun'],
                self.opts['arg'],
                self.opts['timeout'])
<commit_before><commit_msg>Set up cli executor for salt command line<commit_after>
'''
The management of salt command line utilities is stored in here
'''
# Import python libs
import optparse
import os
import sys

# Import salt components
import salt.client

class SaltCMD(object):
    '''
    The execution of a salt command happens here
    '''
    def __init__(self):
        '''
        Create a SaltCMD object
        '''
        self.opts = self.__parse()

    def __parse(self):
        '''
        Parse the command line
        '''
        parser = optparse.OptionParser()
        parser.add_option('-t',
                '--timeout',
                default=5,
                dest='timeout',
                help='Set the return timeout for batch jobs')

        options, args = parser.parse_args()

        opts = {}

        opts['timeout'] = options.timeout
        opts['tgt'] = args[1]
        opts['fun'] = args[2]
        opts['arg'] = args[3:]

        return opts

    def run(self):
        '''
        Execute the salt command line
        '''
        local = salt.client.LocalClient()
        print local.cmd(self.opts['tgt'],
                self.opts['fun'],
                self.opts['arg'],
                self.opts['timeout'])
Set up cli executor for salt command line'''
The management of salt command line utilities is stored in here
'''
# Import python libs
import optparse
import os
import sys

# Import salt components
import salt.client

class SaltCMD(object):
    '''
    The execution of a salt command happens here
    '''
    def __init__(self):
        '''
        Create a SaltCMD object
        '''
        self.opts = self.__parse()

    def __parse(self):
        '''
        Parse the command line
        '''
        parser = optparse.OptionParser()
        parser.add_option('-t',
                '--timeout',
                default=5,
                dest='timeout',
                help='Set the return timeout for batch jobs')

        options, args = parser.parse_args()

        opts = {}

        opts['timeout'] = options.timeout
        opts['tgt'] = args[1]
        opts['fun'] = args[2]
        opts['arg'] = args[3:]

        return opts

    def run(self):
        '''
        Execute the salt command line
        '''
        local = salt.client.LocalClient()
        print local.cmd(self.opts['tgt'],
                self.opts['fun'],
                self.opts['arg'],
                self.opts['timeout'])
<commit_before><commit_msg>Set up cli executor for salt command line<commit_after>'''
The management of salt command line utilities is stored in here
'''
# Import python libs
import optparse
import os
import sys

# Import salt components
import salt.client

class SaltCMD(object):
    '''
    The execution of a salt command happens here
    '''
    def __init__(self):
        '''
        Create a SaltCMD object
        '''
        self.opts = self.__parse()

    def __parse(self):
        '''
        Parse the command line
        '''
        parser = optparse.OptionParser()
        parser.add_option('-t',
                '--timeout',
                default=5,
                dest='timeout',
                help='Set the return timeout for batch jobs')

        options, args = parser.parse_args()

        opts = {}

        opts['timeout'] = options.timeout
        opts['tgt'] = args[1]
        opts['fun'] = args[2]
        opts['arg'] = args[3:]

        return opts

    def run(self):
        '''
        Execute the salt command line
        '''
        local = salt.client.LocalClient()
        print local.cmd(self.opts['tgt'],
                self.opts['fun'],
                self.opts['arg'],
                self.opts['timeout'])
c73fc2993ab3af594812ae901ff8439b4eddf187
numpy/linalg/tests/test_linalg.py
numpy/linalg/tests/test_linalg.py
""" Test functions for linalg module """ from numpy.testing import * set_package_path() from numpy import array, single, double, csingle, cdouble, dot, identity, \ multiply from numpy import linalg restore_path() old_assert_almost_equal = assert_almost_equal def assert_almost_equal(a, b, **kw): if a.dtype.type in (single, csingle): decimal = 6 else: decimal = 12 old_assert_almost_equal(a, b, decimal=decimal, **kw) class LinalgTestCase(NumpyTestCase): def _check(self, dtype): a = array([[1.,2.], [3.,4.]], dtype=dtype) b = array([2., 1.], dtype=dtype) self.do(a, b) def check_single(self): self._check(single) def check_double(self): self._check(double) def check_csingle(self): self._check(csingle) def check_cdouble(self): self._check(cdouble) class test_solve(LinalgTestCase): def do(self, a, b): x = linalg.solve(a, b) assert_almost_equal(b, dot(a, x)) class test_inv(LinalgTestCase): def do(self, a, b): a_inv = linalg.inv(a) assert_almost_equal(dot(a, a_inv), identity(a.shape[0])) class test_eigvals(LinalgTestCase): def do(self, a, b): ev = linalg.eigvals(a) evalues, evectors = linalg.eig(a) assert_almost_equal(ev, evalues) class test_eig(LinalgTestCase): def do(self, a, b): evalues, evectors = linalg.eig(a) assert_almost_equal(dot(a, evectors), evectors*evalues) class test_svd(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) assert_almost_equal(a, dot(u*s, vt)) class test_pinv(LinalgTestCase): def do(self, a, b): a_ginv = linalg.pinv(a) assert_almost_equal(dot(a, a_ginv), identity(a.shape[0])) class test_det(LinalgTestCase): def do(self, a, b): d = linalg.det(a) ev = linalg.eigvals(a) assert_almost_equal(d, multiply.reduce(ev)) class test_lstsq(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) x, residuals, rank, sv = linalg.lstsq(a, b) assert_almost_equal(b, dot(a, x)) assert_equal(rank, a.shape[0]) assert_almost_equal(sv, s) if __name__ == '__main__': NumpyTest().run()
Add test cases for linalg
Add test cases for linalg git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@2818 94b884b6-d6fd-0310-90d3-974f1d3f35e1
Python
bsd-3-clause
Ademan/NumPy-GSoC,jasonmccampbell/numpy-refactor-sprint,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,jasonmccampbell/numpy-refactor-sprint,efiring/numpy-work,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,illume/numpy3k,chadnetzer/numpy-gaurdro,teoliphant/numpy-refactor,efiring/numpy-work,efiring/numpy-work,illume/numpy3k,chadnetzer/numpy-gaurdro,teoliphant/numpy-refactor,teoliphant/numpy-refactor,efiring/numpy-work,Ademan/NumPy-GSoC,chadnetzer/numpy-gaurdro,teoliphant/numpy-refactor,illume/numpy3k,illume/numpy3k,jasonmccampbell/numpy-refactor-sprint
Add test cases for linalg git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@2818 94b884b6-d6fd-0310-90d3-974f1d3f35e1
""" Test functions for linalg module """ from numpy.testing import * set_package_path() from numpy import array, single, double, csingle, cdouble, dot, identity, \ multiply from numpy import linalg restore_path() old_assert_almost_equal = assert_almost_equal def assert_almost_equal(a, b, **kw): if a.dtype.type in (single, csingle): decimal = 6 else: decimal = 12 old_assert_almost_equal(a, b, decimal=decimal, **kw) class LinalgTestCase(NumpyTestCase): def _check(self, dtype): a = array([[1.,2.], [3.,4.]], dtype=dtype) b = array([2., 1.], dtype=dtype) self.do(a, b) def check_single(self): self._check(single) def check_double(self): self._check(double) def check_csingle(self): self._check(csingle) def check_cdouble(self): self._check(cdouble) class test_solve(LinalgTestCase): def do(self, a, b): x = linalg.solve(a, b) assert_almost_equal(b, dot(a, x)) class test_inv(LinalgTestCase): def do(self, a, b): a_inv = linalg.inv(a) assert_almost_equal(dot(a, a_inv), identity(a.shape[0])) class test_eigvals(LinalgTestCase): def do(self, a, b): ev = linalg.eigvals(a) evalues, evectors = linalg.eig(a) assert_almost_equal(ev, evalues) class test_eig(LinalgTestCase): def do(self, a, b): evalues, evectors = linalg.eig(a) assert_almost_equal(dot(a, evectors), evectors*evalues) class test_svd(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) assert_almost_equal(a, dot(u*s, vt)) class test_pinv(LinalgTestCase): def do(self, a, b): a_ginv = linalg.pinv(a) assert_almost_equal(dot(a, a_ginv), identity(a.shape[0])) class test_det(LinalgTestCase): def do(self, a, b): d = linalg.det(a) ev = linalg.eigvals(a) assert_almost_equal(d, multiply.reduce(ev)) class test_lstsq(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) x, residuals, rank, sv = linalg.lstsq(a, b) assert_almost_equal(b, dot(a, x)) assert_equal(rank, a.shape[0]) assert_almost_equal(sv, s) if __name__ == '__main__': NumpyTest().run()
<commit_before><commit_msg>Add test cases for linalg git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@2818 94b884b6-d6fd-0310-90d3-974f1d3f35e1<commit_after>
""" Test functions for linalg module """ from numpy.testing import * set_package_path() from numpy import array, single, double, csingle, cdouble, dot, identity, \ multiply from numpy import linalg restore_path() old_assert_almost_equal = assert_almost_equal def assert_almost_equal(a, b, **kw): if a.dtype.type in (single, csingle): decimal = 6 else: decimal = 12 old_assert_almost_equal(a, b, decimal=decimal, **kw) class LinalgTestCase(NumpyTestCase): def _check(self, dtype): a = array([[1.,2.], [3.,4.]], dtype=dtype) b = array([2., 1.], dtype=dtype) self.do(a, b) def check_single(self): self._check(single) def check_double(self): self._check(double) def check_csingle(self): self._check(csingle) def check_cdouble(self): self._check(cdouble) class test_solve(LinalgTestCase): def do(self, a, b): x = linalg.solve(a, b) assert_almost_equal(b, dot(a, x)) class test_inv(LinalgTestCase): def do(self, a, b): a_inv = linalg.inv(a) assert_almost_equal(dot(a, a_inv), identity(a.shape[0])) class test_eigvals(LinalgTestCase): def do(self, a, b): ev = linalg.eigvals(a) evalues, evectors = linalg.eig(a) assert_almost_equal(ev, evalues) class test_eig(LinalgTestCase): def do(self, a, b): evalues, evectors = linalg.eig(a) assert_almost_equal(dot(a, evectors), evectors*evalues) class test_svd(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) assert_almost_equal(a, dot(u*s, vt)) class test_pinv(LinalgTestCase): def do(self, a, b): a_ginv = linalg.pinv(a) assert_almost_equal(dot(a, a_ginv), identity(a.shape[0])) class test_det(LinalgTestCase): def do(self, a, b): d = linalg.det(a) ev = linalg.eigvals(a) assert_almost_equal(d, multiply.reduce(ev)) class test_lstsq(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) x, residuals, rank, sv = linalg.lstsq(a, b) assert_almost_equal(b, dot(a, x)) assert_equal(rank, a.shape[0]) assert_almost_equal(sv, s) if __name__ == '__main__': NumpyTest().run()
Add test cases for linalg git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@2818 94b884b6-d6fd-0310-90d3-974f1d3f35e1""" Test functions for linalg module """ from numpy.testing import * set_package_path() from numpy import array, single, double, csingle, cdouble, dot, identity, \ multiply from numpy import linalg restore_path() old_assert_almost_equal = assert_almost_equal def assert_almost_equal(a, b, **kw): if a.dtype.type in (single, csingle): decimal = 6 else: decimal = 12 old_assert_almost_equal(a, b, decimal=decimal, **kw) class LinalgTestCase(NumpyTestCase): def _check(self, dtype): a = array([[1.,2.], [3.,4.]], dtype=dtype) b = array([2., 1.], dtype=dtype) self.do(a, b) def check_single(self): self._check(single) def check_double(self): self._check(double) def check_csingle(self): self._check(csingle) def check_cdouble(self): self._check(cdouble) class test_solve(LinalgTestCase): def do(self, a, b): x = linalg.solve(a, b) assert_almost_equal(b, dot(a, x)) class test_inv(LinalgTestCase): def do(self, a, b): a_inv = linalg.inv(a) assert_almost_equal(dot(a, a_inv), identity(a.shape[0])) class test_eigvals(LinalgTestCase): def do(self, a, b): ev = linalg.eigvals(a) evalues, evectors = linalg.eig(a) assert_almost_equal(ev, evalues) class test_eig(LinalgTestCase): def do(self, a, b): evalues, evectors = linalg.eig(a) assert_almost_equal(dot(a, evectors), evectors*evalues) class test_svd(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) assert_almost_equal(a, dot(u*s, vt)) class test_pinv(LinalgTestCase): def do(self, a, b): a_ginv = linalg.pinv(a) assert_almost_equal(dot(a, a_ginv), identity(a.shape[0])) class test_det(LinalgTestCase): def do(self, a, b): d = linalg.det(a) ev = linalg.eigvals(a) assert_almost_equal(d, multiply.reduce(ev)) class test_lstsq(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) x, residuals, rank, sv = linalg.lstsq(a, b) assert_almost_equal(b, dot(a, x)) assert_equal(rank, a.shape[0]) assert_almost_equal(sv, s) if __name__ == '__main__': NumpyTest().run()
<commit_before><commit_msg>Add test cases for linalg git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@2818 94b884b6-d6fd-0310-90d3-974f1d3f35e1<commit_after>""" Test functions for linalg module """ from numpy.testing import * set_package_path() from numpy import array, single, double, csingle, cdouble, dot, identity, \ multiply from numpy import linalg restore_path() old_assert_almost_equal = assert_almost_equal def assert_almost_equal(a, b, **kw): if a.dtype.type in (single, csingle): decimal = 6 else: decimal = 12 old_assert_almost_equal(a, b, decimal=decimal, **kw) class LinalgTestCase(NumpyTestCase): def _check(self, dtype): a = array([[1.,2.], [3.,4.]], dtype=dtype) b = array([2., 1.], dtype=dtype) self.do(a, b) def check_single(self): self._check(single) def check_double(self): self._check(double) def check_csingle(self): self._check(csingle) def check_cdouble(self): self._check(cdouble) class test_solve(LinalgTestCase): def do(self, a, b): x = linalg.solve(a, b) assert_almost_equal(b, dot(a, x)) class test_inv(LinalgTestCase): def do(self, a, b): a_inv = linalg.inv(a) assert_almost_equal(dot(a, a_inv), identity(a.shape[0])) class test_eigvals(LinalgTestCase): def do(self, a, b): ev = linalg.eigvals(a) evalues, evectors = linalg.eig(a) assert_almost_equal(ev, evalues) class test_eig(LinalgTestCase): def do(self, a, b): evalues, evectors = linalg.eig(a) assert_almost_equal(dot(a, evectors), evectors*evalues) class test_svd(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) assert_almost_equal(a, dot(u*s, vt)) class test_pinv(LinalgTestCase): def do(self, a, b): a_ginv = linalg.pinv(a) assert_almost_equal(dot(a, a_ginv), identity(a.shape[0])) class test_det(LinalgTestCase): def do(self, a, b): d = linalg.det(a) ev = linalg.eigvals(a) assert_almost_equal(d, multiply.reduce(ev)) class test_lstsq(LinalgTestCase): def do(self, a, b): u, s, vt = linalg.svd(a, 0) x, residuals, rank, sv = linalg.lstsq(a, b) assert_almost_equal(b, dot(a, x)) assert_equal(rank, a.shape[0]) assert_almost_equal(sv, s) if __name__ == '__main__': NumpyTest().run()
77ef603520652ab894128bf55f1f435bc17927c5
memcached_status.py
memcached_status.py
#!/usr/bin/env python import os import socket import subprocess import telnetlib def can_connect(host, port): """Check that we can make a connection to Memcached.""" try: c = telnetlib.Telnet(host, port) except socket.error: return False c.close() return True def main(): if not os.path.exists('/etc/init.d/memcached'): return hostname = subprocess.check_output('hostname').strip() localhost = '127.0.0.1' if can_connect(hostname, '11211') or can_connect(localhost, '11211'): print 'status ok memcached is reachable' else: print 'status err memcached is unreachable'
Add rudimentary status check for memcached
Add rudimentary status check for memcached
Python
apache-2.0
prometheanfire/rpc-openstack,jpmontez/rpc-openstack,andymcc/rpc-openstack,xeregin/rpc-openstack,robb-romans/rpc-openstack,stevelle/rpc-openstack,nrb/rpc-openstack,byronmccollum/rpc-openstack,briancurtin/rpc-maas,cloudnull/rpc-maas,nrb/rpc-openstack,cloudnull/rpc-maas,darrenchan/rpc-openstack,briancurtin/rpc-maas,stevelle/rpc-openstack,cfarquhar/rpc-maas,hughsaunders/rpc-openstack,major/rpc-openstack,mattt416/rpc-openstack,sigmavirus24/rpc-openstack,major/rpc-openstack,cloudnull/rpc-maas,rcbops/rpc-openstack,claco/rpc-openstack,git-harry/rpc-openstack,mattt416/rpc-openstack,xeregin/rpc-openstack,stevelle/rpc-openstack,cfarquhar/rpc-openstack,miguelgrinberg/rpc-openstack,briancurtin/rpc-maas,darrenchan/rpc-openstack,nrb/rpc-openstack,cloudnull/rpc-openstack,galstrom21/rpc-openstack,darrenchan/rpc-openstack,claco/rpc-openstack,busterswt/rpc-openstack,prometheanfire/rpc-openstack,sigmavirus24/rpc-openstack,andymcc/rpc-openstack,claco/rpc-openstack,npawelek/rpc-maas,darrenchan/rpc-openstack,jpmontez/rpc-openstack,robb-romans/rpc-openstack,byronmccollum/rpc-openstack,cfarquhar/rpc-openstack,BjoernT/rpc-openstack,mancdaz/rpc-openstack,shannonmitchell/rpc-openstack,cfarquhar/rpc-maas,jacobwagner/rpc-openstack,xeregin/rpc-openstack,hughsaunders/rpc-openstack,npawelek/rpc-maas,miguelgrinberg/rpc-openstack,cloudnull/rpc-openstack,andymcc/rpc-openstack,npawelek/rpc-maas,sigmavirus24/rpc-openstack,busterswt/rpc-openstack,miguelgrinberg/rpc-openstack,byronmccollum/rpc-openstack,mancdaz/rpc-openstack,jpmontez/rpc-openstack,sigmavirus24/rpc-openstack,cfarquhar/rpc-maas,galstrom21/rpc-openstack,mattt416/rpc-openstack,busterswt/rpc-openstack,jacobwagner/rpc-openstack,xeregin/rpc-openstack,BjoernT/rpc-openstack,rcbops/rpc-openstack,shannonmitchell/rpc-openstack,git-harry/rpc-openstack
Add rudimentary status check for memcached
#!/usr/bin/env python import os import socket import subprocess import telnetlib def can_connect(host, port): """Check that we can make a connection to Memcached.""" try: c = telnetlib.Telnet(host, port) except socket.error: return False c.close() return True def main(): if not os.path.exists('/etc/init.d/memcached'): return hostname = subprocess.check_output('hostname').strip() localhost = '127.0.0.1' if can_connect(hostname, '11211') or can_connect(localhost, '11211'): print 'status ok memcached is reachable' else: print 'status err memcached is unreachable'
<commit_before><commit_msg>Add rudimentary status check for memcached<commit_after>
#!/usr/bin/env python import os import socket import subprocess import telnetlib def can_connect(host, port): """Check that we can make a connection to Memcached.""" try: c = telnetlib.Telnet(host, port) except socket.error: return False c.close() return True def main(): if not os.path.exists('/etc/init.d/memcached'): return hostname = subprocess.check_output('hostname').strip() localhost = '127.0.0.1' if can_connect(hostname, '11211') or can_connect(localhost, '11211'): print 'status ok memcached is reachable' else: print 'status err memcached is unreachable'
Add rudimentary status check for memcached#!/usr/bin/env python import os import socket import subprocess import telnetlib def can_connect(host, port): """Check that we can make a connection to Memcached.""" try: c = telnetlib.Telnet(host, port) except socket.error: return False c.close() return True def main(): if not os.path.exists('/etc/init.d/memcached'): return hostname = subprocess.check_output('hostname').strip() localhost = '127.0.0.1' if can_connect(hostname, '11211') or can_connect(localhost, '11211'): print 'status ok memcached is reachable' else: print 'status err memcached is unreachable'
<commit_before><commit_msg>Add rudimentary status check for memcached<commit_after>#!/usr/bin/env python import os import socket import subprocess import telnetlib def can_connect(host, port): """Check that we can make a connection to Memcached.""" try: c = telnetlib.Telnet(host, port) except socket.error: return False c.close() return True def main(): if not os.path.exists('/etc/init.d/memcached'): return hostname = subprocess.check_output('hostname').strip() localhost = '127.0.0.1' if can_connect(hostname, '11211') or can_connect(localhost, '11211'): print 'status ok memcached is reachable' else: print 'status err memcached is unreachable'
883f1b1c28e76ade6632f762391cbb4a97918e12
direct/src/extensions_native/HTTPChannel_extensions.py
direct/src/extensions_native/HTTPChannel_extensions.py
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d.core import HTTPChannel
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

Dtool_funcToMethod(spawnTask, HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

Dtool_funcToMethod(doTask, HTTPChannel)
del doTask
#####################################################################

####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d import core
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(spawnTask, core.HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(doTask, core.HTTPChannel)
del doTask
#####################################################################
Fix import error when compiling without OpenSSL support
Fix import error when compiling without OpenSSL support
Python
bsd-3-clause
brakhane/panda3d,grimfang/panda3d,chandler14362/panda3d,grimfang/panda3d,brakhane/panda3d,mgracer48/panda3d,grimfang/panda3d,brakhane/panda3d,mgracer48/panda3d,chandler14362/panda3d,chandler14362/panda3d,tobspr/panda3d,tobspr/panda3d,brakhane/panda3d,grimfang/panda3d,tobspr/panda3d,chandler14362/panda3d,brakhane/panda3d,grimfang/panda3d,chandler14362/panda3d,mgracer48/panda3d,grimfang/panda3d,tobspr/panda3d,mgracer48/panda3d,tobspr/panda3d,brakhane/panda3d,chandler14362/panda3d,chandler14362/panda3d,mgracer48/panda3d,chandler14362/panda3d,tobspr/panda3d,tobspr/panda3d,grimfang/panda3d,tobspr/panda3d,mgracer48/panda3d,grimfang/panda3d,chandler14362/panda3d,brakhane/panda3d,brakhane/panda3d,mgracer48/panda3d,tobspr/panda3d,brakhane/panda3d,chandler14362/panda3d,grimfang/panda3d,mgracer48/panda3d,mgracer48/panda3d,tobspr/panda3d,grimfang/panda3d
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d.core import HTTPChannel
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

Dtool_funcToMethod(spawnTask, HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

Dtool_funcToMethod(doTask, HTTPChannel)
del doTask
#####################################################################
Fix import error when compiling without OpenSSL support

####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d import core
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(spawnTask, core.HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(doTask, core.HTTPChannel)
del doTask
#####################################################################

<commit_before>####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d.core import HTTPChannel
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

Dtool_funcToMethod(spawnTask, HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

Dtool_funcToMethod(doTask, HTTPChannel)
del doTask
#####################################################################
<commit_msg>Fix import error when compiling without OpenSSL support<commit_after>

####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d import core
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(spawnTask, core.HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(doTask, core.HTTPChannel)
del doTask
#####################################################################

####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d.core import HTTPChannel
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

Dtool_funcToMethod(spawnTask, HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

Dtool_funcToMethod(doTask, HTTPChannel)
del doTask
#####################################################################
Fix import error when compiling without OpenSSL support
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d import core
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(spawnTask, core.HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(doTask, core.HTTPChannel)
del doTask
#####################################################################

<commit_before>####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d.core import HTTPChannel
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

Dtool_funcToMethod(spawnTask, HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

Dtool_funcToMethod(doTask, HTTPChannel)
del doTask
#####################################################################
<commit_msg>Fix import error when compiling without OpenSSL support<commit_after>
####################################################################
#Dtool_funcToMethod(func, class)
#del func
#####################################################################

from panda3d import core
from .extension_native_helpers import Dtool_funcToMethod

"""
HTTPChannel-extensions module: contains methods to extend functionality
of the HTTPChannel class
"""

def spawnTask(self, name = None, callback = None, extraArgs = []):
    """Spawns a task to service the download recently requested
    via beginGetDocument(), etc., and/or downloadToFile() or
    downloadToRam(). If a callback is specified, that function is
    called when the download is complete, passing in the extraArgs
    given. Returns the newly-spawned task.
    """
    if not name:
        name = str(self.getUrl())
    from direct.task import Task
    task = Task.Task(self.doTask)
    task.callback = callback
    task.callbackArgs = extraArgs
    return taskMgr.add(task, name)

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(spawnTask, core.HTTPChannel)
del spawnTask
#####################################################################

def doTask(self, task):
    from direct.task import Task
    if self.run():
        return Task.cont
    if task.callback:
        task.callback(*task.callbackArgs)
    return Task.done

if hasattr(core, 'HTTPChannel'):
    Dtool_funcToMethod(doTask, core.HTTPChannel)
del doTask
#####################################################################
49ffec0b634990a99d60326dbe9a1f2583300b5d
analysis/plot-trial-progress.py
analysis/plot-trial-progress.py
import climate
import lmj.plot

import source


def main(subject):
    subj = source.Subject(subject)
    ax = lmj.plot.axes(111, projection='3d', aspect='equal')
    for i, block in enumerate(subj.blocks):
        trial = block.trials[0]
        trial.load()
        x, y, z = trial.marker('r-fing-index')
        ax.plot(x, z, zs=y, color=(i / len(subj.blocks), 0, 0), alpha=0.9)
    lmj.plot.show()


if __name__ == '__main__':
    climate.call(main)
Add a short script for plotting successive trials.
Add a short script for plotting successive trials.
Python
mit
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
Add a short script for plotting successive trials.
import climate
import lmj.plot

import source


def main(subject):
    subj = source.Subject(subject)
    ax = lmj.plot.axes(111, projection='3d', aspect='equal')
    for i, block in enumerate(subj.blocks):
        trial = block.trials[0]
        trial.load()
        x, y, z = trial.marker('r-fing-index')
        ax.plot(x, z, zs=y, color=(i / len(subj.blocks), 0, 0), alpha=0.9)
    lmj.plot.show()


if __name__ == '__main__':
    climate.call(main)

<commit_before><commit_msg>Add a short script for plotting successive trials.<commit_after>

import climate
import lmj.plot

import source


def main(subject):
    subj = source.Subject(subject)
    ax = lmj.plot.axes(111, projection='3d', aspect='equal')
    for i, block in enumerate(subj.blocks):
        trial = block.trials[0]
        trial.load()
        x, y, z = trial.marker('r-fing-index')
        ax.plot(x, z, zs=y, color=(i / len(subj.blocks), 0, 0), alpha=0.9)
    lmj.plot.show()


if __name__ == '__main__':
    climate.call(main)

Add a short script for plotting successive trials.
import climate
import lmj.plot

import source


def main(subject):
    subj = source.Subject(subject)
    ax = lmj.plot.axes(111, projection='3d', aspect='equal')
    for i, block in enumerate(subj.blocks):
        trial = block.trials[0]
        trial.load()
        x, y, z = trial.marker('r-fing-index')
        ax.plot(x, z, zs=y, color=(i / len(subj.blocks), 0, 0), alpha=0.9)
    lmj.plot.show()


if __name__ == '__main__':
    climate.call(main)

<commit_before><commit_msg>Add a short script for plotting successive trials.<commit_after>
import climate
import lmj.plot

import source


def main(subject):
    subj = source.Subject(subject)
    ax = lmj.plot.axes(111, projection='3d', aspect='equal')
    for i, block in enumerate(subj.blocks):
        trial = block.trials[0]
        trial.load()
        x, y, z = trial.marker('r-fing-index')
        ax.plot(x, z, zs=y, color=(i / len(subj.blocks), 0, 0), alpha=0.9)
    lmj.plot.show()


if __name__ == '__main__':
    climate.call(main)
44fb076a04388b57e8d517eb4835fe7b4b6720d5
aiopg/sa/__init__.py
aiopg/sa/__init__.py
"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect)
"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect, Engine from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError', 'Engine') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect, Engine)
Make Engine public importable name
Make Engine public importable name
Python
bsd-2-clause
luhn/aiopg,nerandell/aiopg,eirnym/aiopg,graingert/aiopg,aio-libs/aiopg,hyzhak/aiopg
"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect) Make Engine public importable name
"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect, Engine from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError', 'Engine') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect, Engine)
<commit_before>"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect) <commit_msg>Make Engine public importable name<commit_after>
"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect, Engine from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError', 'Engine') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect, Engine)
"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect) Make Engine public importable name"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect, Engine from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError', 'Engine') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect, Engine)
<commit_before>"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect) <commit_msg>Make Engine public importable name<commit_after>"""Optional support for sqlalchemy.sql dynamic query generation.""" from .engine import create_engine, dialect, Engine from .connection import SAConnection from .exc import (Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError) __all__ = ('dialect', 'create_engine', 'SAConnection', 'Error', 'ArgumentError', 'InvalidRequestError', 'NoSuchColumnError', 'ResourceClosedError', 'Engine') (SAConnection, Error, ArgumentError, InvalidRequestError, NoSuchColumnError, ResourceClosedError, create_engine, dialect, Engine)
2ec6da5fbb69d322d89b972210d3a521cd17b275
pyQuantuccia/tests/test_module.py
pyQuantuccia/tests/test_module.py
import pyQuantuccia


def test_not_a_real_test():
    _dir = str(pyQuantuccia.__dir__())
    _dict = str(pyQuantuccia.__dict__)
    s = " ".join([_dict, _dir])
    print(s)
    assert(s == "")
Add a test which lets us see what's in our module.
Add a test which lets us see what's in our module.
Python
bsd-3-clause
jwg4/pyQuantuccia,jwg4/pyQuantuccia
Add a test which lets us see what's in our module.
import pyQuantuccia


def test_not_a_real_test():
    _dir = str(pyQuantuccia.__dir__())
    _dict = str(pyQuantuccia.__dict__)
    s = " ".join([_dict, _dir])
    print(s)
    assert(s == "")

<commit_before><commit_msg>Add a test which lets us see what's in our module.<commit_after>

import pyQuantuccia


def test_not_a_real_test():
    _dir = str(pyQuantuccia.__dir__())
    _dict = str(pyQuantuccia.__dict__)
    s = " ".join([_dict, _dir])
    print(s)
    assert(s == "")

Add a test which lets us see what's in our module.
import pyQuantuccia


def test_not_a_real_test():
    _dir = str(pyQuantuccia.__dir__())
    _dict = str(pyQuantuccia.__dict__)
    s = " ".join([_dict, _dir])
    print(s)
    assert(s == "")

<commit_before><commit_msg>Add a test which lets us see what's in our module.<commit_after>
import pyQuantuccia


def test_not_a_real_test():
    _dir = str(pyQuantuccia.__dir__())
    _dict = str(pyQuantuccia.__dict__)
    s = " ".join([_dict, _dir])
    print(s)
    assert(s == "")
2a34dd198110401bd485552ad857c3d4f26c7b8c
csunplugged/tests/resources/views/test_generate_resource.py
csunplugged/tests/resources/views/test_generate_resource.py
import os

from django.test import tag
from django.urls import reverse

from tests.BaseTestWithDB import BaseTestWithDB
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.create_query_string import query_string


@tag("resource")
class GenerateResourceTest(BaseTestWithDB):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = ResourcesTestDataGenerator()
        self.language = "en"

    def test_generate_view_valid_slug(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(
            response.get("Content-Disposition"),
            'attachment; filename="Resource Grid (a4).pdf"'
        )

    def test_generate_view_valid_slug_production_cache(self):
        os.environ["DJANGO_PRODUCTION"] = "TRUE"
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(302, response.status_code)
        self.assertEqual(
            response.url,
            "/staticfiles/resources/Resource%20Grid%20(a4).pdf"
        )

    def test_generate_view_valid_slug_missing_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        url = reverse("resources:generate", kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

    def test_generate_view_valid_slug_invalid_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "b7"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)
Add tests for generate resource view
Add tests for generate resource view
Python
mit
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
Add tests for generate resource view
import os

from django.test import tag
from django.urls import reverse

from tests.BaseTestWithDB import BaseTestWithDB
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.create_query_string import query_string


@tag("resource")
class GenerateResourceTest(BaseTestWithDB):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = ResourcesTestDataGenerator()
        self.language = "en"

    def test_generate_view_valid_slug(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(
            response.get("Content-Disposition"),
            'attachment; filename="Resource Grid (a4).pdf"'
        )

    def test_generate_view_valid_slug_production_cache(self):
        os.environ["DJANGO_PRODUCTION"] = "TRUE"
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(302, response.status_code)
        self.assertEqual(
            response.url,
            "/staticfiles/resources/Resource%20Grid%20(a4).pdf"
        )

    def test_generate_view_valid_slug_missing_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        url = reverse("resources:generate", kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

    def test_generate_view_valid_slug_invalid_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "b7"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

<commit_before><commit_msg>Add tests for generate resource view<commit_after>

import os

from django.test import tag
from django.urls import reverse

from tests.BaseTestWithDB import BaseTestWithDB
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.create_query_string import query_string


@tag("resource")
class GenerateResourceTest(BaseTestWithDB):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = ResourcesTestDataGenerator()
        self.language = "en"

    def test_generate_view_valid_slug(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(
            response.get("Content-Disposition"),
            'attachment; filename="Resource Grid (a4).pdf"'
        )

    def test_generate_view_valid_slug_production_cache(self):
        os.environ["DJANGO_PRODUCTION"] = "TRUE"
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(302, response.status_code)
        self.assertEqual(
            response.url,
            "/staticfiles/resources/Resource%20Grid%20(a4).pdf"
        )

    def test_generate_view_valid_slug_missing_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        url = reverse("resources:generate", kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

    def test_generate_view_valid_slug_invalid_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "b7"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

Add tests for generate resource view
import os

from django.test import tag
from django.urls import reverse

from tests.BaseTestWithDB import BaseTestWithDB
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.create_query_string import query_string


@tag("resource")
class GenerateResourceTest(BaseTestWithDB):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = ResourcesTestDataGenerator()
        self.language = "en"

    def test_generate_view_valid_slug(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(
            response.get("Content-Disposition"),
            'attachment; filename="Resource Grid (a4).pdf"'
        )

    def test_generate_view_valid_slug_production_cache(self):
        os.environ["DJANGO_PRODUCTION"] = "TRUE"
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(302, response.status_code)
        self.assertEqual(
            response.url,
            "/staticfiles/resources/Resource%20Grid%20(a4).pdf"
        )

    def test_generate_view_valid_slug_missing_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        url = reverse("resources:generate", kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

    def test_generate_view_valid_slug_invalid_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "b7"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

<commit_before><commit_msg>Add tests for generate resource view<commit_after>
import os

from django.test import tag
from django.urls import reverse

from tests.BaseTestWithDB import BaseTestWithDB
from tests.resources.ResourcesTestDataGenerator import ResourcesTestDataGenerator
from utils.create_query_string import query_string


@tag("resource")
class GenerateResourceTest(BaseTestWithDB):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.test_data = ResourcesTestDataGenerator()
        self.language = "en"

    def test_generate_view_valid_slug(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(200, response.status_code)
        self.assertEqual(
            response.get("Content-Disposition"),
            'attachment; filename="Resource Grid (a4).pdf"'
        )

    def test_generate_view_valid_slug_production_cache(self):
        os.environ["DJANGO_PRODUCTION"] = "TRUE"
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "a4"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(302, response.status_code)
        self.assertEqual(
            response.url,
            "/staticfiles/resources/Resource%20Grid%20(a4).pdf"
        )

    def test_generate_view_valid_slug_missing_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        url = reverse("resources:generate", kwargs=kwargs)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)

    def test_generate_view_valid_slug_invalid_parameter(self):
        resource = self.test_data.create_resource(
            "grid",
            "Grid",
            "resources/grid.html",
            "GridResourceGenerator",
        )
        kwargs = {
            "resource_slug": resource.slug,
        }
        get_parameters = {
            "paper_size": "b7"
        }
        url = reverse("resources:generate", kwargs=kwargs)
        url += query_string(get_parameters)
        response = self.client.get(url)
        self.assertEqual(404, response.status_code)
303076c230123d6d9f0e8479f90057666de8db76
datasets/management/commands/remove_non_leaf_annotations.py
datasets/management/commands/remove_non_leaf_annotations.py
from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Annotation, Dataset


class Command(BaseCommand):
    help = 'Remove annotations associated with audio clips having a children from which it propagates its ground truth'\
           'Use it as python manage.py remove_non_leaf_annotations DATASET_ID'

    class ProgressBar:
        """
        Progress bar
        """
        def __init__(self, valmax, maxbar, title):
            if valmax == 0:
                valmax = 1
            if maxbar > 200:
                maxbar = 200
            self.valmax = valmax
            self.maxbar = maxbar
            self.title = title
            print ('')

        def update(self, val):
            import sys
            # format
            if val > self.valmax:
                val = self.valmax

            # process
            perc = round((float(val) / float(self.valmax)) * 100)
            scale = 100.0 / float(self.maxbar)
            bar = int(perc / scale)

            # render
            out = '\r %20s [%s%s] %3d / %3d' % (self.title, '=' * bar, ' ' * (self.maxbar - bar), val, self.valmax)
            sys.stdout.write(out)
            sys.stdout.flush()

    def add_arguments(self, parser):
        parser.add_argument('dataset_id', type=int)

    def handle(self, *args, **options):
        dataset_id = options['dataset_id']
        dataset = Dataset.objects.get(id=dataset_id)
        taxonomy = dataset.taxonomy

        # find annotations that should be removed (having an annotation that correspond to a children which propagate)
        taxonomy_nodes = dataset.taxonomy.taxonomynode_set.all()
        bar = self.ProgressBar(len(taxonomy_nodes), 30, 'Processing...')
        bar.update(0)
        annotations_to_remove = []
        for idx, node in enumerate(taxonomy_nodes):
            bar.update(idx+1)
            children_node = taxonomy.get_all_propagate_from_children(node.node_id)
            for annotation in Annotation.objects.filter(taxonomy_node=node):
                if Annotation.objects.filter(taxonomy_node__in=children_node,
                                             sound_dataset__sound=annotation.sound_dataset.sound).count() > 0:
                    annotations_to_remove.append(annotation)

        # remove only the annotations that have no vote
        annotations_id_to_remove = [a.id for a in annotations_to_remove if a.votes.all().count() == 0]
        Annotation.objects.filter(id__in=annotations_id_to_remove).delete()

        print('\n')
        print('{0} annotations where deleted'.format(len(annotations_id_to_remove)))
Add command remove non leaf annotations
Add command remove non leaf annotations
Python
agpl-3.0
MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets
Add command remove non leaf annotations
from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Annotation, Dataset


class Command(BaseCommand):
    help = 'Remove annotations associated with audio clips having a children from which it propagates its ground truth'\
           'Use it as python manage.py remove_non_leaf_annotations DATASET_ID'

    class ProgressBar:
        """
        Progress bar
        """
        def __init__(self, valmax, maxbar, title):
            if valmax == 0:
                valmax = 1
            if maxbar > 200:
                maxbar = 200
            self.valmax = valmax
            self.maxbar = maxbar
            self.title = title
            print ('')

        def update(self, val):
            import sys
            # format
            if val > self.valmax:
                val = self.valmax

            # process
            perc = round((float(val) / float(self.valmax)) * 100)
            scale = 100.0 / float(self.maxbar)
            bar = int(perc / scale)

            # render
            out = '\r %20s [%s%s] %3d / %3d' % (self.title, '=' * bar, ' ' * (self.maxbar - bar), val, self.valmax)
            sys.stdout.write(out)
            sys.stdout.flush()

    def add_arguments(self, parser):
        parser.add_argument('dataset_id', type=int)

    def handle(self, *args, **options):
        dataset_id = options['dataset_id']
        dataset = Dataset.objects.get(id=dataset_id)
        taxonomy = dataset.taxonomy

        # find annotations that should be removed (having an annotation that correspond to a children which propagate)
        taxonomy_nodes = dataset.taxonomy.taxonomynode_set.all()
        bar = self.ProgressBar(len(taxonomy_nodes), 30, 'Processing...')
        bar.update(0)
        annotations_to_remove = []
        for idx, node in enumerate(taxonomy_nodes):
            bar.update(idx+1)
            children_node = taxonomy.get_all_propagate_from_children(node.node_id)
            for annotation in Annotation.objects.filter(taxonomy_node=node):
                if Annotation.objects.filter(taxonomy_node__in=children_node,
                                             sound_dataset__sound=annotation.sound_dataset.sound).count() > 0:
                    annotations_to_remove.append(annotation)

        # remove only the annotations that have no vote
        annotations_id_to_remove = [a.id for a in annotations_to_remove if a.votes.all().count() == 0]
        Annotation.objects.filter(id__in=annotations_id_to_remove).delete()

        print('\n')
        print('{0} annotations where deleted'.format(len(annotations_id_to_remove)))

<commit_before><commit_msg>Add command remove non leaf annotations<commit_after>

from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Annotation, Dataset


class Command(BaseCommand):
    help = 'Remove annotations associated with audio clips having a children from which it propagates its ground truth'\
           'Use it as python manage.py remove_non_leaf_annotations DATASET_ID'

    class ProgressBar:
        """
        Progress bar
        """
        def __init__(self, valmax, maxbar, title):
            if valmax == 0:
                valmax = 1
            if maxbar > 200:
                maxbar = 200
            self.valmax = valmax
            self.maxbar = maxbar
            self.title = title
            print ('')

        def update(self, val):
            import sys
            # format
            if val > self.valmax:
                val = self.valmax

            # process
            perc = round((float(val) / float(self.valmax)) * 100)
            scale = 100.0 / float(self.maxbar)
            bar = int(perc / scale)

            # render
            out = '\r %20s [%s%s] %3d / %3d' % (self.title, '=' * bar, ' ' * (self.maxbar - bar), val, self.valmax)
            sys.stdout.write(out)
            sys.stdout.flush()

    def add_arguments(self, parser):
        parser.add_argument('dataset_id', type=int)

    def handle(self, *args, **options):
        dataset_id = options['dataset_id']
        dataset = Dataset.objects.get(id=dataset_id)
        taxonomy = dataset.taxonomy

        # find annotations that should be removed (having an annotation that correspond to a children which propagate)
        taxonomy_nodes = dataset.taxonomy.taxonomynode_set.all()
        bar = self.ProgressBar(len(taxonomy_nodes), 30, 'Processing...')
        bar.update(0)
        annotations_to_remove = []
        for idx, node in enumerate(taxonomy_nodes):
            bar.update(idx+1)
            children_node = taxonomy.get_all_propagate_from_children(node.node_id)
            for annotation in Annotation.objects.filter(taxonomy_node=node):
                if Annotation.objects.filter(taxonomy_node__in=children_node,
                                             sound_dataset__sound=annotation.sound_dataset.sound).count() > 0:
                    annotations_to_remove.append(annotation)

        # remove only the annotations that have no vote
        annotations_id_to_remove = [a.id for a in annotations_to_remove if a.votes.all().count() == 0]
        Annotation.objects.filter(id__in=annotations_id_to_remove).delete()

        print('\n')
        print('{0} annotations where deleted'.format(len(annotations_id_to_remove)))

Add command remove non leaf annotations
from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Annotation, Dataset


class Command(BaseCommand):
    help = 'Remove annotations associated with audio clips having a children from which it propagates its ground truth'\
           'Use it as python manage.py remove_non_leaf_annotations DATASET_ID'

    class ProgressBar:
        """
        Progress bar
        """
        def __init__(self, valmax, maxbar, title):
            if valmax == 0:
                valmax = 1
            if maxbar > 200:
                maxbar = 200
            self.valmax = valmax
            self.maxbar = maxbar
            self.title = title
            print ('')

        def update(self, val):
            import sys
            # format
            if val > self.valmax:
                val = self.valmax

            # process
            perc = round((float(val) / float(self.valmax)) * 100)
            scale = 100.0 / float(self.maxbar)
            bar = int(perc / scale)

            # render
            out = '\r %20s [%s%s] %3d / %3d' % (self.title, '=' * bar, ' ' * (self.maxbar - bar), val, self.valmax)
            sys.stdout.write(out)
            sys.stdout.flush()

    def add_arguments(self, parser):
        parser.add_argument('dataset_id', type=int)

    def handle(self, *args, **options):
        dataset_id = options['dataset_id']
        dataset = Dataset.objects.get(id=dataset_id)
        taxonomy = dataset.taxonomy

        # find annotations that should be removed (having an annotation that correspond to a children which propagate)
        taxonomy_nodes = dataset.taxonomy.taxonomynode_set.all()
        bar = self.ProgressBar(len(taxonomy_nodes), 30, 'Processing...')
        bar.update(0)
        annotations_to_remove = []
        for idx, node in enumerate(taxonomy_nodes):
            bar.update(idx+1)
            children_node = taxonomy.get_all_propagate_from_children(node.node_id)
            for annotation in Annotation.objects.filter(taxonomy_node=node):
                if Annotation.objects.filter(taxonomy_node__in=children_node,
                                             sound_dataset__sound=annotation.sound_dataset.sound).count() > 0:
                    annotations_to_remove.append(annotation)

        # remove only the annotations that have no vote
        annotations_id_to_remove = [a.id for a in annotations_to_remove if a.votes.all().count() == 0]
        Annotation.objects.filter(id__in=annotations_id_to_remove).delete()

        print('\n')
        print('{0} annotations where deleted'.format(len(annotations_id_to_remove)))

<commit_before><commit_msg>Add command remove non leaf annotations<commit_after>
from django.core.management.base import BaseCommand
from datasets.models import *
import json
from datasets.models import Annotation, Dataset


class Command(BaseCommand):
    help = 'Remove annotations associated with audio clips having a children from which it propagates its ground truth'\
           'Use it as python manage.py remove_non_leaf_annotations DATASET_ID'

    class ProgressBar:
        """
        Progress bar
        """
        def __init__(self, valmax, maxbar, title):
            if valmax == 0:
                valmax = 1
            if maxbar > 200:
                maxbar = 200
            self.valmax = valmax
            self.maxbar = maxbar
            self.title = title
            print ('')

        def update(self, val):
            import sys
            # format
            if val > self.valmax:
                val = self.valmax

            # process
            perc = round((float(val) / float(self.valmax)) * 100)
            scale = 100.0 / float(self.maxbar)
            bar = int(perc / scale)

            # render
            out = '\r %20s [%s%s] %3d / %3d' % (self.title, '=' * bar, ' ' * (self.maxbar - bar), val, self.valmax)
            sys.stdout.write(out)
            sys.stdout.flush()

    def add_arguments(self, parser):
        parser.add_argument('dataset_id', type=int)

    def handle(self, *args, **options):
        dataset_id = options['dataset_id']
        dataset = Dataset.objects.get(id=dataset_id)
        taxonomy = dataset.taxonomy

        # find annotations that should be removed (having an annotation that correspond to a children which propagate)
        taxonomy_nodes = dataset.taxonomy.taxonomynode_set.all()
        bar = self.ProgressBar(len(taxonomy_nodes), 30, 'Processing...')
        bar.update(0)
        annotations_to_remove = []
        for idx, node in enumerate(taxonomy_nodes):
            bar.update(idx+1)
            children_node = taxonomy.get_all_propagate_from_children(node.node_id)
            for annotation in Annotation.objects.filter(taxonomy_node=node):
                if Annotation.objects.filter(taxonomy_node__in=children_node,
                                             sound_dataset__sound=annotation.sound_dataset.sound).count() > 0:
                    annotations_to_remove.append(annotation)

        # remove only the annotations that have no vote
        annotations_id_to_remove = [a.id for a in annotations_to_remove if a.votes.all().count() == 0]
        Annotation.objects.filter(id__in=annotations_id_to_remove).delete()

        print('\n')
        print('{0} annotations where deleted'.format(len(annotations_id_to_remove)))
fa6c2c43289eeee1c0efab45101149b49be1b5cb
scrapi/processing/osf/__init__.py
scrapi/processing/osf/__init__.py
from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        if not resource:
            resource = crud.create_resource(resource_norm, resource_hash)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})

        if not report:
            report = crud.create_report(report_norm, resource, report_hash)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})
            crud.update_node(report, report_norm)

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)

from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        found, _hash = collision.already_processed(raw_doc)
        if found:
            return

        normalized['meta'] = {
            'docHash': _hash
        }

        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        report_norm['meta']['uids'] = report_hash
        resource_norm['meta']['uids'] = resource_hash

        if not resource:
            resource = crud.create_resource(resource_norm)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})
            crud.update_node(report, report_norm)

        if not report:
            report = crud.create_report(report_norm, resource)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)
Update dumping to osf logic
Update dumping to osf logic
Python
apache-2.0
ostwald/scrapi,mehanig/scrapi,felliott/scrapi,alexgarciac/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,icereval/scrapi,erinspace/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,jeffreyliu3230/scrapi,fabianvf/scrapi,erinspace/scrapi,mehanig/scrapi
from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        if not resource:
            resource = crud.create_resource(resource_norm, resource_hash)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})

        if not report:
            report = crud.create_report(report_norm, resource, report_hash)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})
            crud.update_node(report, report_norm)

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)
Update dumping to osf logic

from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        found, _hash = collision.already_processed(raw_doc)
        if found:
            return

        normalized['meta'] = {
            'docHash': _hash
        }

        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        report_norm['meta']['uids'] = report_hash
        resource_norm['meta']['uids'] = resource_hash

        if not resource:
            resource = crud.create_resource(resource_norm)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})
            crud.update_node(report, report_norm)

        if not report:
            report = crud.create_report(report_norm, resource)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)

<commit_before>from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        if not resource:
            resource = crud.create_resource(resource_norm, resource_hash)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})

        if not report:
            report = crud.create_report(report_norm, resource, report_hash)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})
            crud.update_node(report, report_norm)

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)
<commit_msg>Update dumping to osf logic<commit_after>

from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        found, _hash = collision.already_processed(raw_doc)
        if found:
            return

        normalized['meta'] = {
            'docHash': _hash
        }

        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        report_norm['meta']['uids'] = report_hash
        resource_norm['meta']['uids'] = resource_hash

        if not resource:
            resource = crud.create_resource(resource_norm)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})
            crud.update_node(report, report_norm)

        if not report:
            report = crud.create_report(report_norm, resource)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)

from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        if not resource:
            resource = crud.create_resource(resource_norm, resource_hash)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})

        if not report:
            report = crud.create_report(report_norm, resource, report_hash)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})
            crud.update_node(report, report_norm)

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)
Update dumping to osf logic
from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        found, _hash = collision.already_processed(raw_doc)
        if found:
            return

        normalized['meta'] = {
            'docHash': _hash
        }

        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        report_norm['meta']['uids'] = report_hash
        resource_norm['meta']['uids'] = resource_hash

        if not resource:
            resource = crud.create_resource(resource_norm)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})
            crud.update_node(report, report_norm)

        if not report:
            report = crud.create_report(report_norm, resource)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)

<commit_before>from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        if not resource:
            resource = crud.create_resource(resource_norm, resource_hash)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})

        if not report:
            report = crud.create_report(report_norm, resource, report_hash)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})
            crud.update_node(report, report_norm)

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)
<commit_msg>Update dumping to osf logic<commit_after>
from scrapi.processing.osf import crud
from scrapi.processing.osf import collision
from scrapi.processing.base import BaseProcessor


class OSFProcessor(BaseProcessor):
    NAME = 'osf'

    def process_normalized(self, raw_doc, normalized):
        found, _hash = collision.already_processed(raw_doc)
        if found:
            return

        normalized['meta'] = {
            'docHash': _hash
        }

        if crud.is_event(normalized):
            crud.dump_metdata(normalized, {})
            return

        normalized['collisionCategory'] = crud.get_collision_cat(normalized['source'])

        report_norm = normalized
        resource_norm = crud.clean_report(normalized)

        report_hash = collision.generate_report_hash_list(report_norm)
        resource_hash = collision.generate_resource_hash_list(resource_norm)

        report = collision.detect_collisions(report_hash)
        resource = collision.detect_collisions(resource_hash, is_resource=True)

        report_norm['meta']['uids'] = report_hash
        resource_norm['meta']['uids'] = resource_hash

        if not resource:
            resource = crud.create_resource(resource_norm)
        else:
            crud.dump_metadata(resource_norm, {'nid': resource})
            crud.update_node(report, report_norm)

        if not report:
            report = crud.create_report(report_norm, resource)
        else:
            crud.dump_metadata(report_norm, {'nid': report, 'pid': resource})

        if not crud.is_claimed(resource):
            crud.update_node(resource, resource_norm)
0155a9b005a5b62b8245b29ac49cd5766f12b218
examples/python/disassembly_mode.py
examples/python/disassembly_mode.py
""" Adds the 'toggle-disassembly' command to switch you into a disassembly only mode """ import lldb class DisassemblyMode: def __init__(self, debugger, unused): self.dbg = debugger self.interp = debugger.GetCommandInterpreter() self.store_state() self.mode_off = True def store_state(self): self.dis_count = self.get_string_value("stop-disassembly-count") self.dis_display = self.get_string_value("stop-disassembly-display") self.before_count = self.get_string_value("stop-line-count-before") self.after_count = self.get_string_value("stop-line-count-after") def get_string_value(self, setting): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings show " + setting, result) value = result.GetOutput().split(" = ")[1].rstrip("\n") return value def set_value(self, setting, value): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings set " + setting + " " + value, result) def __call__(self, debugger, command, exe_ctx, result): if self.mode_off: self.mode_off = False self.store_state() self.set_value("stop-disassembly-display","always") self.set_value("stop-disassembly-count", "8") self.set_value("stop-line-count-before", "0") self.set_value("stop-line-count-after", "0") result.AppendMessage("Disassembly mode on.") else: self.mode_off = True self.set_value("stop-disassembly-display",self.dis_display) self.set_value("stop-disassembly-count", self.dis_count) self.set_value("stop-line-count-before", self.before_count) self.set_value("stop-line-count-after", self.after_count) result.AppendMessage("Disassembly mode off.") def get_short_help(self): return "Toggles between a disassembly only mode and normal source mode\n" def __lldb_init_module(debugger, unused): debugger.HandleCommand("command script add -c disassembly_mode.DisassemblyMode toggle-disassembly")
Add an example command to toggle between disassembly-only and source mode.
Add an example command to toggle between disassembly-only and source mode. Sometimes you are debugging in source, but you really only want to see the disassembly. That's easy to do but you have to set a few variables. This command toggles between your old values, and a disassembly only mode. git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@300902 91177308-0d34-0410-b5e6-96231b3b80d8
Python
apache-2.0
llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb
Add an example command to toggle between disassembly-only and source mode. Sometimes you are debugging in source, but you really only want to see the disassembly. That's easy to do but you have to set a few variables. This command toggles between your old values, and a disassembly only mode. git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@300902 91177308-0d34-0410-b5e6-96231b3b80d8
""" Adds the 'toggle-disassembly' command to switch you into a disassembly only mode """ import lldb class DisassemblyMode: def __init__(self, debugger, unused): self.dbg = debugger self.interp = debugger.GetCommandInterpreter() self.store_state() self.mode_off = True def store_state(self): self.dis_count = self.get_string_value("stop-disassembly-count") self.dis_display = self.get_string_value("stop-disassembly-display") self.before_count = self.get_string_value("stop-line-count-before") self.after_count = self.get_string_value("stop-line-count-after") def get_string_value(self, setting): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings show " + setting, result) value = result.GetOutput().split(" = ")[1].rstrip("\n") return value def set_value(self, setting, value): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings set " + setting + " " + value, result) def __call__(self, debugger, command, exe_ctx, result): if self.mode_off: self.mode_off = False self.store_state() self.set_value("stop-disassembly-display","always") self.set_value("stop-disassembly-count", "8") self.set_value("stop-line-count-before", "0") self.set_value("stop-line-count-after", "0") result.AppendMessage("Disassembly mode on.") else: self.mode_off = True self.set_value("stop-disassembly-display",self.dis_display) self.set_value("stop-disassembly-count", self.dis_count) self.set_value("stop-line-count-before", self.before_count) self.set_value("stop-line-count-after", self.after_count) result.AppendMessage("Disassembly mode off.") def get_short_help(self): return "Toggles between a disassembly only mode and normal source mode\n" def __lldb_init_module(debugger, unused): debugger.HandleCommand("command script add -c disassembly_mode.DisassemblyMode toggle-disassembly")
<commit_before><commit_msg>Add an example command to toggle between disassembly-only and source mode. Sometimes you are debugging in source, but you really only want to see the disassembly. That's easy to do but you have to set a few variables. This command toggles between your old values, and a disassembly only mode. git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@300902 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
""" Adds the 'toggle-disassembly' command to switch you into a disassembly only mode """ import lldb class DisassemblyMode: def __init__(self, debugger, unused): self.dbg = debugger self.interp = debugger.GetCommandInterpreter() self.store_state() self.mode_off = True def store_state(self): self.dis_count = self.get_string_value("stop-disassembly-count") self.dis_display = self.get_string_value("stop-disassembly-display") self.before_count = self.get_string_value("stop-line-count-before") self.after_count = self.get_string_value("stop-line-count-after") def get_string_value(self, setting): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings show " + setting, result) value = result.GetOutput().split(" = ")[1].rstrip("\n") return value def set_value(self, setting, value): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings set " + setting + " " + value, result) def __call__(self, debugger, command, exe_ctx, result): if self.mode_off: self.mode_off = False self.store_state() self.set_value("stop-disassembly-display","always") self.set_value("stop-disassembly-count", "8") self.set_value("stop-line-count-before", "0") self.set_value("stop-line-count-after", "0") result.AppendMessage("Disassembly mode on.") else: self.mode_off = True self.set_value("stop-disassembly-display",self.dis_display) self.set_value("stop-disassembly-count", self.dis_count) self.set_value("stop-line-count-before", self.before_count) self.set_value("stop-line-count-after", self.after_count) result.AppendMessage("Disassembly mode off.") def get_short_help(self): return "Toggles between a disassembly only mode and normal source mode\n" def __lldb_init_module(debugger, unused): debugger.HandleCommand("command script add -c disassembly_mode.DisassemblyMode toggle-disassembly")
Add an example command to toggle between disassembly-only and source mode. Sometimes you are debugging in source, but you really only want to see the disassembly. That's easy to do but you have to set a few variables. This command toggles between your old values, and a disassembly only mode. git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@300902 91177308-0d34-0410-b5e6-96231b3b80d8""" Adds the 'toggle-disassembly' command to switch you into a disassembly only mode """ import lldb class DisassemblyMode: def __init__(self, debugger, unused): self.dbg = debugger self.interp = debugger.GetCommandInterpreter() self.store_state() self.mode_off = True def store_state(self): self.dis_count = self.get_string_value("stop-disassembly-count") self.dis_display = self.get_string_value("stop-disassembly-display") self.before_count = self.get_string_value("stop-line-count-before") self.after_count = self.get_string_value("stop-line-count-after") def get_string_value(self, setting): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings show " + setting, result) value = result.GetOutput().split(" = ")[1].rstrip("\n") return value def set_value(self, setting, value): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings set " + setting + " " + value, result) def __call__(self, debugger, command, exe_ctx, result): if self.mode_off: self.mode_off = False self.store_state() self.set_value("stop-disassembly-display","always") self.set_value("stop-disassembly-count", "8") self.set_value("stop-line-count-before", "0") self.set_value("stop-line-count-after", "0") result.AppendMessage("Disassembly mode on.") else: self.mode_off = True self.set_value("stop-disassembly-display",self.dis_display) self.set_value("stop-disassembly-count", self.dis_count) self.set_value("stop-line-count-before", self.before_count) self.set_value("stop-line-count-after", self.after_count) result.AppendMessage("Disassembly mode off.") def get_short_help(self): return "Toggles between a disassembly only mode and normal source mode\n" def __lldb_init_module(debugger, unused): debugger.HandleCommand("command script add -c disassembly_mode.DisassemblyMode toggle-disassembly")
<commit_before><commit_msg>Add an example command to toggle between disassembly-only and source mode. Sometimes you are debugging in source, but you really only want to see the disassembly. That's easy to do but you have to set a few variables. This command toggles between your old values, and a disassembly only mode. git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@300902 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>""" Adds the 'toggle-disassembly' command to switch you into a disassembly only mode """ import lldb class DisassemblyMode: def __init__(self, debugger, unused): self.dbg = debugger self.interp = debugger.GetCommandInterpreter() self.store_state() self.mode_off = True def store_state(self): self.dis_count = self.get_string_value("stop-disassembly-count") self.dis_display = self.get_string_value("stop-disassembly-display") self.before_count = self.get_string_value("stop-line-count-before") self.after_count = self.get_string_value("stop-line-count-after") def get_string_value(self, setting): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings show " + setting, result) value = result.GetOutput().split(" = ")[1].rstrip("\n") return value def set_value(self, setting, value): result = lldb.SBCommandReturnObject() self.interp.HandleCommand("settings set " + setting + " " + value, result) def __call__(self, debugger, command, exe_ctx, result): if self.mode_off: self.mode_off = False self.store_state() self.set_value("stop-disassembly-display","always") self.set_value("stop-disassembly-count", "8") self.set_value("stop-line-count-before", "0") self.set_value("stop-line-count-after", "0") result.AppendMessage("Disassembly mode on.") else: self.mode_off = True self.set_value("stop-disassembly-display",self.dis_display) self.set_value("stop-disassembly-count", self.dis_count) self.set_value("stop-line-count-before", self.before_count) self.set_value("stop-line-count-after", self.after_count) result.AppendMessage("Disassembly mode off.") def get_short_help(self): return "Toggles between a disassembly only mode and normal source mode\n" def __lldb_init_module(debugger, unused): debugger.HandleCommand("command script add -c disassembly_mode.DisassemblyMode toggle-disassembly")
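The fragile spot in the record above is get_string_value, which scrapes the textual output of lldb's settings show rather than using a structured API. A short illustration of that parsing, with a sample output line of the shape lldb prints (the exact line is illustrative):

# A `settings show stop-disassembly-count` reply looks roughly like this:
output = "stop-disassembly-count (int) = 4\n"
value = output.split(" = ")[1].rstrip("\n")
assert value == "4"
# In a session the script is loaded and used with the lldb console commands
#   command script import disassembly_mode.py
#   toggle-disassembly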
6fcfd8d09db54996a9651f1ed8b82222e36d0673
project/apps/api/migrations/0011_auto_20160518_1522.py
project/apps/api/migrations/0011_auto_20160518_1522.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-05-18 22:22 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('api', '0010_remove_song_chart'), ] operations = [ migrations.RemoveField( model_name='score', name='asterisk_test', ), migrations.RemoveField( model_name='score', name='dixon_test', ), ]
Remove dixon and asterisk test
Remove dixon and asterisk test
Python
bsd-2-clause
dbinetti/barberscore,dbinetti/barberscore-django,barberscore/barberscore-api,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore-django,barberscore/barberscore-api,dbinetti/barberscore
Remove dixon and asterisk test
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-05-18 22:22 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('api', '0010_remove_song_chart'), ] operations = [ migrations.RemoveField( model_name='score', name='asterisk_test', ), migrations.RemoveField( model_name='score', name='dixon_test', ), ]
<commit_before><commit_msg>Remove dixon and asterisk test<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-05-18 22:22 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('api', '0010_remove_song_chart'), ] operations = [ migrations.RemoveField( model_name='score', name='asterisk_test', ), migrations.RemoveField( model_name='score', name='dixon_test', ), ]
Remove dixon and asterisk test# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-05-18 22:22 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('api', '0010_remove_song_chart'), ] operations = [ migrations.RemoveField( model_name='score', name='asterisk_test', ), migrations.RemoveField( model_name='score', name='dixon_test', ), ]
<commit_before><commit_msg>Remove dixon and asterisk test<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-05-18 22:22 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('api', '0010_remove_song_chart'), ] operations = [ migrations.RemoveField( model_name='score', name='asterisk_test', ), migrations.RemoveField( model_name='score', name='dixon_test', ), ]
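Migrations like this one are normally emitted by deleting the fields from the model and running makemigrations; a sketch of the corresponding model edit (the model name is taken from the migration's model_name, everything else is elided):

from django.db import models

class Score(models.Model):
    # asterisk_test and dixon_test were deleted here; running
    # `./manage.py makemigrations api` then produces the two
    # RemoveField operations shown above.
    pass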
7fb8b719a89adae9f4d094983fc4c0e946580389
tests/sample_smart_tools/shasum.py
tests/sample_smart_tools/shasum.py
import argparse from dtoolcore import DataSet def main(dataset_path, identifier, output_path): dataset = DataSet.from_path(dataset_path) fpath = dataset.abspath_from_identifier(identifier) with open(output_path, "w") as fh: shasum_string = "{} {}\n".format(identifier, fpath) fh.write(shasum_string) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dataset-path", required=True) parser.add_argument("--identifier", required=True) parser.add_argument("--output-path", required=True) args = parser.parse_args() main(args.dataset_path, args.identifier, args.output_path)
Add sample smart tool for testing purposes
Add sample smart tool for testing purposes
Python
mit
JIC-CSB/jobarchitect,JIC-CSB/jobarchitect
Add sample smart tool for testing purposes
import argparse from dtoolcore import DataSet def main(dataset_path, identifier, output_path): dataset = DataSet.from_path(dataset_path) fpath = dataset.abspath_from_identifier(identifier) with open(output_path, "w") as fh: shasum_string = "{} {}\n".format(identifier, fpath) fh.write(shasum_string) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dataset-path", required=True) parser.add_argument("--identifier", required=True) parser.add_argument("--output-path", required=True) args = parser.parse_args() main(args.dataset_path, args.identifier, args.output_path)
<commit_before><commit_msg>Add sample smart tool for testing purposes<commit_after>
import argparse from dtoolcore import DataSet def main(dataset_path, identifier, output_path): dataset = DataSet.from_path(dataset_path) fpath = dataset.abspath_from_identifier(identifier) with open(output_path, "w") as fh: shasum_string = "{} {}\n".format(identifier, fpath) fh.write(shasum_string) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dataset-path", required=True) parser.add_argument("--identifier", required=True) parser.add_argument("--output-path", required=True) args = parser.parse_args() main(args.dataset_path, args.identifier, args.output_path)
Add sample smart tool for testing purposesimport argparse from dtoolcore import DataSet def main(dataset_path, identifier, output_path): dataset = DataSet.from_path(dataset_path) fpath = dataset.abspath_from_identifier(identifier) with open(output_path, "w") as fh: shasum_string = "{} {}\n".format(identifier, fpath) fh.write(shasum_string) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dataset-path", required=True) parser.add_argument("--identifier", required=True) parser.add_argument("--output-path", required=True) args = parser.parse_args() main(args.dataset_path, args.identifier, args.output_path)
<commit_before><commit_msg>Add sample smart tool for testing purposes<commit_after>import argparse from dtoolcore import DataSet def main(dataset_path, identifier, output_path): dataset = DataSet.from_path(dataset_path) fpath = dataset.abspath_from_identifier(identifier) with open(output_path, "w") as fh: shasum_string = "{} {}\n".format(identifier, fpath) fh.write(shasum_string) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--dataset-path", required=True) parser.add_argument("--identifier", required=True) parser.add_argument("--output-path", required=True) args = parser.parse_args() main(args.dataset_path, args.identifier, args.output_path)
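A possible way to exercise the tool above without going through the CLI is to call main() directly; the module name, dataset path, and identifier below are made up for illustration:

from shasum import main

main(dataset_path="/data/my_dataset",
     identifier="e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e",
     output_path="/tmp/shasum_line.txt")
# /tmp/shasum_line.txt now contains one line: "<identifier> <absolute item path>"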
e9d5143b8751bee1d74a5cfebeca848225426d68
tests/test_special_tokens.py
tests/test_special_tokens.py
from tests import TestCase class SignedIntegerTokenTestCase(TestCase): def setup_method(self, method): TestCase.setup_method(self, method) self.session.add( self.TextItem(name=u'index', content=u'some 12-14') ) self.session.commit() class TestSignedIntegersWithRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = True def test_with_hyphen_search_term(self): assert self.TextItemQuery( self.TextItem, self.session ).search('12-14').count() class TestSignedIntegersWithoutRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = False def test_with_hyphen_search_term(self): assert not self.TextItemQuery( self.TextItem, self.session ).search('12-14').count()
Add test case for special tokens
Add test case for special tokens
Python
bsd-3-clause
cristen/sqlalchemy-searchable
Add test case for special tokens
from tests import TestCase class SignedIntegerTokenTestCase(TestCase): def setup_method(self, method): TestCase.setup_method(self, method) self.session.add( self.TextItem(name=u'index', content=u'some 12-14') ) self.session.commit() class TestSignedIntegersWithRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = True def test_with_hyphen_search_term(self): assert self.TextItemQuery( self.TextItem, self.session ).search('12-14').count() class TestSignedIntegersWithoutRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = False def test_with_hyphen_search_term(self): assert not self.TextItemQuery( self.TextItem, self.session ).search('12-14').count()
<commit_before><commit_msg>Add test case for special tokens<commit_after>
from tests import TestCase class SignedIntegerTokenTestCase(TestCase): def setup_method(self, method): TestCase.setup_method(self, method) self.session.add( self.TextItem(name=u'index', content=u'some 12-14') ) self.session.commit() class TestSignedIntegersWithRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = True def test_with_hyphen_search_term(self): assert self.TextItemQuery( self.TextItem, self.session ).search('12-14').count() class TestSignedIntegersWithoutRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = False def test_with_hyphen_search_term(self): assert not self.TextItemQuery( self.TextItem, self.session ).search('12-14').count()
Add test case for special tokensfrom tests import TestCase class SignedIntegerTokenTestCase(TestCase): def setup_method(self, method): TestCase.setup_method(self, method) self.session.add( self.TextItem(name=u'index', content=u'some 12-14') ) self.session.commit() class TestSignedIntegersWithRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = True def test_with_hyphen_search_term(self): assert self.TextItemQuery( self.TextItem, self.session ).search('12-14').count() class TestSignedIntegersWithoutRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = False def test_with_hyphen_search_term(self): assert not self.TextItemQuery( self.TextItem, self.session ).search('12-14').count()
<commit_before><commit_msg>Add test case for special tokens<commit_after>from tests import TestCase class SignedIntegerTokenTestCase(TestCase): def setup_method(self, method): TestCase.setup_method(self, method) self.session.add( self.TextItem(name=u'index', content=u'some 12-14') ) self.session.commit() class TestSignedIntegersWithRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = True def test_with_hyphen_search_term(self): assert self.TextItemQuery( self.TextItem, self.session ).search('12-14').count() class TestSignedIntegersWithoutRemoveHyphens(SignedIntegerTokenTestCase): remove_hyphens = False def test_with_hyphen_search_term(self): assert not self.TextItemQuery( self.TextItem, self.session ).search('12-14').count()
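The two test classes differ only in the remove_hyphens flag and assert opposite outcomes for the query '12-14': with hyphens removed the term splits into two integer tokens that match the indexed content, while the raw form is treated as a single signed-integer token and does not. A library-agnostic illustration of the preprocessing difference (this is not sqlalchemy-searchable's actual code):

def preprocess(term, remove_hyphens):
    return term.replace("-", " ") if remove_hyphens else term

preprocess("12-14", True)   # -> "12 14": two tokens, matches "some 12-14"
preprocess("12-14", False)  # -> "12-14": kept whole, no match in the test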
3742545d6b2c13198bd1ecb3f7c675a8e6dad948
premium_and_date.py
premium_and_date.py
from datetime import date def get_first_payment_date(signup_date, previous_premium): raise NotImplementedError() def get_end_date(sign_up_date, cancel_date): raise NotImplementedError() def get_billing_day(signup_date): raise NotImplementedError()
Add file with premium dates task
Add file with premium dates task
Python
mit
coolshop-com/coolshop-application-assignment
Add file with premium dates task
from datetime import date def get_first_payment_date(signup_date, previous_premium): raise NotImplementedError() def get_end_date(sign_up_date, cancel_date): raise NotImplementedError() def get_billing_day(signup_date): raise NotImplementedError()
<commit_before><commit_msg>Add file with premium dates task<commit_after>
from datetime import date def get_first_payment_date(signup_date, previous_premium): raise NotImplementedError() def get_end_date(sign_up_date, cancel_date): raise NotImplementedError() def get_billing_day(signup_date): raise NotImplementedError()
Add file with premium dates taskfrom datetime import date def get_first_payment_date(signup_date, previous_premium): raise NotImplementedError() def get_end_date(sign_up_date, cancel_date): raise NotImplementedError() def get_billing_day(signup_date): raise NotImplementedError()
<commit_before><commit_msg>Add file with premium dates task<commit_after>from datetime import date def get_first_payment_date(signup_date, previous_premium): raise NotImplementedError() def get_end_date(sign_up_date, cancel_date): raise NotImplementedError() def get_billing_day(signup_date): raise NotImplementedError()
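The bodies are left unimplemented on purpose, this record being a recruitment-assignment skeleton. One possible reading of get_billing_day, under the explicit assumption (not stated in the record) that billing happens on the signup day-of-month, capped so the day exists in every month:

from datetime import date

def get_billing_day(signup_date):
    # Assumed rule: bill on the signup day, capped at 28 so February works.
    return min(signup_date.day, 28)

get_billing_day(date(2017, 1, 31))  # -> 28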
a1a74dc33ef90dbb833e8490b2bd8cfc33915dce
satchless/contrib/pricing/field/__init__.py
satchless/contrib/pricing/field/__init__.py
from django.db.models import Min, Max from ...pricing import Price class FieldGetter(object): def __init__(self, field_name='price', currency=None): self.currency = currency self.field_name = field_name class ProductFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant.product, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(product, self.field_name) except AttributeError: return price return (Price(instance_price, instance_price), Price(instance_price, instance_price)) class VariantFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price minmax = product.variants.all().aggregate( min_price=Min(self.field_name), max_price=Max(self.field_name)) return (Price(minmax['min_price'], minmax['min_price']), Price(minmax['max_price'], minmax['max_price']))
Add trivial pricing handler - Product/Variant field getter
Add trivial pricing handler - Product/Variant field getter
Python
bsd-3-clause
fusionbox/satchless,taedori81/satchless,fusionbox/satchless,fusionbox/satchless
Add trivial pricing handler - Product/Variant field getter
from django.db.models import Min, Max from ...pricing import Price class FieldGetter(object): def __init__(self, field_name='price', currency=None): self.currency = currency self.field_name = field_name class ProductFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant.product, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(product, self.field_name) except AttributeError: return price return (Price(instance_price, instance_price), Price(instance_price, instance_price)) class VariantFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price minmax = product.variants.all().aggregate( min_price=Min(self.field_name), max_price=Max(self.field_name)) return (Price(minmax['min_price'], minmax['min_price']), Price(minmax['max_price'], minmax['max_price']))
<commit_before><commit_msg>Add trivial pricing handler - Product/Variant field getter<commit_after>
from django.db.models import Min, Max from ...pricing import Price class FieldGetter(object): def __init__(self, field_name='price', currency=None): self.currency = currency self.field_name = field_name class ProductFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant.product, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(product, self.field_name) except AttributeError: return price return (Price(instance_price, instance_price), Price(instance_price, instance_price)) class VariantFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price minmax = product.variants.all().aggregate( min_price=Min(self.field_name), max_price=Max(self.field_name)) return (Price(minmax['min_price'], minmax['min_price']), Price(minmax['max_price'], minmax['max_price']))
Add trivial pricing handler - Product/Variant field getterfrom django.db.models import Min, Max from ...pricing import Price class FieldGetter(object): def __init__(self, field_name='price', currency=None): self.currency = currency self.field_name = field_name class ProductFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant.product, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(product, self.field_name) except AttributeError: return price return (Price(instance_price, instance_price), Price(instance_price, instance_price)) class VariantFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price minmax = product.variants.all().aggregate( min_price=Min(self.field_name), max_price=Max(self.field_name)) return (Price(minmax['min_price'], minmax['min_price']), Price(minmax['max_price'], minmax['max_price']))
<commit_before><commit_msg>Add trivial pricing handler - Product/Variant field getter<commit_after>from django.db.models import Min, Max from ...pricing import Price class FieldGetter(object): def __init__(self, field_name='price', currency=None): self.currency = currency self.field_name = field_name class ProductFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant.product, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(product, self.field_name) except AttributeError: return price return (Price(instance_price, instance_price), Price(instance_price, instance_price)) class VariantFieldGetter(FieldGetter): def get_variant_price(self, variant, currency, quantity=1, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price try: instance_price = getattr(variant, self.field_name) except AttributeError: return price return Price(instance_price, instance_price) def get_product_price_range(self, product, currency, **kwargs): price = kwargs.pop('price') if self.currency and self.currency != currency: return price minmax = product.variants.all().aggregate( min_price=Min(self.field_name), max_price=Max(self.field_name)) return (Price(minmax['min_price'], minmax['min_price']), Price(minmax['max_price'], minmax['max_price']))
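Each getter pops the running price out of kwargs and either passes it through (wrong currency, missing field) or replaces it, which suggests the handlers are meant to be chained. A hypothetical driver showing that shape (satchless wires up its own handler queue; this loop is only an illustration):

def run_variant_price_chain(handlers, variant, currency, price):
    for handler in handlers:
        price = handler.get_variant_price(variant, currency, price=price)
    return price

# e.g. run_variant_price_chain([ProductFieldGetter('price')], variant, 'USD', start_price)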
c418536827c523bb87dcdc893458e08ddba18102
support/jenkins/buildNoModules.py
support/jenkins/buildNoModules.py
import os from subprocess import call # To be called from the build folder in the OpenSpace modules = os.listdir("../modules") cmd = ["cmake"] cmd.append("-DGHOUL_USE_DEVIL=OFF") for m in modules: cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=OFF") cmd.append("..") call(cmd)
Add script to build OpenSpace with no modules enabled
Add script to build OpenSpace with no modules enabled
Python
mit
OpenSpace/OpenSpace,OpenSpace/OpenSpace,OpenSpace/OpenSpace,OpenSpace/OpenSpace
Add script to build OpenSpace with no modules enabled
import os from subprocess import call # To be called from the build folder in the OpenSpace modules = os.listdir("../modules") cmd = ["cmake"] cmd.append("-DGHOUL_USE_DEVIL=OFF") for m in modules: cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=OFF") cmd.append("..") call(cmd)
<commit_before><commit_msg>Add script to build OpenSpace with no modules enabled<commit_after>
import os from subprocess import call # To be called from the build folder in the OpenSpace modules = os.listdir("../modules") cmd = ["cmake"] cmd.append("-DGHOUL_USE_DEVIL=OFF") for m in modules: cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=OFF") cmd.append("..") call(cmd)
Add script to build OpenSpace with no modules enabledimport os from subprocess import call # To be called from the build folder in the OpenSpace modules = os.listdir("../modules") cmd = ["cmake"] cmd.append("-DGHOUL_USE_DEVIL=OFF") for m in modules: cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=OFF") cmd.append("..") call(cmd)
<commit_before><commit_msg>Add script to build OpenSpace with no modules enabled<commit_after>import os from subprocess import call # To be called from the build folder in the OpenSpace modules = os.listdir("../modules") cmd = ["cmake"] cmd.append("-DGHOUL_USE_DEVIL=OFF") for m in modules: cmd.append("-DOPENSPACE_MODULE_" + m.upper() + "=OFF") cmd.append("..") call(cmd)
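For a checkout whose modules directory contains, say, base and globebrowsing, the loop above assembles the command shown below; note that os.listdir also returns plain files, so stray entries in modules/ would be turned into (unused) -D flags too. A self-contained demo with hypothetical module names:

modules = ["base", "globebrowsing"]
cmd = ["cmake", "-DGHOUL_USE_DEVIL=OFF"]
cmd += ["-DOPENSPACE_MODULE_%s=OFF" % m.upper() for m in modules]
cmd.append("..")
print(" ".join(cmd))
# cmake -DGHOUL_USE_DEVIL=OFF -DOPENSPACE_MODULE_BASE=OFF -DOPENSPACE_MODULE_GLOBEBROWSING=OFF ..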
6fc69f735aa075aad15e066c5a2d1c0cb7df722a
site/api/migrations/0013_auto_20150317_1053.py
site/api/migrations/0013_auto_20150317_1053.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.contrib.gis.db.models.fields class Migration(migrations.Migration): dependencies = [ ('api', '0012_auto_20150313_1631'), ] operations = [ migrations.RemoveField( model_name='location', name='geom', ), migrations.RemoveField( model_name='location', name='poly', ), migrations.RemoveField( model_name='location', name='ptype', ), migrations.AddField( model_name='location', name='point', field=django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, db_column=b'geom'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon', field=django.contrib.gis.db.models.fields.PolygonField(srid=4326, null=True, db_column=b'poly'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon_type', field=models.CharField(max_length=32, null=True, db_column=b'ptype'), preserve_default=True, ), ]
Add migration file to reflect use of db_column in models
Add migration file to reflect use of db_column in models
Python
mit
LitPalimpsest/parser-api-search,LitPalimpsest/parser-api-search,LitPalimpsest/parser-api-search
Add migration file to reflect use of db_column in models
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.contrib.gis.db.models.fields class Migration(migrations.Migration): dependencies = [ ('api', '0012_auto_20150313_1631'), ] operations = [ migrations.RemoveField( model_name='location', name='geom', ), migrations.RemoveField( model_name='location', name='poly', ), migrations.RemoveField( model_name='location', name='ptype', ), migrations.AddField( model_name='location', name='point', field=django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, db_column=b'geom'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon', field=django.contrib.gis.db.models.fields.PolygonField(srid=4326, null=True, db_column=b'poly'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon_type', field=models.CharField(max_length=32, null=True, db_column=b'ptype'), preserve_default=True, ), ]
<commit_before><commit_msg>Add migration file to reflect use of db_column in models<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.contrib.gis.db.models.fields class Migration(migrations.Migration): dependencies = [ ('api', '0012_auto_20150313_1631'), ] operations = [ migrations.RemoveField( model_name='location', name='geom', ), migrations.RemoveField( model_name='location', name='poly', ), migrations.RemoveField( model_name='location', name='ptype', ), migrations.AddField( model_name='location', name='point', field=django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, db_column=b'geom'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon', field=django.contrib.gis.db.models.fields.PolygonField(srid=4326, null=True, db_column=b'poly'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon_type', field=models.CharField(max_length=32, null=True, db_column=b'ptype'), preserve_default=True, ), ]
Add migration file to reflect use of db_column in models# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.contrib.gis.db.models.fields class Migration(migrations.Migration): dependencies = [ ('api', '0012_auto_20150313_1631'), ] operations = [ migrations.RemoveField( model_name='location', name='geom', ), migrations.RemoveField( model_name='location', name='poly', ), migrations.RemoveField( model_name='location', name='ptype', ), migrations.AddField( model_name='location', name='point', field=django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, db_column=b'geom'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon', field=django.contrib.gis.db.models.fields.PolygonField(srid=4326, null=True, db_column=b'poly'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon_type', field=models.CharField(max_length=32, null=True, db_column=b'ptype'), preserve_default=True, ), ]
<commit_before><commit_msg>Add migration file to reflect use of db_column in models<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.contrib.gis.db.models.fields class Migration(migrations.Migration): dependencies = [ ('api', '0012_auto_20150313_1631'), ] operations = [ migrations.RemoveField( model_name='location', name='geom', ), migrations.RemoveField( model_name='location', name='poly', ), migrations.RemoveField( model_name='location', name='ptype', ), migrations.AddField( model_name='location', name='point', field=django.contrib.gis.db.models.fields.PointField(srid=4326, null=True, db_column=b'geom'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon', field=django.contrib.gis.db.models.fields.PolygonField(srid=4326, null=True, db_column=b'poly'), preserve_default=True, ), migrations.AddField( model_name='location', name='polygon_type', field=models.CharField(max_length=32, null=True, db_column=b'ptype'), preserve_default=True, ), ]
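The migration renames the Python attributes (geom to point, poly to polygon, ptype to polygon_type) while db_column pins the original column names; as generated, though, the operations drop and re-add the columns rather than renaming in place, so existing data in those columns would be lost. The model side it reflects looks roughly like this (field definitions read off the migration itself):

from django.contrib.gis.db import models

class Location(models.Model):
    point = models.PointField(srid=4326, null=True, db_column='geom')
    polygon = models.PolygonField(srid=4326, null=True, db_column='poly')
    polygon_type = models.CharField(max_length=32, null=True, db_column='ptype')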
e2c9f4bba4ebe80b15026ccb1bb35ef1bf1f199d
migrations/versions/299d2746edd_removed_deadline_column_pimpy.py
migrations/versions/299d2746edd_removed_deadline_column_pimpy.py
"""removed deadline column pimpy Revision ID: 299d2746edd Revises: 40f1fd0d52 Create Date: 2015-07-18 13:21:16.172796 """ # revision identifiers, used by Alembic. revision = '299d2746edd' down_revision = '40f1fd0d52' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('pimpy_task', 'deadline') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('pimpy_task', sa.Column('deadline', mysql.DATETIME(), nullable=True)) ### end Alembic commands ###
Add migration for removing deadline
Add migration for removing deadline
Python
mit
viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct
Add migration for removing deadline
"""removed deadline column pimpy Revision ID: 299d2746edd Revises: 40f1fd0d52 Create Date: 2015-07-18 13:21:16.172796 """ # revision identifiers, used by Alembic. revision = '299d2746edd' down_revision = '40f1fd0d52' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('pimpy_task', 'deadline') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('pimpy_task', sa.Column('deadline', mysql.DATETIME(), nullable=True)) ### end Alembic commands ###
<commit_before><commit_msg>Add migration for removing deadline<commit_after>
"""removed deadline column pimpy Revision ID: 299d2746edd Revises: 40f1fd0d52 Create Date: 2015-07-18 13:21:16.172796 """ # revision identifiers, used by Alembic. revision = '299d2746edd' down_revision = '40f1fd0d52' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('pimpy_task', 'deadline') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('pimpy_task', sa.Column('deadline', mysql.DATETIME(), nullable=True)) ### end Alembic commands ###
Add migration for removing deadline"""removed deadline column pimpy Revision ID: 299d2746edd Revises: 40f1fd0d52 Create Date: 2015-07-18 13:21:16.172796 """ # revision identifiers, used by Alembic. revision = '299d2746edd' down_revision = '40f1fd0d52' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('pimpy_task', 'deadline') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('pimpy_task', sa.Column('deadline', mysql.DATETIME(), nullable=True)) ### end Alembic commands ###
<commit_before><commit_msg>Add migration for removing deadline<commit_after>"""removed deadline column pimpy Revision ID: 299d2746edd Revises: 40f1fd0d52 Create Date: 2015-07-18 13:21:16.172796 """ # revision identifiers, used by Alembic. revision = '299d2746edd' down_revision = '40f1fd0d52' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import mysql def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('pimpy_task', 'deadline') ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('pimpy_task', sa.Column('deadline', mysql.DATETIME(), nullable=True)) ### end Alembic commands ###
de57228d38d9a95953ffc90f70e03cbfda806774
networkx/algorithms/tests/test_betweenness_centrality.py
networkx/algorithms/tests/test_betweenness_centrality.py
#!/usr/bin/env python from nose.tools import * import networkx class TestBetweennessCentrality: def setUp(self): G=networkx.Graph(); G.add_edge(0,1,3) G.add_edge(0,2,2) G.add_edge(0,3,6) G.add_edge(0,4,4) G.add_edge(1,3,5) G.add_edge(1,5,5) G.add_edge(2,4,1) G.add_edge(3,4,2) G.add_edge(3,5,1) G.add_edge(4,5,4) self.G=G self.exact_weighted={0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} def test_brandes_betweenness(self): b=networkx.betweenness_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n]) def test_load(self): b=networkx.load_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n])
Add betweenness centrality and load centrality test.
Add betweenness centrality and load centrality test. --HG-- extra : convert_revision : svn%3A3ed01bd8-26fb-0310-9e4c-ca1a4053419f/networkx/trunk%401110
Python
bsd-3-clause
chrisnatali/networkx,tmilicic/networkx,bzero/networkx,farhaanbukhsh/networkx,jtorrents/networkx,RMKD/networkx,ionanrozenfeld/networkx,debsankha/networkx,jfinkels/networkx,wasade/networkx,ghdk/networkx,ionanrozenfeld/networkx,jni/networkx,jakevdp/networkx,harlowja/networkx,kernc/networkx,jcurbelo/networkx,SanketDG/networkx,dhimmel/networkx,kai5263499/networkx,harlowja/networkx,debsankha/networkx,RMKD/networkx,JamesClough/networkx,harlowja/networkx,nathania/networkx,andnovar/networkx,aureooms/networkx,dhimmel/networkx,dhimmel/networkx,farhaanbukhsh/networkx,dmoliveira/networkx,blublud/networkx,OrkoHunter/networkx,jtorrents/networkx,sharifulgeo/networkx,dmoliveira/networkx,chrisnatali/networkx,sharifulgeo/networkx,nathania/networkx,aureooms/networkx,jakevdp/networkx,blublud/networkx,michaelpacer/networkx,beni55/networkx,RMKD/networkx,bzero/networkx,kai5263499/networkx,ionanrozenfeld/networkx,bzero/networkx,yashu-seth/networkx,cmtm/networkx,nathania/networkx,aureooms/networkx,kernc/networkx,jni/networkx,debsankha/networkx,ghdk/networkx,ghdk/networkx,sharifulgeo/networkx,farhaanbukhsh/networkx,goulu/networkx,chrisnatali/networkx,kernc/networkx,jni/networkx,ltiao/networkx,blublud/networkx,NvanAdrichem/networkx,Sixshaman/networkx,jakevdp/networkx,dmoliveira/networkx,kai5263499/networkx
Add betweenness centrality and load centrality test. --HG-- extra : convert_revision : svn%3A3ed01bd8-26fb-0310-9e4c-ca1a4053419f/networkx/trunk%401110
#!/usr/bin/env python from nose.tools import * import networkx class TestBetweennessCentrality: def setUp(self): G=networkx.Graph(); G.add_edge(0,1,3) G.add_edge(0,2,2) G.add_edge(0,3,6) G.add_edge(0,4,4) G.add_edge(1,3,5) G.add_edge(1,5,5) G.add_edge(2,4,1) G.add_edge(3,4,2) G.add_edge(3,5,1) G.add_edge(4,5,4) self.G=G self.exact_weighted={0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} def test_brandes_betweenness(self): b=networkx.betweenness_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n]) def test_load(self): b=networkx.load_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n])
<commit_before><commit_msg>Add betweenness centrality and load centrality test. --HG-- extra : convert_revision : svn%3A3ed01bd8-26fb-0310-9e4c-ca1a4053419f/networkx/trunk%401110<commit_after>
#!/usr/bin/env python from nose.tools import * import networkx class TestBetweennessCentrality: def setUp(self): G=networkx.Graph(); G.add_edge(0,1,3) G.add_edge(0,2,2) G.add_edge(0,3,6) G.add_edge(0,4,4) G.add_edge(1,3,5) G.add_edge(1,5,5) G.add_edge(2,4,1) G.add_edge(3,4,2) G.add_edge(3,5,1) G.add_edge(4,5,4) self.G=G self.exact_weighted={0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} def test_brandes_betweenness(self): b=networkx.betweenness_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n]) def test_load(self): b=networkx.load_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n])
Add betweenness centrality and load centrality test. --HG-- extra : convert_revision : svn%3A3ed01bd8-26fb-0310-9e4c-ca1a4053419f/networkx/trunk%401110#!/usr/bin/env python from nose.tools import * import networkx class TestBetweennessCentrality: def setUp(self): G=networkx.Graph(); G.add_edge(0,1,3) G.add_edge(0,2,2) G.add_edge(0,3,6) G.add_edge(0,4,4) G.add_edge(1,3,5) G.add_edge(1,5,5) G.add_edge(2,4,1) G.add_edge(3,4,2) G.add_edge(3,5,1) G.add_edge(4,5,4) self.G=G self.exact_weighted={0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} def test_brandes_betweenness(self): b=networkx.betweenness_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n]) def test_load(self): b=networkx.load_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n])
<commit_before><commit_msg>Add betweenness centrality and load centrality test. --HG-- extra : convert_revision : svn%3A3ed01bd8-26fb-0310-9e4c-ca1a4053419f/networkx/trunk%401110<commit_after>#!/usr/bin/env python from nose.tools import * import networkx class TestBetweennessCentrality: def setUp(self): G=networkx.Graph(); G.add_edge(0,1,3) G.add_edge(0,2,2) G.add_edge(0,3,6) G.add_edge(0,4,4) G.add_edge(1,3,5) G.add_edge(1,5,5) G.add_edge(2,4,1) G.add_edge(3,4,2) G.add_edge(3,5,1) G.add_edge(4,5,4) self.G=G self.exact_weighted={0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0} def test_brandes_betweenness(self): b=networkx.betweenness_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n]) def test_load(self): b=networkx.load_centrality(self.G,weighted_edges=True, normalized=False) for n in sorted(self.G): assert_equal(b[n],self.exact_weighted[n])
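The test drives the networkx 1.x-era API, where the third positional argument of add_edge carries the edge weight and weighted_edges=True selects it. An adaptation of the same check to the 2.x-style API (this rewrite is mine, not part of the commit):

import networkx as nx

G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 3), (0, 2, 2), (0, 3, 6), (0, 4, 4),
                           (1, 3, 5), (1, 5, 5), (2, 4, 1), (3, 4, 2),
                           (3, 5, 1), (4, 5, 4)])
b = nx.betweenness_centrality(G, weight="weight", normalized=False)
assert b == {0: 4.0, 1: 0.0, 2: 8.0, 3: 6.0, 4: 8.0, 5: 0.0}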
15c589b6b186865faf0dcec10b75d54d8ed9200e
test/test_util.py
test/test_util.py
import mock import unittest from .models import B from json import dumps from sir import util from sir.schema import searchentities class VersionCheckerTest(unittest.TestCase): def setUp(self): urlopen = mock.patch("sir.util.urllib2.urlopen") urlopenmock = urlopen.start() self.addCleanup(urlopen.stop) self.read = mock.Mock() urlopenmock.return_value.read = self.read schema = mock.patch.dict("sir.schema.SCHEMA", {"testcore": searchentities.SearchEntity( B, [searchentities.SearchField("id", "id")], 1.1)}) schema.start() self.addCleanup(schema.stop) config = mock.patch("sir.util.config.CFG") config.return_value = "" config.start() self.addCleanup(config.stop) def test_matching_version(self): self.read.return_value = dumps({"version": 1.1}) util.solr_version_check("testcore") def test_solr_version_too_large(self): self.read.return_value = dumps({"version": 1.2}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.2", util.solr_version_check, "testcore") def test_solr_version_too_small(self): self.read.return_value = dumps({"version": 1.0}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.0", util.solr_version_check, "testcore")
Add an uncommitted test file
Add an uncommitted test file
Python
mit
jeffweeksio/sir
Add an uncommitted test file
import mock import unittest from .models import B from json import dumps from sir import util from sir.schema import searchentities class VersionCheckerTest(unittest.TestCase): def setUp(self): urlopen = mock.patch("sir.util.urllib2.urlopen") urlopenmock = urlopen.start() self.addCleanup(urlopen.stop) self.read = mock.Mock() urlopenmock.return_value.read = self.read schema = mock.patch.dict("sir.schema.SCHEMA", {"testcore": searchentities.SearchEntity( B, [searchentities.SearchField("id", "id")], 1.1)}) schema.start() self.addCleanup(schema.stop) config = mock.patch("sir.util.config.CFG") config.return_value = "" config.start() self.addCleanup(config.stop) def test_matching_version(self): self.read.return_value = dumps({"version": 1.1}) util.solr_version_check("testcore") def test_solr_version_too_large(self): self.read.return_value = dumps({"version": 1.2}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.2", util.solr_version_check, "testcore") def test_solr_version_too_small(self): self.read.return_value = dumps({"version": 1.0}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.0", util.solr_version_check, "testcore")
<commit_before><commit_msg>Add an uncommitted test file<commit_after>
import mock import unittest from .models import B from json import dumps from sir import util from sir.schema import searchentities class VersionCheckerTest(unittest.TestCase): def setUp(self): urlopen = mock.patch("sir.util.urllib2.urlopen") urlopenmock = urlopen.start() self.addCleanup(urlopen.stop) self.read = mock.Mock() urlopenmock.return_value.read = self.read schema = mock.patch.dict("sir.schema.SCHEMA", {"testcore": searchentities.SearchEntity( B, [searchentities.SearchField("id", "id")], 1.1)}) schema.start() self.addCleanup(schema.stop) config = mock.patch("sir.util.config.CFG") config.return_value = "" config.start() self.addCleanup(config.stop) def test_matching_version(self): self.read.return_value = dumps({"version": 1.1}) util.solr_version_check("testcore") def test_solr_version_too_large(self): self.read.return_value = dumps({"version": 1.2}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.2", util.solr_version_check, "testcore") def test_solr_version_too_small(self): self.read.return_value = dumps({"version": 1.0}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.0", util.solr_version_check, "testcore")
Add an uncommitted test fileimport mock import unittest from .models import B from json import dumps from sir import util from sir.schema import searchentities class VersionCheckerTest(unittest.TestCase): def setUp(self): urlopen = mock.patch("sir.util.urllib2.urlopen") urlopenmock = urlopen.start() self.addCleanup(urlopen.stop) self.read = mock.Mock() urlopenmock.return_value.read = self.read schema = mock.patch.dict("sir.schema.SCHEMA", {"testcore": searchentities.SearchEntity( B, [searchentities.SearchField("id", "id")], 1.1)}) schema.start() self.addCleanup(schema.stop) config = mock.patch("sir.util.config.CFG") config.return_value = "" config.start() self.addCleanup(config.stop) def test_matching_version(self): self.read.return_value = dumps({"version": 1.1}) util.solr_version_check("testcore") def test_solr_version_too_large(self): self.read.return_value = dumps({"version": 1.2}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.2", util.solr_version_check, "testcore") def test_solr_version_too_small(self): self.read.return_value = dumps({"version": 1.0}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.0", util.solr_version_check, "testcore")
<commit_before><commit_msg>Add an uncommitted test file<commit_after>import mock import unittest from .models import B from json import dumps from sir import util from sir.schema import searchentities class VersionCheckerTest(unittest.TestCase): def setUp(self): urlopen = mock.patch("sir.util.urllib2.urlopen") urlopenmock = urlopen.start() self.addCleanup(urlopen.stop) self.read = mock.Mock() urlopenmock.return_value.read = self.read schema = mock.patch.dict("sir.schema.SCHEMA", {"testcore": searchentities.SearchEntity( B, [searchentities.SearchField("id", "id")], 1.1)}) schema.start() self.addCleanup(schema.stop) config = mock.patch("sir.util.config.CFG") config.return_value = "" config.start() self.addCleanup(config.stop) def test_matching_version(self): self.read.return_value = dumps({"version": 1.1}) util.solr_version_check("testcore") def test_solr_version_too_large(self): self.read.return_value = dumps({"version": 1.2}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.2", util.solr_version_check, "testcore") def test_solr_version_too_small(self): self.read.return_value = dumps({"version": 1.0}) self.assertRaisesRegexp(util.VersionMismatchException, "^testcore: Expected 1.1, got 1.0", util.solr_version_check, "testcore")
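Worth noting in setUp above: each patch is started manually and torn down with addCleanup, so the patch is removed even when the test body raises, whereas a stop() call placed in tearDown can be skipped if setUp itself fails partway. Minimal form of the idiom (patch target copied from the record):

import unittest
import mock  # unittest.mock on Python 3

class ExampleTest(unittest.TestCase):
    def setUp(self):
        patcher = mock.patch("sir.util.urllib2.urlopen")
        self.urlopen = patcher.start()
        self.addCleanup(patcher.stop)  # runs on pass, failure, or error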
bb5d89ca793cfbdeaf4dea3a4791f675ecf9039f
order-flights/flights-recursion.py
order-flights/flights-recursion.py
def next_leg(flights, source): """Find and append next flight to trip""" for leg in flights: if leg[0] == source: trip.append(leg) source = leg[1] flights.remove(leg) next_leg(flights, source) def previous_leg(flights, destination): """Find and prepend previous flight to trip""" for leg in flights: if leg[1] == destination: trip.insert(0, leg) destination = leg[0] flights.remove(leg) previous_leg(flights, destination) flights = [ ['Minneapolis', 'Las Vegas'], ['LA', 'Chicago'], ['Las Vegas', 'Seattle'], ['Chicago', 'Atlanta'], ['Atlanta', 'NY'], ['NY', 'Minneapolis'], ] trip = [flights[0]] flights.pop(0) next_leg(flights, trip[0][1]) previous_leg(flights, trip[0][0]) print(trip)
Order legs of a flight trip using recursion
Order legs of a flight trip using recursion
Python
mit
zedfoxus/stackoverflow-answers,zedfoxus/stackoverflow-answers,zedfoxus/stackoverflow-answers
Order legs of a flight trip using recursion
def next_leg(flights, source): """Find and append next flight to trip""" for leg in flights: if leg[0] == source: trip.append(leg) source = leg[1] flights.remove(leg) next_leg(flights, source) def previous_leg(flights, destination): """Find and prepend previous flight to trip""" for leg in flights: if leg[1] == destination: trip.insert(0, leg) destination = leg[0] flights.remove(leg) previous_leg(flights, destination) flights = [ ['Minneapolis', 'Las Vegas'], ['LA', 'Chicago'], ['Las Vegas', 'Seattle'], ['Chicago', 'Atlanta'], ['Atlanta', 'NY'], ['NY', 'Minneapolis'], ] trip = [flights[0]] flights.pop(0) next_leg(flights, trip[0][1]) previous_leg(flights, trip[0][0]) print(trip)
<commit_before><commit_msg>Order legs of a flight trip using recursion<commit_after>
def next_leg(flights, source): """Find and append next flight to trip""" for leg in flights: if leg[0] == source: trip.append(leg) source = leg[1] flights.remove(leg) next_leg(flights, source) def previous_leg(flights, destination): """Find and prepend previous flight to trip""" for leg in flights: if leg[1] == destination: trip.insert(0, leg) destination = leg[0] flights.remove(leg) previous_leg(flights, destination) flights = [ ['Minneapolis', 'Las Vegas'], ['LA', 'Chicago'], ['Las Vegas', 'Seattle'], ['Chicago', 'Atlanta'], ['Atlanta', 'NY'], ['NY', 'Minneapolis'], ] trip = [flights[0]] flights.pop(0) next_leg(flights, trip[0][1]) previous_leg(flights, trip[0][0]) print(trip)
Order legs of a flight trip using recursion

def next_leg(flights, source):
    """Find and append next flight to trip"""
    for leg in flights:
        if leg[0] == source:
            trip.append(leg)
            source = leg[1]
            flights.remove(leg)
            next_leg(flights, source)


def previous_leg(flights, destination):
    """Find and prepend previous flight to trip"""
    for leg in flights:
        if leg[1] == destination:
            trip.insert(0, leg)
            destination = leg[0]
            flights.remove(leg)
            previous_leg(flights, destination)


flights = [
    ['Minneapolis', 'Las Vegas'],
    ['LA', 'Chicago'],
    ['Las Vegas', 'Seattle'],
    ['Chicago', 'Atlanta'],
    ['Atlanta', 'NY'],
    ['NY', 'Minneapolis'],
]

trip = [flights[0]]
flights.pop(0)
next_leg(flights, trip[0][1])
previous_leg(flights, trip[0][0])
print(trip)
<commit_before><commit_msg>Order legs of a flight trip using recursion<commit_after>def next_leg(flights, source): """Find and append next flight to trip""" for leg in flights: if leg[0] == source: trip.append(leg) source = leg[1] flights.remove(leg) next_leg(flights, source) def previous_leg(flights, destination): """Find and prepend previous flight to trip""" for leg in flights: if leg[1] == destination: trip.insert(0, leg) destination = leg[0] flights.remove(leg) previous_leg(flights, destination) flights = [ ['Minneapolis', 'Las Vegas'], ['LA', 'Chicago'], ['Las Vegas', 'Seattle'], ['Chicago', 'Atlanta'], ['Atlanta', 'NY'], ['NY', 'Minneapolis'], ] trip = [flights[0]] flights.pop(0) next_leg(flights, trip[0][1]) previous_leg(flights, trip[0][0]) print(trip)
f7aeb4fa5bafa5218bed14e5b19c0fb9409e6700
examples/python/readMCParticles.py
examples/python/readMCParticles.py
#!/usr/bin/env python # # This is just a simple test script to check whether the python bindings are # actually working as intended from __future__ import print_function, absolute_import, unicode_literals import pyLCIO as lcio import sys reader = lcio.IOIMPL.LCFactory.getInstance().createLCReader() reader.open(sys.argv[1]) for event in reader: mcs = event.getCollection('MCParticle') for mc in mcs: print(mc.getEnergy())
Add small test script for python bindings
Add small test script for python bindings
Python
bsd-3-clause
iLCSoft/LCIO,iLCSoft/LCIO,iLCSoft/LCIO,iLCSoft/LCIO,iLCSoft/LCIO,iLCSoft/LCIO
Add small test script for python bindings
#!/usr/bin/env python # # This is just a simple test script to check whether the python bindings are # actually working as intended from __future__ import print_function, absolute_import, unicode_literals import pyLCIO as lcio import sys reader = lcio.IOIMPL.LCFactory.getInstance().createLCReader() reader.open(sys.argv[1]) for event in reader: mcs = event.getCollection('MCParticle') for mc in mcs: print(mc.getEnergy())
<commit_before><commit_msg>Add small test script for python bindings<commit_after>
#!/usr/bin/env python # # This is just a simple test script to check whether the python bindings are # actually working as intended from __future__ import print_function, absolute_import, unicode_literals import pyLCIO as lcio import sys reader = lcio.IOIMPL.LCFactory.getInstance().createLCReader() reader.open(sys.argv[1]) for event in reader: mcs = event.getCollection('MCParticle') for mc in mcs: print(mc.getEnergy())
Add small test script for python bindings

#!/usr/bin/env python
#
# This is just a simple test script to check whether the python bindings are
# actually working as intended
from __future__ import print_function, absolute_import, unicode_literals

import pyLCIO as lcio
import sys

reader = lcio.IOIMPL.LCFactory.getInstance().createLCReader()
reader.open(sys.argv[1])

for event in reader:
    mcs = event.getCollection('MCParticle')
    for mc in mcs:
        print(mc.getEnergy())
<commit_before><commit_msg>Add small test script for python bindings<commit_after>#!/usr/bin/env python # # This is just a simple test script to check whether the python bindings are # actually working as intended from __future__ import print_function, absolute_import, unicode_literals import pyLCIO as lcio import sys reader = lcio.IOIMPL.LCFactory.getInstance().createLCReader() reader.open(sys.argv[1]) for event in reader: mcs = event.getCollection('MCParticle') for mc in mcs: print(mc.getEnergy())
478d8331e03e00365beafff455e8dc7ed2562af4
intake_bluesky/tests/test_dask_filler.py
intake_bluesky/tests/test_dask_filler.py
import event_model from bluesky.plans import count import numpy from ophyd.sim import NumpySeqHandler from ..core import DaskFiller def test_fill_event(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) filled_docs _, dask_filled_event = filled_docs[-2] arr = dask_filled_event['data']['img'].compute() assert arr.shape == (10, 10) assert isinstance(arr, numpy.ndarray) def test_fill_event_page(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] _, event = docs[-2] event_page = event_model.pack_event_page(event) docs[-2] = ('event_page', event_page) dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) _, dask_filled_event_page = filled_docs[-2] arr = dask_filled_event_page['data']['img'].compute() assert arr.shape == (1, 10, 10) assert isinstance(arr, numpy.ndarray)
Add basic smoke tests for dask.
TST: Add basic smoke tests for dask.
Python
bsd-3-clause
ericdill/databroker,ericdill/databroker
TST: Add basic smoke tests for dask.
import event_model from bluesky.plans import count import numpy from ophyd.sim import NumpySeqHandler from ..core import DaskFiller def test_fill_event(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) filled_docs _, dask_filled_event = filled_docs[-2] arr = dask_filled_event['data']['img'].compute() assert arr.shape == (10, 10) assert isinstance(arr, numpy.ndarray) def test_fill_event_page(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] _, event = docs[-2] event_page = event_model.pack_event_page(event) docs[-2] = ('event_page', event_page) dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) _, dask_filled_event_page = filled_docs[-2] arr = dask_filled_event_page['data']['img'].compute() assert arr.shape == (1, 10, 10) assert isinstance(arr, numpy.ndarray)
<commit_before><commit_msg>TST: Add basic smoke tests for dask.<commit_after>
import event_model from bluesky.plans import count import numpy from ophyd.sim import NumpySeqHandler from ..core import DaskFiller def test_fill_event(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) filled_docs _, dask_filled_event = filled_docs[-2] arr = dask_filled_event['data']['img'].compute() assert arr.shape == (10, 10) assert isinstance(arr, numpy.ndarray) def test_fill_event_page(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] _, event = docs[-2] event_page = event_model.pack_event_page(event) docs[-2] = ('event_page', event_page) dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) _, dask_filled_event_page = filled_docs[-2] arr = dask_filled_event_page['data']['img'].compute() assert arr.shape == (1, 10, 10) assert isinstance(arr, numpy.ndarray)
TST: Add basic smoke tests for dask.

import event_model
from bluesky.plans import count
import numpy
from ophyd.sim import NumpySeqHandler

from ..core import DaskFiller


def test_fill_event(RE, hw):
    docs = []

    def callback(name, doc):
        docs.append((name, doc))

    RE(count([hw.img]), callback)
    docs
    dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler})
    filled_docs = []
    for name, doc in docs:
        filled_docs.append(dask_filler(name, doc))
    filled_docs
    _, dask_filled_event = filled_docs[-2]
    arr = dask_filled_event['data']['img'].compute()
    assert arr.shape == (10, 10)
    assert isinstance(arr, numpy.ndarray)


def test_fill_event_page(RE, hw):
    docs = []

    def callback(name, doc):
        docs.append((name, doc))

    RE(count([hw.img]), callback)
    docs
    dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler})
    filled_docs = []
    _, event = docs[-2]
    event_page = event_model.pack_event_page(event)
    docs[-2] = ('event_page', event_page)
    dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler})
    filled_docs = []
    for name, doc in docs:
        filled_docs.append(dask_filler(name, doc))
    _, dask_filled_event_page = filled_docs[-2]
    arr = dask_filled_event_page['data']['img'].compute()
    assert arr.shape == (1, 10, 10)
    assert isinstance(arr, numpy.ndarray)
<commit_before><commit_msg>TST: Add basic smoke tests for dask.<commit_after>import event_model from bluesky.plans import count import numpy from ophyd.sim import NumpySeqHandler from ..core import DaskFiller def test_fill_event(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) filled_docs _, dask_filled_event = filled_docs[-2] arr = dask_filled_event['data']['img'].compute() assert arr.shape == (10, 10) assert isinstance(arr, numpy.ndarray) def test_fill_event_page(RE, hw): docs = [] def callback(name, doc): docs.append((name, doc)) RE(count([hw.img]), callback) docs dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] _, event = docs[-2] event_page = event_model.pack_event_page(event) docs[-2] = ('event_page', event_page) dask_filler = DaskFiller({'NPY_SEQ': NumpySeqHandler}) filled_docs = [] for name, doc in docs: filled_docs.append(dask_filler(name, doc)) _, dask_filled_event_page = filled_docs[-2] arr = dask_filled_event_page['data']['img'].compute() assert arr.shape == (1, 10, 10) assert isinstance(arr, numpy.ndarray)
c1b090a8774030d4898a02b41cdbd9f199e2c1b8
packager/core/test/test_flavor.py
packager/core/test/test_flavor.py
#! /usr/bin/python from packager.core.flavor import debian_check from nose.tools import * from subprocess import call def test_debian_check(): ret = call(["test", "-f", "/etc/debian_version"]) == 0 assert debian_check() == ret
Add unit test for packager.core.flavor.py
Add unit test for packager.core.flavor.py
Python
mit
csdms/packagebuilder
Add unit test for packager.core.flavor.py
#! /usr/bin/python from packager.core.flavor import debian_check from nose.tools import * from subprocess import call def test_debian_check(): ret = call(["test", "-f", "/etc/debian_version"]) == 0 assert debian_check() == ret
<commit_before><commit_msg>Add unit test for packager.core.flavor.py<commit_after>
#! /usr/bin/python from packager.core.flavor import debian_check from nose.tools import * from subprocess import call def test_debian_check(): ret = call(["test", "-f", "/etc/debian_version"]) == 0 assert debian_check() == ret
Add unit test for packager.core.flavor.py

#! /usr/bin/python

from packager.core.flavor import debian_check
from nose.tools import *
from subprocess import call


def test_debian_check():
    ret = call(["test", "-f", "/etc/debian_version"]) == 0
    assert debian_check() == ret
<commit_before><commit_msg>Add unit test for packager.core.flavor.py<commit_after>#! /usr/bin/python from packager.core.flavor import debian_check from nose.tools import * from subprocess import call def test_debian_check(): ret = call(["test", "-f", "/etc/debian_version"]) == 0 assert debian_check() == ret
b8121d15e59d08cd93ada94b69b37c8942308ede
scrapi/harvesters/huskiecommons.py
scrapi/harvesters/huskiecommons.py
''' Harvester for the Huskie Commons for the SHARE project Example API call: http://commons.lib.niu.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class HuskiecommonsHarvester(OAIHarvester): short_name = 'huskiecommons' long_name = 'Huskie Commons' url = 'http://commons.lib.niu.edu' base_url = 'http://commons.lib.niu.edu/oai/request' property_list = ['date', 'identifier', 'type', 'setSpec'] timezone_granularity = True
Add harvester for Huskie Commons
Add harvester for Huskie Commons Closes [#SHARE-114]
Python
apache-2.0
fabianvf/scrapi,CenterForOpenScience/scrapi,felliott/scrapi,erinspace/scrapi,fabianvf/scrapi,erinspace/scrapi,felliott/scrapi,CenterForOpenScience/scrapi
Add harvester for Huskie Commons Closes [#SHARE-114]
''' Harvester for the Huskie Commons for the SHARE project Example API call: http://commons.lib.niu.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class HuskiecommonsHarvester(OAIHarvester): short_name = 'huskiecommons' long_name = 'Huskie Commons' url = 'http://commons.lib.niu.edu' base_url = 'http://commons.lib.niu.edu/oai/request' property_list = ['date', 'identifier', 'type', 'setSpec'] timezone_granularity = True
<commit_before><commit_msg>Add harvester for Huskie Commons Closes [#SHARE-114]<commit_after>
''' Harvester for the Huskie Commons for the SHARE project Example API call: http://commons.lib.niu.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class HuskiecommonsHarvester(OAIHarvester): short_name = 'huskiecommons' long_name = 'Huskie Commons' url = 'http://commons.lib.niu.edu' base_url = 'http://commons.lib.niu.edu/oai/request' property_list = ['date', 'identifier', 'type', 'setSpec'] timezone_granularity = True
Add harvester for Huskie Commons Closes [#SHARE-114]

'''
Harvester for the Huskie Commons for the SHARE project

Example API call: http://commons.lib.niu.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc
'''

from __future__ import unicode_literals

from scrapi.base import OAIHarvester


class HuskiecommonsHarvester(OAIHarvester):
    short_name = 'huskiecommons'
    long_name = 'Huskie Commons'
    url = 'http://commons.lib.niu.edu'

    base_url = 'http://commons.lib.niu.edu/oai/request'
    property_list = ['date', 'identifier', 'type', 'setSpec']
    timezone_granularity = True
<commit_before><commit_msg>Add harvester for Huskie Commons Closes [#SHARE-114]<commit_after>''' Harvester for the Huskie Commons for the SHARE project Example API call: http://commons.lib.niu.edu/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class HuskiecommonsHarvester(OAIHarvester): short_name = 'huskiecommons' long_name = 'Huskie Commons' url = 'http://commons.lib.niu.edu' base_url = 'http://commons.lib.niu.edu/oai/request' property_list = ['date', 'identifier', 'type', 'setSpec'] timezone_granularity = True
70587b40766a3c5ab3bfad9639790d332508fb76
dn1/revolver_test.py
dn1/revolver_test.py
__author__ = 'Nino Bašić <nino.basic@fmf.uni-lj.si>' import unittest from revolver import Revolver class RevolverTest(unittest.TestCase): def test_iter(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) self.assertIs(r, iter(r)) def test_revolver_1(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) vrstni_red = [1, 10, 100, 20, 200, 300] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_2(self): r = Revolver([[1], [2], [3], [4], [5]]) vrstni_red = [1, 2, 3, 4, 5] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_3(self): r = Revolver([[], [10, 20], [], [], []]) vrstni_red = [20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_4(self): r = Revolver([[], [], [], [], []]) with self.assertRaises(StopIteration): next(r) def test_revolver_5(self): r = Revolver([[10, 20, 30, 40, 50, 60, 70]]) vrstni_red = [70, 60, 50, 40, 30, 20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_6(self): r = Revolver([[10, 20, 30], [], [400, 500, 600], [], [2000, 3000, 4000]]) vrstni_red = [30, 600, 4000, 20, 500, 3000, 10, 400, 2000] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) if __name__ == '__main__': unittest.main()
Add unittest for Task 3
Add unittest for Task 3
Python
mit
nbasic/racunalnistvo-1
Add unittest for Task 3
__author__ = 'Nino Bašić <nino.basic@fmf.uni-lj.si>' import unittest from revolver import Revolver class RevolverTest(unittest.TestCase): def test_iter(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) self.assertIs(r, iter(r)) def test_revolver_1(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) vrstni_red = [1, 10, 100, 20, 200, 300] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_2(self): r = Revolver([[1], [2], [3], [4], [5]]) vrstni_red = [1, 2, 3, 4, 5] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_3(self): r = Revolver([[], [10, 20], [], [], []]) vrstni_red = [20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_4(self): r = Revolver([[], [], [], [], []]) with self.assertRaises(StopIteration): next(r) def test_revolver_5(self): r = Revolver([[10, 20, 30, 40, 50, 60, 70]]) vrstni_red = [70, 60, 50, 40, 30, 20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_6(self): r = Revolver([[10, 20, 30], [], [400, 500, 600], [], [2000, 3000, 4000]]) vrstni_red = [30, 600, 4000, 20, 500, 3000, 10, 400, 2000] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add unittest for Task 3<commit_after>
__author__ = 'Nino Bašić <nino.basic@fmf.uni-lj.si>' import unittest from revolver import Revolver class RevolverTest(unittest.TestCase): def test_iter(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) self.assertIs(r, iter(r)) def test_revolver_1(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) vrstni_red = [1, 10, 100, 20, 200, 300] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_2(self): r = Revolver([[1], [2], [3], [4], [5]]) vrstni_red = [1, 2, 3, 4, 5] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_3(self): r = Revolver([[], [10, 20], [], [], []]) vrstni_red = [20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_4(self): r = Revolver([[], [], [], [], []]) with self.assertRaises(StopIteration): next(r) def test_revolver_5(self): r = Revolver([[10, 20, 30, 40, 50, 60, 70]]) vrstni_red = [70, 60, 50, 40, 30, 20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_6(self): r = Revolver([[10, 20, 30], [], [400, 500, 600], [], [2000, 3000, 4000]]) vrstni_red = [30, 600, 4000, 20, 500, 3000, 10, 400, 2000] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) if __name__ == '__main__': unittest.main()
Add unittest for Task 3

__author__ = 'Nino Bašić <nino.basic@fmf.uni-lj.si>'

import unittest
from revolver import Revolver


class RevolverTest(unittest.TestCase):
    def test_iter(self):
        r = Revolver([[1], [20, 10], [300, 200, 100]])
        self.assertIs(r, iter(r))

    def test_revolver_1(self):
        r = Revolver([[1], [20, 10], [300, 200, 100]])
        vrstni_red = [1, 10, 100, 20, 200, 300]
        for x in vrstni_red:
            self.assertEqual(x, next(r))
        with self.assertRaises(StopIteration):
            next(r)

    def test_revolver_2(self):
        r = Revolver([[1], [2], [3], [4], [5]])
        vrstni_red = [1, 2, 3, 4, 5]
        for x in vrstni_red:
            self.assertEqual(x, next(r))
        with self.assertRaises(StopIteration):
            next(r)

    def test_revolver_3(self):
        r = Revolver([[], [10, 20], [], [], []])
        vrstni_red = [20, 10]
        for x in vrstni_red:
            self.assertEqual(x, next(r))
        with self.assertRaises(StopIteration):
            next(r)

    def test_revolver_4(self):
        r = Revolver([[], [], [], [], []])
        with self.assertRaises(StopIteration):
            next(r)

    def test_revolver_5(self):
        r = Revolver([[10, 20, 30, 40, 50, 60, 70]])
        vrstni_red = [70, 60, 50, 40, 30, 20, 10]
        for x in vrstni_red:
            self.assertEqual(x, next(r))
        with self.assertRaises(StopIteration):
            next(r)

    def test_revolver_6(self):
        r = Revolver([[10, 20, 30], [], [400, 500, 600], [], [2000, 3000, 4000]])
        vrstni_red = [30, 600, 4000, 20, 500, 3000, 10, 400, 2000]
        for x in vrstni_red:
            self.assertEqual(x, next(r))
        with self.assertRaises(StopIteration):
            next(r)


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Add unittest for Task 3<commit_after>__author__ = 'Nino Bašić <nino.basic@fmf.uni-lj.si>' import unittest from revolver import Revolver class RevolverTest(unittest.TestCase): def test_iter(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) self.assertIs(r, iter(r)) def test_revolver_1(self): r = Revolver([[1], [20, 10], [300, 200, 100]]) vrstni_red = [1, 10, 100, 20, 200, 300] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_2(self): r = Revolver([[1], [2], [3], [4], [5]]) vrstni_red = [1, 2, 3, 4, 5] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_3(self): r = Revolver([[], [10, 20], [], [], []]) vrstni_red = [20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_4(self): r = Revolver([[], [], [], [], []]) with self.assertRaises(StopIteration): next(r) def test_revolver_5(self): r = Revolver([[10, 20, 30, 40, 50, 60, 70]]) vrstni_red = [70, 60, 50, 40, 30, 20, 10] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) def test_revolver_6(self): r = Revolver([[10, 20, 30], [], [400, 500, 600], [], [2000, 3000, 4000]]) vrstni_red = [30, 600, 4000, 20, 500, 3000, 10, 400, 2000] for x in vrstni_red: self.assertEqual(x, next(r)) with self.assertRaises(StopIteration): next(r) if __name__ == '__main__': unittest.main()
7a1981ef9499b12e16d9f7851b76e36a8b8a7f52
Utilities/Maintenance/JREUpdate.py
Utilities/Maintenance/JREUpdate.py
#!/usr/bin/env python description = """ Update the JRE tarballs to be bundled with the SCIFIOImageIO plugin. The OpenJDK JRE (but no the JDK) can be redistributed. It is downloaded at build time from Midas and shipped with the SCIFIOImageIO plugin so that the plugin "just works". The Fiji fellows maintain Git repositories that tracks the OpenJDK JRE. Here we clone that repository and create the JRE tarball from it. Currently, the tarball needs to be uploaded manually to midas3.kitware.com. In the future, pydas can be used for automatic upload. """ import os import subprocess import tarfile import tempfile platforms = ['linux-amd64', 'linux', # Newer MacOX will eventually need a download for Java7 (once the security # vulnerabilities are gone. #'macosx', 'win32', 'win64'] os.chdir(tempfile.gettempdir()) for platform in platforms: call = ['git', 'clone', '--depth', '0', 'git://fiji.sc/java/' + platform + '.git'] subprocess.check_call(call) for platform in platforms: print('Writing tarball for ' + platform + '...') os.chdir(platform) version = os.listdir(os.getcwd())[1] os.chdir(version) tarballFile = version + '.tar.bz2' with tarfile.open(tarballFile, 'w:bz2', dereference=True) as \ tarball: tarball.add('jre') os.chdir(os.path.join('..', '..'))
Add utility script to create JRE tarballs.
ENH: Add utility script to create JRE tarballs. These tarballs will be uploaded to the Midas server to be used by the SCIFIOImageIO module. Change-Id: I180cf0957221762ddf62b14e3712b70fe19e03bb
Python
apache-2.0
PlutoniumHeart/ITK,vfonov/ITK,hendradarwin/ITK,stnava/ITK,fbudin69500/ITK,hjmjohnson/ITK,heimdali/ITK,BlueBrain/ITK,ajjl/ITK,spinicist/ITK,vfonov/ITK,msmolens/ITK,vfonov/ITK,hendradarwin/ITK,stnava/ITK,atsnyder/ITK,richardbeare/ITK,stnava/ITK,LucHermitte/ITK,GEHC-Surgery/ITK,biotrump/ITK,BRAINSia/ITK,hinerm/ITK,spinicist/ITK,heimdali/ITK,richardbeare/ITK,Kitware/ITK,hinerm/ITK,spinicist/ITK,Kitware/ITK,stnava/ITK,jcfr/ITK,thewtex/ITK,stnava/ITK,spinicist/ITK,hjmjohnson/ITK,fbudin69500/ITK,fbudin69500/ITK,msmolens/ITK,vfonov/ITK,fedral/ITK,GEHC-Surgery/ITK,LucHermitte/ITK,hinerm/ITK,Kitware/ITK,zachary-williamson/ITK,atsnyder/ITK,PlutoniumHeart/ITK,richardbeare/ITK,hendradarwin/ITK,fedral/ITK,stnava/ITK,eile/ITK,malaterre/ITK,hinerm/ITK,fbudin69500/ITK,jcfr/ITK,BlueBrain/ITK,InsightSoftwareConsortium/ITK,LucHermitte/ITK,LucasGandel/ITK,stnava/ITK,thewtex/ITK,BlueBrain/ITK,ajjl/ITK,malaterre/ITK,BRAINSia/ITK,hendradarwin/ITK,ajjl/ITK,blowekamp/ITK,biotrump/ITK,zachary-williamson/ITK,LucasGandel/ITK,malaterre/ITK,jcfr/ITK,jmerkow/ITK,eile/ITK,InsightSoftwareConsortium/ITK,hinerm/ITK,fedral/ITK,heimdali/ITK,stnava/ITK,thewtex/ITK,BlueBrain/ITK,jcfr/ITK,biotrump/ITK,LucHermitte/ITK,spinicist/ITK,zachary-williamson/ITK,BlueBrain/ITK,Kitware/ITK,BlueBrain/ITK,PlutoniumHeart/ITK,biotrump/ITK,richardbeare/ITK,malaterre/ITK,ajjl/ITK,hinerm/ITK,hjmjohnson/ITK,blowekamp/ITK,LucasGandel/ITK,biotrump/ITK,hinerm/ITK,LucHermitte/ITK,blowekamp/ITK,malaterre/ITK,Kitware/ITK,eile/ITK,jmerkow/ITK,richardbeare/ITK,zachary-williamson/ITK,fbudin69500/ITK,zachary-williamson/ITK,ajjl/ITK,GEHC-Surgery/ITK,BRAINSia/ITK,InsightSoftwareConsortium/ITK,msmolens/ITK,jmerkow/ITK,msmolens/ITK,LucHermitte/ITK,Kitware/ITK,fbudin69500/ITK,stnava/ITK,fbudin69500/ITK,heimdali/ITK,BRAINSia/ITK,atsnyder/ITK,BlueBrain/ITK,hjmjohnson/ITK,GEHC-Surgery/ITK,fbudin69500/ITK,atsnyder/ITK,eile/ITK,atsnyder/ITK,Kitware/ITK,fedral/ITK,hjmjohnson/ITK,zachary-williamson/ITK,jcfr/ITK,vfonov/ITK,fedral/ITK,thewtex/ITK,msmolens/ITK,hendradarwin/ITK,PlutoniumHeart/ITK,blowekamp/ITK,malaterre/ITK,thewtex/ITK,eile/ITK,hinerm/ITK,InsightSoftwareConsortium/ITK,PlutoniumHeart/ITK,blowekamp/ITK,GEHC-Surgery/ITK,fedral/ITK,GEHC-Surgery/ITK,BlueBrain/ITK,spinicist/ITK,richardbeare/ITK,hinerm/ITK,atsnyder/ITK,spinicist/ITK,hendradarwin/ITK,ajjl/ITK,thewtex/ITK,LucasGandel/ITK,zachary-williamson/ITK,vfonov/ITK,PlutoniumHeart/ITK,ajjl/ITK,GEHC-Surgery/ITK,vfonov/ITK,BRAINSia/ITK,hjmjohnson/ITK,msmolens/ITK,zachary-williamson/ITK,spinicist/ITK,biotrump/ITK,msmolens/ITK,vfonov/ITK,GEHC-Surgery/ITK,LucasGandel/ITK,atsnyder/ITK,malaterre/ITK,fedral/ITK,thewtex/ITK,LucasGandel/ITK,eile/ITK,InsightSoftwareConsortium/ITK,biotrump/ITK,heimdali/ITK,jmerkow/ITK,PlutoniumHeart/ITK,LucHermitte/ITK,atsnyder/ITK,fedral/ITK,jcfr/ITK,eile/ITK,InsightSoftwareConsortium/ITK,blowekamp/ITK,malaterre/ITK,spinicist/ITK,richardbeare/ITK,heimdali/ITK,ajjl/ITK,heimdali/ITK,msmolens/ITK,eile/ITK,jmerkow/ITK,BRAINSia/ITK,hendradarwin/ITK,atsnyder/ITK,jmerkow/ITK,LucHermitte/ITK,eile/ITK,blowekamp/ITK,LucasGandel/ITK,heimdali/ITK,blowekamp/ITK,jcfr/ITK,vfonov/ITK,BRAINSia/ITK,jcfr/ITK,PlutoniumHeart/ITK,biotrump/ITK,LucasGandel/ITK,hendradarwin/ITK,hjmjohnson/ITK,jmerkow/ITK,jmerkow/ITK,InsightSoftwareConsortium/ITK,zachary-williamson/ITK,malaterre/ITK
ENH: Add utility script to create JRE tarballs. These tarballs will be uploaded to the Midas server to be used by the SCIFIOImageIO module. Change-Id: I180cf0957221762ddf62b14e3712b70fe19e03bb
#!/usr/bin/env python description = """ Update the JRE tarballs to be bundled with the SCIFIOImageIO plugin. The OpenJDK JRE (but no the JDK) can be redistributed. It is downloaded at build time from Midas and shipped with the SCIFIOImageIO plugin so that the plugin "just works". The Fiji fellows maintain Git repositories that tracks the OpenJDK JRE. Here we clone that repository and create the JRE tarball from it. Currently, the tarball needs to be uploaded manually to midas3.kitware.com. In the future, pydas can be used for automatic upload. """ import os import subprocess import tarfile import tempfile platforms = ['linux-amd64', 'linux', # Newer MacOX will eventually need a download for Java7 (once the security # vulnerabilities are gone. #'macosx', 'win32', 'win64'] os.chdir(tempfile.gettempdir()) for platform in platforms: call = ['git', 'clone', '--depth', '0', 'git://fiji.sc/java/' + platform + '.git'] subprocess.check_call(call) for platform in platforms: print('Writing tarball for ' + platform + '...') os.chdir(platform) version = os.listdir(os.getcwd())[1] os.chdir(version) tarballFile = version + '.tar.bz2' with tarfile.open(tarballFile, 'w:bz2', dereference=True) as \ tarball: tarball.add('jre') os.chdir(os.path.join('..', '..'))
<commit_before><commit_msg>ENH: Add utility script to create JRE tarballs. These tarballs will be uploaded to the Midas server to be used by the SCIFIOImageIO module. Change-Id: I180cf0957221762ddf62b14e3712b70fe19e03bb<commit_after>
#!/usr/bin/env python description = """ Update the JRE tarballs to be bundled with the SCIFIOImageIO plugin. The OpenJDK JRE (but no the JDK) can be redistributed. It is downloaded at build time from Midas and shipped with the SCIFIOImageIO plugin so that the plugin "just works". The Fiji fellows maintain Git repositories that tracks the OpenJDK JRE. Here we clone that repository and create the JRE tarball from it. Currently, the tarball needs to be uploaded manually to midas3.kitware.com. In the future, pydas can be used for automatic upload. """ import os import subprocess import tarfile import tempfile platforms = ['linux-amd64', 'linux', # Newer MacOX will eventually need a download for Java7 (once the security # vulnerabilities are gone. #'macosx', 'win32', 'win64'] os.chdir(tempfile.gettempdir()) for platform in platforms: call = ['git', 'clone', '--depth', '0', 'git://fiji.sc/java/' + platform + '.git'] subprocess.check_call(call) for platform in platforms: print('Writing tarball for ' + platform + '...') os.chdir(platform) version = os.listdir(os.getcwd())[1] os.chdir(version) tarballFile = version + '.tar.bz2' with tarfile.open(tarballFile, 'w:bz2', dereference=True) as \ tarball: tarball.add('jre') os.chdir(os.path.join('..', '..'))
ENH: Add utility script to create JRE tarballs. These tarballs will be uploaded to the Midas server to be used by the SCIFIOImageIO module. Change-Id: I180cf0957221762ddf62b14e3712b70fe19e03bb

#!/usr/bin/env python

description = """
Update the JRE tarballs to be bundled with the SCIFIOImageIO plugin.

The OpenJDK JRE (but no the JDK) can be redistributed. It is downloaded at
build time from Midas and shipped with the SCIFIOImageIO plugin so that the
plugin "just works".

The Fiji fellows maintain Git repositories that tracks the OpenJDK JRE. Here
we clone that repository and create the JRE tarball from it.

Currently, the tarball needs to be uploaded manually to midas3.kitware.com.
In the future, pydas can be used for automatic upload.
"""

import os
import subprocess
import tarfile
import tempfile

platforms = ['linux-amd64',
             'linux',
# Newer MacOX will eventually need a download for Java7 (once the security
# vulnerabilities are gone.
             #'macosx',
             'win32',
             'win64']

os.chdir(tempfile.gettempdir())

for platform in platforms:
    call = ['git', 'clone', '--depth', '0',
            'git://fiji.sc/java/' + platform + '.git']
    subprocess.check_call(call)

for platform in platforms:
    print('Writing tarball for ' + platform + '...')
    os.chdir(platform)
    version = os.listdir(os.getcwd())[1]
    os.chdir(version)
    tarballFile = version + '.tar.bz2'
    with tarfile.open(tarballFile, 'w:bz2', dereference=True) as \
            tarball:
        tarball.add('jre')
    os.chdir(os.path.join('..', '..'))
<commit_before><commit_msg>ENH: Add utility script to create JRE tarballs. These tarballs will be uploaded to the Midas server to be used by the SCIFIOImageIO module. Change-Id: I180cf0957221762ddf62b14e3712b70fe19e03bb<commit_after>#!/usr/bin/env python description = """ Update the JRE tarballs to be bundled with the SCIFIOImageIO plugin. The OpenJDK JRE (but no the JDK) can be redistributed. It is downloaded at build time from Midas and shipped with the SCIFIOImageIO plugin so that the plugin "just works". The Fiji fellows maintain Git repositories that tracks the OpenJDK JRE. Here we clone that repository and create the JRE tarball from it. Currently, the tarball needs to be uploaded manually to midas3.kitware.com. In the future, pydas can be used for automatic upload. """ import os import subprocess import tarfile import tempfile platforms = ['linux-amd64', 'linux', # Newer MacOX will eventually need a download for Java7 (once the security # vulnerabilities are gone. #'macosx', 'win32', 'win64'] os.chdir(tempfile.gettempdir()) for platform in platforms: call = ['git', 'clone', '--depth', '0', 'git://fiji.sc/java/' + platform + '.git'] subprocess.check_call(call) for platform in platforms: print('Writing tarball for ' + platform + '...') os.chdir(platform) version = os.listdir(os.getcwd())[1] os.chdir(version) tarballFile = version + '.tar.bz2' with tarfile.open(tarballFile, 'w:bz2', dereference=True) as \ tarball: tarball.add('jre') os.chdir(os.path.join('..', '..'))
91b99d08892e8fc758c21a2ddec13ba6f83f4f8b
hackerrank/linked-list/position-from-tail.py
hackerrank/linked-list/position-from-tail.py
# https://www.hackerrank.com/challenges/get-the-value-of-the-node-at-a-specific-position-from-the-tail/forum def GetNode(head, position): curr = head tail = head for i in range(position): tail = tail.next while tail.next is not None: curr = curr.next tail = tail.next return curr.data
Solve hackerrank linked list problem
[algorithm] Solve hackerrank linked list problem This is the solution of "get-the-value-of-the-node-at-a-specific-position-from-the-tail" problem.
Python
mit
honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice,honux77/practice
[algorithm] Solve hackerrank linked list problem This is the solution of "get-the-value-of-the-node-at-a-specific-position-from-the-tail" problem.
# https://www.hackerrank.com/challenges/get-the-value-of-the-node-at-a-specific-position-from-the-tail/forum def GetNode(head, position): curr = head tail = head for i in range(position): tail = tail.next while tail.next is not None: curr = curr.next tail = tail.next return curr.data
<commit_before><commit_msg>[algorithm] Solve hackerrank linked list problem This is the solution of "get-the-value-of-the-node-at-a-specific-position-from-the-tail" problem.<commit_after>
# https://www.hackerrank.com/challenges/get-the-value-of-the-node-at-a-specific-position-from-the-tail/forum def GetNode(head, position): curr = head tail = head for i in range(position): tail = tail.next while tail.next is not None: curr = curr.next tail = tail.next return curr.data
[algorithm] Solve hackerrank linked list problem This is the solution of "get-the-value-of-the-node-at-a-specific-position-from-the-tail" problem.

# https://www.hackerrank.com/challenges/get-the-value-of-the-node-at-a-specific-position-from-the-tail/forum


def GetNode(head, position):
    curr = head
    tail = head
    for i in range(position):
        tail = tail.next
    while tail.next is not None:
        curr = curr.next
        tail = tail.next
    return curr.data
<commit_before><commit_msg>[algorithm] Solve hackerrank linked list problem This is the solution of "get-the-value-of-the-node-at-a-specific-position-from-the-tail" problem.<commit_after># https://www.hackerrank.com/challenges/get-the-value-of-the-node-at-a-specific-position-from-the-tail/forum def GetNode(head, position): curr = head tail = head for i in range(position): tail = tail.next while tail.next is not None: curr = curr.next tail = tail.next return curr.data
52f6f1450299aee86730bb430228bf2ed6bb9774
social/apps/django_app/default/migrations/0002_add_related_name.py
social/apps/django_app/default/migrations/0002_add_related_name.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('default', '0001_initial'), ] operations = [ migrations.AlterField( model_name='usersocialauth', name='user', field=models.ForeignKey(related_name='social_auth', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
Add missing migration for Django app
Add missing migration for Django app
Python
bsd-3-clause
daniula/python-social-auth,python-social-auth/social-app-django,python-social-auth/social-app-django,mrwags/python-social-auth,muhammad-ammar/python-social-auth,clef/python-social-auth,jameslittle/python-social-auth,msampathkumar/python-social-auth,DhiaEddineSaidi/python-social-auth,S01780/python-social-auth,MSOpenTech/python-social-auth,daniula/python-social-auth,joelstanner/python-social-auth,contracode/python-social-auth,merutak/python-social-auth,falcon1kr/python-social-auth,hsr-ba-fs15-dat/python-social-auth,jneves/python-social-auth,cmichal/python-social-auth,drxos/python-social-auth,python-social-auth/social-core,ononeor12/python-social-auth,DhiaEddineSaidi/python-social-auth,webjunkie/python-social-auth,lamby/python-social-auth,JJediny/python-social-auth,wildtetris/python-social-auth,mchdks/python-social-auth,ariestiyansyah/python-social-auth,yprez/python-social-auth,drxos/python-social-auth,JerzySpendel/python-social-auth,merutak/python-social-auth,python-social-auth/social-core,tobias47n9e/social-core,muhammad-ammar/python-social-auth,barseghyanartur/python-social-auth,nirmalvp/python-social-auth,clef/python-social-auth,mchdks/python-social-auth,rsteca/python-social-auth,mathspace/python-social-auth,ononeor12/python-social-auth,iruga090/python-social-auth,mrwags/python-social-auth,noodle-learns-programming/python-social-auth,firstjob/python-social-auth,S01780/python-social-auth,robbiet480/python-social-auth,noodle-learns-programming/python-social-auth,bjorand/python-social-auth,iruga090/python-social-auth,rsalmaso/python-social-auth,webjunkie/python-social-auth,alrusdi/python-social-auth,MSOpenTech/python-social-auth,ByteInternet/python-social-auth,wildtetris/python-social-auth,merutak/python-social-auth,fearlessspider/python-social-auth,rsteca/python-social-auth,lamby/python-social-auth,S01780/python-social-auth,muhammad-ammar/python-social-auth,Andygmb/python-social-auth,JJediny/python-social-auth,yprez/python-social-auth,lneoe/python-social-auth,MSOpenTech/python-social-auth,jeyraof/python-social-auth,jneves/python-social-auth,san-mate/python-social-auth,ByteInternet/python-social-auth,falcon1kr/python-social-auth,python-social-auth/social-app-django,noodle-learns-programming/python-social-auth,joelstanner/python-social-auth,tkajtoch/python-social-auth,degs098/python-social-auth,msampathkumar/python-social-auth,lawrence34/python-social-auth,cjltsod/python-social-auth,VishvajitP/python-social-auth,cjltsod/python-social-auth,webjunkie/python-social-auth,nirmalvp/python-social-auth,lneoe/python-social-auth,wildtetris/python-social-auth,henocdz/python-social-auth,tkajtoch/python-social-auth,nirmalvp/python-social-auth,lawrence34/python-social-auth,contracode/python-social-auth,firstjob/python-social-auth,mchdks/python-social-auth,michael-borisov/python-social-auth,iruga090/python-social-auth,jneves/python-social-auth,mathspace/python-social-auth,ariestiyansyah/python-social-auth,robbiet480/python-social-auth,lneoe/python-social-auth,JJediny/python-social-auth,barseghyanartur/python-social-auth,jameslittle/python-social-auth,joelstanner/python-social-auth,Andygmb/python-social-auth,henocdz/python-social-auth,san-mate/python-social-auth,barseghyanartur/python-social-auth,rsalmaso/python-social-auth,chandolia/python-social-auth,san-mate/python-social-auth,tkajtoch/python-social-auth,lawrence34/python-social-auth,cmichal/python-social-auth,python-social-auth/social-docs,ByteInternet/python-social-auth,bjorand/python-social-auth,rsteca/python-social-auth,michael-borisov/python-social-auth,ariestiyansyah/python-social-auth,python-social-auth/social-app-cherrypy,VishvajitP/python-social-auth,mrwags/python-social-auth,JerzySpendel/python-social-auth,jeyraof/python-social-auth,python-social-auth/social-storage-sqlalchemy,degs098/python-social-auth,contracode/python-social-auth,bjorand/python-social-auth,mathspace/python-social-auth,alrusdi/python-social-auth
Add missing migration for Django app
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('default', '0001_initial'), ] operations = [ migrations.AlterField( model_name='usersocialauth', name='user', field=models.ForeignKey(related_name='social_auth', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
<commit_before><commit_msg>Add missing migration for Django app<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('default', '0001_initial'), ] operations = [ migrations.AlterField( model_name='usersocialauth', name='user', field=models.ForeignKey(related_name='social_auth', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
Add missing migration for Django app

# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        ('default', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='usersocialauth',
            name='user',
            field=models.ForeignKey(related_name='social_auth', to=settings.AUTH_USER_MODEL),
            preserve_default=True,
        ),
    ]
<commit_before><commit_msg>Add missing migration for Django app<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ ('default', '0001_initial'), ] operations = [ migrations.AlterField( model_name='usersocialauth', name='user', field=models.ForeignKey(related_name='social_auth', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
c25b54976a0250863fbf05419d9944e6d337251b
tests/group_test.py
tests/group_test.py
"""Tests for the permutation group class.""" import pickle from drudge import Perm, Group def test_s3_group_correct_and_serializable(): """Test the S3 group. Since the permutation group objects are mostly opaque, here it is tested by its new arguments, which is also further verified to work with pickle. """ cycle = Perm([1, 2, 0]) transp = Perm([1, 0, 2], 1) group = Group([cycle, transp]) args = group.__getnewargs__() assert len(args) == 1 transvs = args[0] assert len(transvs) == 2 top_level = transvs[0] lower_level = transvs[1] assert len(top_level) == 2 target = top_level[0] transv = top_level[1] assert target == 0 assert len(transv) == 2 assert set(i[0][0] for i in transv) == {1, 2} assert len(lower_level) == 2 target = lower_level[0] transv = lower_level[1] assert target == 1 assert len(transv) == 1 perm = transv[0] # This permutation fixes 0, but is not identity. It must be the # transposition of 1 and 2 assert perm[0][0] == 0 assert perm[0][1] == 2 assert perm[0][2] == 1 assert perm[1] == 1 # Args should not change from the group reconstructed from pickle. pickled = pickle.dumps(group) new_group = pickle.loads(pickled) assert new_group.__getnewargs__() == args
Add tests for permutation group class
Add tests for permutation group class The permutation group class is tested with the S3 group. This also tests the correctness of the Schreier-Sims algorithm in libcanon. The picklability is also tested.
Python
mit
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
Add tests for permutation group class The permutation group class is tested with the S3 group. This also tests the correctness of the Schreier-Sims algorithm in libcanon. The picklability is also tested.
"""Tests for the permutation group class.""" import pickle from drudge import Perm, Group def test_s3_group_correct_and_serializable(): """Test the S3 group. Since the permutation group objects are mostly opaque, here it is tested by its new arguments, which is also further verified to work with pickle. """ cycle = Perm([1, 2, 0]) transp = Perm([1, 0, 2], 1) group = Group([cycle, transp]) args = group.__getnewargs__() assert len(args) == 1 transvs = args[0] assert len(transvs) == 2 top_level = transvs[0] lower_level = transvs[1] assert len(top_level) == 2 target = top_level[0] transv = top_level[1] assert target == 0 assert len(transv) == 2 assert set(i[0][0] for i in transv) == {1, 2} assert len(lower_level) == 2 target = lower_level[0] transv = lower_level[1] assert target == 1 assert len(transv) == 1 perm = transv[0] # This permutation fixes 0, but is not identity. It must be the # transposition of 1 and 2 assert perm[0][0] == 0 assert perm[0][1] == 2 assert perm[0][2] == 1 assert perm[1] == 1 # Args should not change from the group reconstructed from pickle. pickled = pickle.dumps(group) new_group = pickle.loads(pickled) assert new_group.__getnewargs__() == args
<commit_before><commit_msg>Add tests for permutation group class The permutation group class is tested with the S3 group. This also tests the correctness of the Schreier-Sims algorithm in libcanon. The picklability is also tested.<commit_after>
"""Tests for the permutation group class.""" import pickle from drudge import Perm, Group def test_s3_group_correct_and_serializable(): """Test the S3 group. Since the permutation group objects are mostly opaque, here it is tested by its new arguments, which is also further verified to work with pickle. """ cycle = Perm([1, 2, 0]) transp = Perm([1, 0, 2], 1) group = Group([cycle, transp]) args = group.__getnewargs__() assert len(args) == 1 transvs = args[0] assert len(transvs) == 2 top_level = transvs[0] lower_level = transvs[1] assert len(top_level) == 2 target = top_level[0] transv = top_level[1] assert target == 0 assert len(transv) == 2 assert set(i[0][0] for i in transv) == {1, 2} assert len(lower_level) == 2 target = lower_level[0] transv = lower_level[1] assert target == 1 assert len(transv) == 1 perm = transv[0] # This permutation fixes 0, but is not identity. It must be the # transposition of 1 and 2 assert perm[0][0] == 0 assert perm[0][1] == 2 assert perm[0][2] == 1 assert perm[1] == 1 # Args should not change from the group reconstructed from pickle. pickled = pickle.dumps(group) new_group = pickle.loads(pickled) assert new_group.__getnewargs__() == args
Add tests for permutation group class The permutation group class is tested with the S3 group. This also tests the correctness of the Schreier-Sims algorithm in libcanon. The picklability is also tested.

"""Tests for the permutation group class."""

import pickle

from drudge import Perm, Group


def test_s3_group_correct_and_serializable():
    """Test the S3 group.

    Since the permutation group objects are mostly opaque, here it is tested
    by its new arguments, which is also further verified to work with pickle.
    """

    cycle = Perm([1, 2, 0])
    transp = Perm([1, 0, 2], 1)
    group = Group([cycle, transp])

    args = group.__getnewargs__()
    assert len(args) == 1
    transvs = args[0]

    assert len(transvs) == 2
    top_level = transvs[0]
    lower_level = transvs[1]

    assert len(top_level) == 2
    target = top_level[0]
    transv = top_level[1]
    assert target == 0
    assert len(transv) == 2
    assert set(i[0][0] for i in transv) == {1, 2}

    assert len(lower_level) == 2
    target = lower_level[0]
    transv = lower_level[1]
    assert target == 1
    assert len(transv) == 1
    perm = transv[0]
    # This permutation fixes 0, but is not identity. It must be the
    # transposition of 1 and 2
    assert perm[0][0] == 0
    assert perm[0][1] == 2
    assert perm[0][2] == 1
    assert perm[1] == 1

    # Args should not change from the group reconstructed from pickle.
    pickled = pickle.dumps(group)
    new_group = pickle.loads(pickled)
    assert new_group.__getnewargs__() == args
<commit_before><commit_msg>Add tests for permutation group class The permutation group class is tested with the S3 group. This also tests the correctness of the Schreier-Sims algorithm in libcanon. The picklability is also tested.<commit_after>"""Tests for the permutation group class.""" import pickle from drudge import Perm, Group def test_s3_group_correct_and_serializable(): """Test the S3 group. Since the permutation group objects are mostly opaque, here it is tested by its new arguments, which is also further verified to work with pickle. """ cycle = Perm([1, 2, 0]) transp = Perm([1, 0, 2], 1) group = Group([cycle, transp]) args = group.__getnewargs__() assert len(args) == 1 transvs = args[0] assert len(transvs) == 2 top_level = transvs[0] lower_level = transvs[1] assert len(top_level) == 2 target = top_level[0] transv = top_level[1] assert target == 0 assert len(transv) == 2 assert set(i[0][0] for i in transv) == {1, 2} assert len(lower_level) == 2 target = lower_level[0] transv = lower_level[1] assert target == 1 assert len(transv) == 1 perm = transv[0] # This permutation fixes 0, but is not identity. It must be the # transposition of 1 and 2 assert perm[0][0] == 0 assert perm[0][1] == 2 assert perm[0][2] == 1 assert perm[1] == 1 # Args should not change from the group reconstructed from pickle. pickled = pickle.dumps(group) new_group = pickle.loads(pickled) assert new_group.__getnewargs__() == args
515dd6c5e21c8911d6c9ed21f438b39706bcf99a
tests/images/test_image_widgets.py
tests/images/test_image_widgets.py
from django.core.files.uploadedfile import SimpleUploadedFile from adhocracy4.images import widgets def test_image_input_delete_presedence(): input = widgets.ImageInputWidget() jpeg_file = SimpleUploadedFile('test.jpg', b'file content', content_type='image/jpeg') data = {'test_image-clear': 'on'} files = {'test_image': jpeg_file} value = input.value_from_datadict(data, files, 'test_image') assert value is False value = input.value_from_datadict(data, {}, 'test_image') assert value is False
Add test for image upload widget
Add test for image upload widget
Python
agpl-3.0
liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4
Add test for image upload widget
from django.core.files.uploadedfile import SimpleUploadedFile from adhocracy4.images import widgets def test_image_input_delete_presedence(): input = widgets.ImageInputWidget() jpeg_file = SimpleUploadedFile('test.jpg', b'file content', content_type='image/jpeg') data = {'test_image-clear': 'on'} files = {'test_image': jpeg_file} value = input.value_from_datadict(data, files, 'test_image') assert value is False value = input.value_from_datadict(data, {}, 'test_image') assert value is False
<commit_before><commit_msg>Add test for image upload widget<commit_after>
from django.core.files.uploadedfile import SimpleUploadedFile from adhocracy4.images import widgets def test_image_input_delete_presedence(): input = widgets.ImageInputWidget() jpeg_file = SimpleUploadedFile('test.jpg', b'file content', content_type='image/jpeg') data = {'test_image-clear': 'on'} files = {'test_image': jpeg_file} value = input.value_from_datadict(data, files, 'test_image') assert value is False value = input.value_from_datadict(data, {}, 'test_image') assert value is False
Add test for image upload widget

from django.core.files.uploadedfile import SimpleUploadedFile

from adhocracy4.images import widgets


def test_image_input_delete_presedence():
    input = widgets.ImageInputWidget()
    jpeg_file = SimpleUploadedFile('test.jpg',
                                   b'file content',
                                   content_type='image/jpeg')
    data = {'test_image-clear': 'on'}
    files = {'test_image': jpeg_file}
    value = input.value_from_datadict(data, files, 'test_image')
    assert value is False
    value = input.value_from_datadict(data, {}, 'test_image')
    assert value is False
<commit_before><commit_msg>Add test for image upload widget<commit_after>from django.core.files.uploadedfile import SimpleUploadedFile from adhocracy4.images import widgets def test_image_input_delete_presedence(): input = widgets.ImageInputWidget() jpeg_file = SimpleUploadedFile('test.jpg', b'file content', content_type='image/jpeg') data = {'test_image-clear': 'on'} files = {'test_image': jpeg_file} value = input.value_from_datadict(data, files, 'test_image') assert value is False value = input.value_from_datadict(data, {}, 'test_image') assert value is False
d5106925d90ac5c0454279c31535381b45958a95
print_liwc_cat.py
print_liwc_cat.py
#!/usr/bin/env python # -*- coding: utf-8 -*- """Script to print all words in a LIWC category. Usage: python print_liwc_cat.py <dictionary file> <category> 2014-11-18 j.vanderzwaan@esciencecenter.nl """ import argparse import codecs if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('dict_file', help='the name of the LIWC dictionary ' 'file') parser.add_argument('liwc_cat', help='the name of the LIWC category to ' 'print the words for') args = parser.parse_args() # Load liwc dict with codecs.open(args.dict_file, 'rb', 'latin1') as f: lines = f.readlines() liwc_categories = {} liwc_dict = {} for line in lines: # LIWC category if line[0].isdigit(): entry = line.split() # remove 0 from strings like 01 c = str(int(entry[0])) liwc_categories[c] = entry[1] # word elif line[0].isalpha(): entry = line.split() term = entry[0] categories = entry[1:] liwc_dict[term] = categories # Make dictionary of the form {liwc category: [word, word, word, ...]} liwc = {} for term, cats in liwc_dict.iteritems(): for c in cats: cat = liwc_categories.get(c) if cat not in liwc.keys(): liwc[cat] = [] liwc[cat].append(term) cat = args.liwc_cat.lower() if liwc.get(cat): print 'LIWC words for {} ({} words)'.format(cat, len(liwc[cat])) print ' - '.join(liwc[cat]) else: print 'Category "{}" not found in LIWC dictionary.'.format(cat)
Add script to print words for LIWC categories
Add script to print words for LIWC categories Given a LIWC dictionary and a category, the script prints the words in the category.
Python
apache-2.0
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
Add script to print words for LIWC categories Given a LIWC dictionary and a category, the script prints the words in the category.
#!/usr/bin/env python # -*- coding: utf-8 -*- """Script to print all words in a LIWC category. Usage: python print_liwc_cat.py <dictionary file> <category> 2014-11-18 j.vanderzwaan@esciencecenter.nl """ import argparse import codecs if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('dict_file', help='the name of the LIWC dictionary ' 'file') parser.add_argument('liwc_cat', help='the name of the LIWC category to ' 'print the words for') args = parser.parse_args() # Load liwc dict with codecs.open(args.dict_file, 'rb', 'latin1') as f: lines = f.readlines() liwc_categories = {} liwc_dict = {} for line in lines: # LIWC category if line[0].isdigit(): entry = line.split() # remove 0 from strings like 01 c = str(int(entry[0])) liwc_categories[c] = entry[1] # word elif line[0].isalpha(): entry = line.split() term = entry[0] categories = entry[1:] liwc_dict[term] = categories # Make dictionary of the form {liwc category: [word, word, word, ...]} liwc = {} for term, cats in liwc_dict.iteritems(): for c in cats: cat = liwc_categories.get(c) if cat not in liwc.keys(): liwc[cat] = [] liwc[cat].append(term) cat = args.liwc_cat.lower() if liwc.get(cat): print 'LIWC words for {} ({} words)'.format(cat, len(liwc[cat])) print ' - '.join(liwc[cat]) else: print 'Category "{}" not found in LIWC dictionary.'.format(cat)
<commit_before><commit_msg>Add script to print words for LIWC categories

Given a LIWC dictionary and a category, the script prints the words in the category.<commit_after>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to print all words in a LIWC category.

Usage: python print_liwc_cat.py <dictionary file> <category>

2014-11-18 j.vanderzwaan@esciencecenter.nl
"""
import argparse
import codecs


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dict_file', help='the name of the LIWC dictionary '
                        'file')
    parser.add_argument('liwc_cat', help='the name of the LIWC category to '
                        'print the words for')
    args = parser.parse_args()

    # Load liwc dict
    with codecs.open(args.dict_file, 'rb', 'latin1') as f:
        lines = f.readlines()

    liwc_categories = {}
    liwc_dict = {}

    for line in lines:
        # LIWC category
        if line[0].isdigit():
            entry = line.split()
            # remove 0 from strings like 01
            c = str(int(entry[0]))
            liwc_categories[c] = entry[1]
        # word
        elif line[0].isalpha():
            entry = line.split()
            term = entry[0]
            categories = entry[1:]
            liwc_dict[term] = categories

    # Make dictionary of the form {liwc category: [word, word, word, ...]}
    liwc = {}
    for term, cats in liwc_dict.iteritems():
        for c in cats:
            cat = liwc_categories.get(c)
            if cat not in liwc.keys():
                liwc[cat] = []
            liwc[cat].append(term)

    cat = args.liwc_cat.lower()
    if liwc.get(cat):
        print 'LIWC words for {} ({} words)'.format(cat, len(liwc[cat]))
        print ' - '.join(liwc[cat])
    else:
        print 'Category "{}" not found in LIWC dictionary.'.format(cat)
Add script to print words for LIWC categories

Given a LIWC dictionary and a category, the script prints the words in the category.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to print all words in a LIWC category.

Usage: python print_liwc_cat.py <dictionary file> <category>

2014-11-18 j.vanderzwaan@esciencecenter.nl
"""
import argparse
import codecs


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dict_file', help='the name of the LIWC dictionary '
                        'file')
    parser.add_argument('liwc_cat', help='the name of the LIWC category to '
                        'print the words for')
    args = parser.parse_args()

    # Load liwc dict
    with codecs.open(args.dict_file, 'rb', 'latin1') as f:
        lines = f.readlines()

    liwc_categories = {}
    liwc_dict = {}

    for line in lines:
        # LIWC category
        if line[0].isdigit():
            entry = line.split()
            # remove 0 from strings like 01
            c = str(int(entry[0]))
            liwc_categories[c] = entry[1]
        # word
        elif line[0].isalpha():
            entry = line.split()
            term = entry[0]
            categories = entry[1:]
            liwc_dict[term] = categories

    # Make dictionary of the form {liwc category: [word, word, word, ...]}
    liwc = {}
    for term, cats in liwc_dict.iteritems():
        for c in cats:
            cat = liwc_categories.get(c)
            if cat not in liwc.keys():
                liwc[cat] = []
            liwc[cat].append(term)

    cat = args.liwc_cat.lower()
    if liwc.get(cat):
        print 'LIWC words for {} ({} words)'.format(cat, len(liwc[cat]))
        print ' - '.join(liwc[cat])
    else:
        print 'Category "{}" not found in LIWC dictionary.'.format(cat)
<commit_before><commit_msg>Add script to print words for LIWC categories

Given a LIWC dictionary and a category, the script prints the words in the category.<commit_after>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to print all words in a LIWC category.

Usage: python print_liwc_cat.py <dictionary file> <category>

2014-11-18 j.vanderzwaan@esciencecenter.nl
"""
import argparse
import codecs


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dict_file', help='the name of the LIWC dictionary '
                        'file')
    parser.add_argument('liwc_cat', help='the name of the LIWC category to '
                        'print the words for')
    args = parser.parse_args()

    # Load liwc dict
    with codecs.open(args.dict_file, 'rb', 'latin1') as f:
        lines = f.readlines()

    liwc_categories = {}
    liwc_dict = {}

    for line in lines:
        # LIWC category
        if line[0].isdigit():
            entry = line.split()
            # remove 0 from strings like 01
            c = str(int(entry[0]))
            liwc_categories[c] = entry[1]
        # word
        elif line[0].isalpha():
            entry = line.split()
            term = entry[0]
            categories = entry[1:]
            liwc_dict[term] = categories

    # Make dictionary of the form {liwc category: [word, word, word, ...]}
    liwc = {}
    for term, cats in liwc_dict.iteritems():
        for c in cats:
            cat = liwc_categories.get(c)
            if cat not in liwc.keys():
                liwc[cat] = []
            liwc[cat].append(term)

    cat = args.liwc_cat.lower()
    if liwc.get(cat):
        print 'LIWC words for {} ({} words)'.format(cat, len(liwc[cat]))
        print ' - '.join(liwc[cat])
    else:
        print 'Category "{}" not found in LIWC dictionary.'.format(cat)
e5ffed089fe5727e7711a685a62100a779f80fd7
tests/test_commands/test_clean.py
tests/test_commands/test_clean.py
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import yaml
import shutil

from unittest import TestCase

from project_generator.commands import export, clean
from .simple_project import project_1_yaml, projects_yaml, project_2_yaml


class TestCleanCommand(TestCase):

    """test clean command"""

    def setUp(self):
        if not os.path.exists('test_workspace'):
            os.makedirs('test_workspace')
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
            f.write(yaml.dump(project_1_yaml, default_flow_style=False))
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_2.yaml'), 'wt') as f:
            f.write(yaml.dump(project_2_yaml, default_flow_style=False))
        # write projects file
        with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
            f.write(yaml.dump(projects_yaml, default_flow_style=False))

        self.parser = argparse.ArgumentParser()
        subparsers = self.parser.add_subparsers(help='commands')
        self.export_subparser = subparsers.add_parser('export', help=export.help)
        self.clean_subparser = subparsers.add_parser('clean', help=export.help)

    def tearDown(self):
        # remove created directory
        shutil.rmtree('test_workspace', ignore_errors=True)

    def test_clean_one_project(self):
        # We first export project, then clean it
        export.setup(self.export_subparser)
        args = self.parser.parse_args(['export','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        export.run(args)
        # this should export a project to generated_projects/uvision_project_2/project_2.uvproj
        assert os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')

        # now clean
        clean.setup(self.clean_subparser)
        args = self.parser.parse_args(['clean','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        result = clean.run(args)
        assert not os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')
        assert not os.path.isdir('generated_projects/uvision_project_2')
Test commands clean - addition
Test commands clean - addition
Python
apache-2.0
molejar/project_generator,project-generator/project_generator,ohagendorf/project_generator,sarahmarshy/project_generator,0xc0170/project_generator,hwfwgrp/project_generator
Test commands clean - addition
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import yaml
import shutil

from unittest import TestCase

from project_generator.commands import export, clean
from .simple_project import project_1_yaml, projects_yaml, project_2_yaml


class TestCleanCommand(TestCase):

    """test clean command"""

    def setUp(self):
        if not os.path.exists('test_workspace'):
            os.makedirs('test_workspace')
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
            f.write(yaml.dump(project_1_yaml, default_flow_style=False))
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_2.yaml'), 'wt') as f:
            f.write(yaml.dump(project_2_yaml, default_flow_style=False))
        # write projects file
        with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
            f.write(yaml.dump(projects_yaml, default_flow_style=False))

        self.parser = argparse.ArgumentParser()
        subparsers = self.parser.add_subparsers(help='commands')
        self.export_subparser = subparsers.add_parser('export', help=export.help)
        self.clean_subparser = subparsers.add_parser('clean', help=export.help)

    def tearDown(self):
        # remove created directory
        shutil.rmtree('test_workspace', ignore_errors=True)

    def test_clean_one_project(self):
        # We first export project, then clean it
        export.setup(self.export_subparser)
        args = self.parser.parse_args(['export','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        export.run(args)
        # this should export a project to generated_projects/uvision_project_2/project_2.uvproj
        assert os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')

        # now clean
        clean.setup(self.clean_subparser)
        args = self.parser.parse_args(['clean','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        result = clean.run(args)
        assert not os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')
        assert not os.path.isdir('generated_projects/uvision_project_2')
<commit_before><commit_msg>Test commands clean - addition<commit_after>
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import yaml
import shutil

from unittest import TestCase

from project_generator.commands import export, clean
from .simple_project import project_1_yaml, projects_yaml, project_2_yaml


class TestCleanCommand(TestCase):

    """test clean command"""

    def setUp(self):
        if not os.path.exists('test_workspace'):
            os.makedirs('test_workspace')
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
            f.write(yaml.dump(project_1_yaml, default_flow_style=False))
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_2.yaml'), 'wt') as f:
            f.write(yaml.dump(project_2_yaml, default_flow_style=False))
        # write projects file
        with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
            f.write(yaml.dump(projects_yaml, default_flow_style=False))

        self.parser = argparse.ArgumentParser()
        subparsers = self.parser.add_subparsers(help='commands')
        self.export_subparser = subparsers.add_parser('export', help=export.help)
        self.clean_subparser = subparsers.add_parser('clean', help=export.help)

    def tearDown(self):
        # remove created directory
        shutil.rmtree('test_workspace', ignore_errors=True)

    def test_clean_one_project(self):
        # We first export project, then clean it
        export.setup(self.export_subparser)
        args = self.parser.parse_args(['export','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        export.run(args)
        # this should export a project to generated_projects/uvision_project_2/project_2.uvproj
        assert os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')

        # now clean
        clean.setup(self.clean_subparser)
        args = self.parser.parse_args(['clean','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        result = clean.run(args)
        assert not os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')
        assert not os.path.isdir('generated_projects/uvision_project_2')
Test commands clean - addition
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import yaml
import shutil

from unittest import TestCase

from project_generator.commands import export, clean
from .simple_project import project_1_yaml, projects_yaml, project_2_yaml


class TestCleanCommand(TestCase):

    """test clean command"""

    def setUp(self):
        if not os.path.exists('test_workspace'):
            os.makedirs('test_workspace')
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
            f.write(yaml.dump(project_1_yaml, default_flow_style=False))
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_2.yaml'), 'wt') as f:
            f.write(yaml.dump(project_2_yaml, default_flow_style=False))
        # write projects file
        with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
            f.write(yaml.dump(projects_yaml, default_flow_style=False))

        self.parser = argparse.ArgumentParser()
        subparsers = self.parser.add_subparsers(help='commands')
        self.export_subparser = subparsers.add_parser('export', help=export.help)
        self.clean_subparser = subparsers.add_parser('clean', help=export.help)

    def tearDown(self):
        # remove created directory
        shutil.rmtree('test_workspace', ignore_errors=True)

    def test_clean_one_project(self):
        # We first export project, then clean it
        export.setup(self.export_subparser)
        args = self.parser.parse_args(['export','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        export.run(args)
        # this should export a project to generated_projects/uvision_project_2/project_2.uvproj
        assert os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')

        # now clean
        clean.setup(self.clean_subparser)
        args = self.parser.parse_args(['clean','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        result = clean.run(args)
        assert not os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')
        assert not os.path.isdir('generated_projects/uvision_project_2')
<commit_before><commit_msg>Test commands clean - addition<commit_after>
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import yaml
import shutil

from unittest import TestCase

from project_generator.commands import export, clean
from .simple_project import project_1_yaml, projects_yaml, project_2_yaml


class TestCleanCommand(TestCase):

    """test clean command"""

    def setUp(self):
        if not os.path.exists('test_workspace'):
            os.makedirs('test_workspace')
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_1.yaml'), 'wt') as f:
            f.write(yaml.dump(project_1_yaml, default_flow_style=False))
        # write project file
        with open(os.path.join(os.getcwd(), 'test_workspace/project_2.yaml'), 'wt') as f:
            f.write(yaml.dump(project_2_yaml, default_flow_style=False))
        # write projects file
        with open(os.path.join(os.getcwd(), 'test_workspace/projects.yaml'), 'wt') as f:
            f.write(yaml.dump(projects_yaml, default_flow_style=False))

        self.parser = argparse.ArgumentParser()
        subparsers = self.parser.add_subparsers(help='commands')
        self.export_subparser = subparsers.add_parser('export', help=export.help)
        self.clean_subparser = subparsers.add_parser('clean', help=export.help)

    def tearDown(self):
        # remove created directory
        shutil.rmtree('test_workspace', ignore_errors=True)

    def test_clean_one_project(self):
        # We first export project, then clean it
        export.setup(self.export_subparser)
        args = self.parser.parse_args(['export','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        export.run(args)
        # this should export a project to generated_projects/uvision_project_2/project_2.uvproj
        assert os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')

        # now clean
        clean.setup(self.clean_subparser)
        args = self.parser.parse_args(['clean','-f','test_workspace/projects.yaml','-p','project_2', '-t', 'uvision'])
        result = clean.run(args)
        assert not os.path.isfile('generated_projects/uvision_project_2/project_2.uvproj')
        assert not os.path.isdir('generated_projects/uvision_project_2')
7a2b962012067d509a07d00dfd55319c428585ff
osf/migrations/0174_add_ab_testing_home_page_version_b_flag.py
osf/migrations/0174_add_ab_testing_home_page_version_b_flag.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-07-23 14:26
from __future__ import unicode_literals

from django.db import migrations

from osf import features
from osf.utils.migrations import AddWaffleFlags


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0173_ensure_schemas'),
    ]

    operations = [
        AddWaffleFlags([features.EMBER_AB_TESTING_HOME_PAGE_VERSION_B]),
    ]
Rename migration and fix dependency
Rename migration and fix dependency
Python
apache-2.0
mfraezz/osf.io,saradbowman/osf.io,cslzchen/osf.io,aaxelb/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,baylee-d/osf.io,felliott/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,adlius/osf.io,baylee-d/osf.io,felliott/osf.io,mfraezz/osf.io,adlius/osf.io,felliott/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,adlius/osf.io,mfraezz/osf.io,aaxelb/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,felliott/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,baylee-d/osf.io,Johnetordoff/osf.io
Rename migration and fix dependency
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-07-23 14:26
from __future__ import unicode_literals

from django.db import migrations

from osf import features
from osf.utils.migrations import AddWaffleFlags


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0173_ensure_schemas'),
    ]

    operations = [
        AddWaffleFlags([features.EMBER_AB_TESTING_HOME_PAGE_VERSION_B]),
    ]
<commit_before><commit_msg>Rename migration and fix dependency<commit_after>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-07-23 14:26
from __future__ import unicode_literals

from django.db import migrations

from osf import features
from osf.utils.migrations import AddWaffleFlags


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0173_ensure_schemas'),
    ]

    operations = [
        AddWaffleFlags([features.EMBER_AB_TESTING_HOME_PAGE_VERSION_B]),
    ]
Rename migration and fix dependency
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-07-23 14:26
from __future__ import unicode_literals

from django.db import migrations

from osf import features
from osf.utils.migrations import AddWaffleFlags


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0173_ensure_schemas'),
    ]

    operations = [
        AddWaffleFlags([features.EMBER_AB_TESTING_HOME_PAGE_VERSION_B]),
    ]
<commit_before><commit_msg>Rename migration and fix dependency<commit_after>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.13 on 2019-07-23 14:26
from __future__ import unicode_literals

from django.db import migrations

from osf import features
from osf.utils.migrations import AddWaffleFlags


class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0173_ensure_schemas'),
    ]

    operations = [
        AddWaffleFlags([features.EMBER_AB_TESTING_HOME_PAGE_VERSION_B]),
    ]
fbc4646c791d37e3360cac30ef590e5a242312d9
tests/PlanetPopulation/test_EarthTwinHabZone.py
tests/PlanetPopulation/test_EarthTwinHabZone.py
r"""Test code for modules EarthTwinHabZone1 and EarthTwinHabZone2 within EXOSIMS PlanetPopulation. Cate Liu, IPAC, 2016""" import unittest import EXOSIMS from EXOSIMS import MissionSim from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation from EXOSIMS.PlanetPopulation.EarthTwinHabZone1 import EarthTwinHabZone1 from EXOSIMS.PlanetPopulation.EarthTwinHabZone2 import EarthTwinHabZone2 import os import numpy as np from astropy import units as u import scipy.stats class TestEarthTwinHabZone(unittest.TestCase): def setUp(self): self.spec = {'modules':{'PlanetPhysicalModel': ''}} pass def tearDown(self): pass def test_gen_plan_params1(self): r"""Test generated planet parameters: Expected: all 1 R_E, all p = 0.67, e = 0, and uniform a in arange """ obj = EarthTwinHabZone1(**self.spec) x = 10000 a, e, p, Rp = obj.gen_plan_params(x) assert(np.all(e == 0)) assert(np.all(p == 0.367)) assert(np.all(Rp == 1.0*u.R_earth)) h = np.histogram(a.to('AU').value,100,density=True) chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(obj.arange.to('AU').value)[0]]*len(h[0])) self.assertGreater(chi2[1], 0.95) def test_gen_plan_params2(self): r"""Test generated planet parameters: Expected: all 1 R_E, all p = 0.67, e = 0, and uniform a,e in arange,erange """ obj = EarthTwinHabZone2(constrainOrbits=False,erange=[0.1,0.5],**self.spec) x = 10000 a, e, p, Rp = obj.gen_plan_params(x) assert(np.all(p == 0.367)) assert(np.all(Rp == 1.0*u.R_earth)) for param,param_range in zip([a.value,e],[obj.arange.value,obj.erange]): h = np.histogram(param,100,density=True) chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(param_range)[0]]*len(h[0])) self.assertGreater(chi2[1], 0.95) if __name__ == "__main__": unittest.main()
Put EarthTwinHabZone tests in same new file/class
Put EarthTwinHabZone tests in same new file/class
Python
bsd-3-clause
dsavransky/EXOSIMS,dsavransky/EXOSIMS
Put EarthTwinHabZone tests in same new file/class
r"""Test code for modules EarthTwinHabZone1 and EarthTwinHabZone2 within EXOSIMS PlanetPopulation. Cate Liu, IPAC, 2016""" import unittest import EXOSIMS from EXOSIMS import MissionSim from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation from EXOSIMS.PlanetPopulation.EarthTwinHabZone1 import EarthTwinHabZone1 from EXOSIMS.PlanetPopulation.EarthTwinHabZone2 import EarthTwinHabZone2 import os import numpy as np from astropy import units as u import scipy.stats class TestEarthTwinHabZone(unittest.TestCase): def setUp(self): self.spec = {'modules':{'PlanetPhysicalModel': ''}} pass def tearDown(self): pass def test_gen_plan_params1(self): r"""Test generated planet parameters: Expected: all 1 R_E, all p = 0.67, e = 0, and uniform a in arange """ obj = EarthTwinHabZone1(**self.spec) x = 10000 a, e, p, Rp = obj.gen_plan_params(x) assert(np.all(e == 0)) assert(np.all(p == 0.367)) assert(np.all(Rp == 1.0*u.R_earth)) h = np.histogram(a.to('AU').value,100,density=True) chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(obj.arange.to('AU').value)[0]]*len(h[0])) self.assertGreater(chi2[1], 0.95) def test_gen_plan_params2(self): r"""Test generated planet parameters: Expected: all 1 R_E, all p = 0.67, e = 0, and uniform a,e in arange,erange """ obj = EarthTwinHabZone2(constrainOrbits=False,erange=[0.1,0.5],**self.spec) x = 10000 a, e, p, Rp = obj.gen_plan_params(x) assert(np.all(p == 0.367)) assert(np.all(Rp == 1.0*u.R_earth)) for param,param_range in zip([a.value,e],[obj.arange.value,obj.erange]): h = np.histogram(param,100,density=True) chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(param_range)[0]]*len(h[0])) self.assertGreater(chi2[1], 0.95) if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Put EarthTwinHabZone tests in same new file/class<commit_after>
r"""Test code for modules EarthTwinHabZone1 and EarthTwinHabZone2 within EXOSIMS PlanetPopulation. Cate Liu, IPAC, 2016""" import unittest import EXOSIMS from EXOSIMS import MissionSim from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation from EXOSIMS.PlanetPopulation.EarthTwinHabZone1 import EarthTwinHabZone1 from EXOSIMS.PlanetPopulation.EarthTwinHabZone2 import EarthTwinHabZone2 import os import numpy as np from astropy import units as u import scipy.stats class TestEarthTwinHabZone(unittest.TestCase): def setUp(self): self.spec = {'modules':{'PlanetPhysicalModel': ''}} pass def tearDown(self): pass def test_gen_plan_params1(self): r"""Test generated planet parameters: Expected: all 1 R_E, all p = 0.67, e = 0, and uniform a in arange """ obj = EarthTwinHabZone1(**self.spec) x = 10000 a, e, p, Rp = obj.gen_plan_params(x) assert(np.all(e == 0)) assert(np.all(p == 0.367)) assert(np.all(Rp == 1.0*u.R_earth)) h = np.histogram(a.to('AU').value,100,density=True) chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(obj.arange.to('AU').value)[0]]*len(h[0])) self.assertGreater(chi2[1], 0.95) def test_gen_plan_params2(self): r"""Test generated planet parameters: Expected: all 1 R_E, all p = 0.67, e = 0, and uniform a,e in arange,erange """ obj = EarthTwinHabZone2(constrainOrbits=False,erange=[0.1,0.5],**self.spec) x = 10000 a, e, p, Rp = obj.gen_plan_params(x) assert(np.all(p == 0.367)) assert(np.all(Rp == 1.0*u.R_earth)) for param,param_range in zip([a.value,e],[obj.arange.value,obj.erange]): h = np.histogram(param,100,density=True) chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(param_range)[0]]*len(h[0])) self.assertGreater(chi2[1], 0.95) if __name__ == "__main__": unittest.main()
Put EarthTwinHabZone tests in same new file/class
r"""Test code for modules EarthTwinHabZone1 and EarthTwinHabZone2
within EXOSIMS PlanetPopulation.

Cate Liu, IPAC, 2016"""

import unittest
import EXOSIMS
from EXOSIMS import MissionSim
from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation
from EXOSIMS.PlanetPopulation.EarthTwinHabZone1 import EarthTwinHabZone1
from EXOSIMS.PlanetPopulation.EarthTwinHabZone2 import EarthTwinHabZone2
import os
import numpy as np
from astropy import units as u
import scipy.stats


class TestEarthTwinHabZone(unittest.TestCase):

    def setUp(self):
        self.spec = {'modules':{'PlanetPhysicalModel': ''}}
        pass

    def tearDown(self):
        pass

    def test_gen_plan_params1(self):
        r"""Test generated planet parameters:
        Expected: all 1 R_E, all p = 0.367, e = 0, and uniform a in arange
        """
        obj = EarthTwinHabZone1(**self.spec)
        x = 10000
        a, e, p, Rp = obj.gen_plan_params(x)
        assert(np.all(e == 0))
        assert(np.all(p == 0.367))
        assert(np.all(Rp == 1.0*u.R_earth))
        h = np.histogram(a.to('AU').value,100,density=True)
        chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(obj.arange.to('AU').value)[0]]*len(h[0]))
        self.assertGreater(chi2[1], 0.95)

    def test_gen_plan_params2(self):
        r"""Test generated planet parameters:
        Expected: all 1 R_E, all p = 0.367, and uniform a,e in arange,erange
        """
        obj = EarthTwinHabZone2(constrainOrbits=False,erange=[0.1,0.5],**self.spec)
        x = 10000
        a, e, p, Rp = obj.gen_plan_params(x)
        assert(np.all(p == 0.367))
        assert(np.all(Rp == 1.0*u.R_earth))
        for param,param_range in zip([a.value,e],[obj.arange.value,obj.erange]):
            h = np.histogram(param,100,density=True)
            chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(param_range)[0]]*len(h[0]))
            self.assertGreater(chi2[1], 0.95)


if __name__ == "__main__":
    unittest.main()
<commit_before><commit_msg>Put EarthTwinHabZone tests in same new file/class<commit_after>
r"""Test code for modules EarthTwinHabZone1 and EarthTwinHabZone2
within EXOSIMS PlanetPopulation.

Cate Liu, IPAC, 2016"""

import unittest
import EXOSIMS
from EXOSIMS import MissionSim
from EXOSIMS.Prototypes.PlanetPopulation import PlanetPopulation
from EXOSIMS.PlanetPopulation.EarthTwinHabZone1 import EarthTwinHabZone1
from EXOSIMS.PlanetPopulation.EarthTwinHabZone2 import EarthTwinHabZone2
import os
import numpy as np
from astropy import units as u
import scipy.stats


class TestEarthTwinHabZone(unittest.TestCase):

    def setUp(self):
        self.spec = {'modules':{'PlanetPhysicalModel': ''}}
        pass

    def tearDown(self):
        pass

    def test_gen_plan_params1(self):
        r"""Test generated planet parameters:
        Expected: all 1 R_E, all p = 0.367, e = 0, and uniform a in arange
        """
        obj = EarthTwinHabZone1(**self.spec)
        x = 10000
        a, e, p, Rp = obj.gen_plan_params(x)
        assert(np.all(e == 0))
        assert(np.all(p == 0.367))
        assert(np.all(Rp == 1.0*u.R_earth))
        h = np.histogram(a.to('AU').value,100,density=True)
        chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(obj.arange.to('AU').value)[0]]*len(h[0]))
        self.assertGreater(chi2[1], 0.95)

    def test_gen_plan_params2(self):
        r"""Test generated planet parameters:
        Expected: all 1 R_E, all p = 0.367, and uniform a,e in arange,erange
        """
        obj = EarthTwinHabZone2(constrainOrbits=False,erange=[0.1,0.5],**self.spec)
        x = 10000
        a, e, p, Rp = obj.gen_plan_params(x)
        assert(np.all(p == 0.367))
        assert(np.all(Rp == 1.0*u.R_earth))
        for param,param_range in zip([a.value,e],[obj.arange.value,obj.erange]):
            h = np.histogram(param,100,density=True)
            chi2 = scipy.stats.chisquare(h[0],[1.0/np.diff(param_range)[0]]*len(h[0]))
            self.assertGreater(chi2[1], 0.95)


if __name__ == "__main__":
    unittest.main()
684c208bfec727cd66bd4a2eccedbfeea1c59d0a
alembic/versions/32ccd6db0955_add_quantity_to_shopping_items.py
alembic/versions/32ccd6db0955_add_quantity_to_shopping_items.py
"""Add quantity to shopping items Revision ID: 32ccd6db0955 Revises: 430dc4ed6dd6 Create Date: 2015-01-06 21:59:03.761529 """ # revision identifiers, used by Alembic. revision = '32ccd6db0955' down_revision = '430dc4ed6dd6' from alembic import op import sqlalchemy as sa def upgrade(): pass def downgrade(): pass
Add quantity to shopping items
Add quantity to shopping items
Python
mit
jlutz777/FreeStore,jlutz777/FreeStore,jlutz777/FreeStore
Add quantity to shopping items
"""Add quantity to shopping items Revision ID: 32ccd6db0955 Revises: 430dc4ed6dd6 Create Date: 2015-01-06 21:59:03.761529 """ # revision identifiers, used by Alembic. revision = '32ccd6db0955' down_revision = '430dc4ed6dd6' from alembic import op import sqlalchemy as sa def upgrade(): pass def downgrade(): pass
<commit_before><commit_msg>Add quantity to shopping items<commit_after>
"""Add quantity to shopping items Revision ID: 32ccd6db0955 Revises: 430dc4ed6dd6 Create Date: 2015-01-06 21:59:03.761529 """ # revision identifiers, used by Alembic. revision = '32ccd6db0955' down_revision = '430dc4ed6dd6' from alembic import op import sqlalchemy as sa def upgrade(): pass def downgrade(): pass
Add quantity to shopping items"""Add quantity to shopping items Revision ID: 32ccd6db0955 Revises: 430dc4ed6dd6 Create Date: 2015-01-06 21:59:03.761529 """ # revision identifiers, used by Alembic. revision = '32ccd6db0955' down_revision = '430dc4ed6dd6' from alembic import op import sqlalchemy as sa def upgrade(): pass def downgrade(): pass
<commit_before><commit_msg>Add quantity to shopping items<commit_after>
"""Add quantity to shopping items

Revision ID: 32ccd6db0955
Revises: 430dc4ed6dd6
Create Date: 2015-01-06 21:59:03.761529

"""

# revision identifiers, used by Alembic.
revision = '32ccd6db0955'
down_revision = '430dc4ed6dd6'

from alembic import op
import sqlalchemy as sa


def upgrade():
    pass


def downgrade():
    pass
0915d71c8d243cd2a7c8c231461458d961a510bf
zou/migrations/versions/a23682ccc1f1_.py
zou/migrations/versions/a23682ccc1f1_.py
"""empty message Revision ID: a23682ccc1f1 Revises: 9bd17364fc18 Create Date: 2018-04-20 10:39:31.976959 """ from alembic import op import sqlalchemy as sa import sqlalchemy_utils # revision identifiers, used by Alembic. revision = 'a23682ccc1f1' down_revision = '9bd17364fc18' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('desktop_login_log', sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), sa.Column('date', sa.DateTime(), nullable=False), sa.ForeignKeyConstraint(['person_id'], ['person.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_desktop_login_log_person_id'), 'desktop_login_log', ['person_id'], unique=False) op.add_column('asset_instance', sa.Column('active', sa.Boolean(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('asset_instance', 'active') op.drop_index(op.f('ix_desktop_login_log_person_id'), table_name='desktop_login_log') op.drop_table('desktop_login_log') # ### end Alembic commands ###
Add migration scripts for DB schema
Add migration scripts for DB schema

* Add table to log desktop logins
* Add active field for asset instance
Python
agpl-3.0
cgwire/zou
Add migration scripts for DB schema

* Add table to log desktop logins
* Add active field for asset instance
"""empty message Revision ID: a23682ccc1f1 Revises: 9bd17364fc18 Create Date: 2018-04-20 10:39:31.976959 """ from alembic import op import sqlalchemy as sa import sqlalchemy_utils # revision identifiers, used by Alembic. revision = 'a23682ccc1f1' down_revision = '9bd17364fc18' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('desktop_login_log', sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), sa.Column('date', sa.DateTime(), nullable=False), sa.ForeignKeyConstraint(['person_id'], ['person.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_desktop_login_log_person_id'), 'desktop_login_log', ['person_id'], unique=False) op.add_column('asset_instance', sa.Column('active', sa.Boolean(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('asset_instance', 'active') op.drop_index(op.f('ix_desktop_login_log_person_id'), table_name='desktop_login_log') op.drop_table('desktop_login_log') # ### end Alembic commands ###
<commit_before><commit_msg>Add migration scripts for DB schema

* Add table to log desktop logins
* Add active field for asset instance<commit_after>
"""empty message Revision ID: a23682ccc1f1 Revises: 9bd17364fc18 Create Date: 2018-04-20 10:39:31.976959 """ from alembic import op import sqlalchemy as sa import sqlalchemy_utils # revision identifiers, used by Alembic. revision = 'a23682ccc1f1' down_revision = '9bd17364fc18' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('desktop_login_log', sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False), sa.Column('date', sa.DateTime(), nullable=False), sa.ForeignKeyConstraint(['person_id'], ['person.id'], ), sa.PrimaryKeyConstraint('id') ) op.create_index(op.f('ix_desktop_login_log_person_id'), 'desktop_login_log', ['person_id'], unique=False) op.add_column('asset_instance', sa.Column('active', sa.Boolean(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('asset_instance', 'active') op.drop_index(op.f('ix_desktop_login_log_person_id'), table_name='desktop_login_log') op.drop_table('desktop_login_log') # ### end Alembic commands ###
Add migration scripts for DB schema

* Add table to log desktop logins
* Add active field for asset instance
"""empty message

Revision ID: a23682ccc1f1
Revises: 9bd17364fc18
Create Date: 2018-04-20 10:39:31.976959

"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils

# revision identifiers, used by Alembic.
revision = 'a23682ccc1f1'
down_revision = '9bd17364fc18'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('desktop_login_log',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.Column('date', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['person_id'], ['person.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_desktop_login_log_person_id'), 'desktop_login_log', ['person_id'], unique=False)
    op.add_column('asset_instance', sa.Column('active', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('asset_instance', 'active')
    op.drop_index(op.f('ix_desktop_login_log_person_id'), table_name='desktop_login_log')
    op.drop_table('desktop_login_log')
    # ### end Alembic commands ###
<commit_before><commit_msg>Add migration scripts for DB schema

* Add table to log desktop logins
* Add active field for asset instance<commit_after>
"""empty message

Revision ID: a23682ccc1f1
Revises: 9bd17364fc18
Create Date: 2018-04-20 10:39:31.976959

"""
from alembic import op
import sqlalchemy as sa
import sqlalchemy_utils

# revision identifiers, used by Alembic.
revision = 'a23682ccc1f1'
down_revision = '9bd17364fc18'
branch_labels = None
depends_on = None


def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('desktop_login_log',
        sa.Column('id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('person_id', sqlalchemy_utils.types.uuid.UUIDType(binary=False), nullable=False),
        sa.Column('date', sa.DateTime(), nullable=False),
        sa.ForeignKeyConstraint(['person_id'], ['person.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_desktop_login_log_person_id'), 'desktop_login_log', ['person_id'], unique=False)
    op.add_column('asset_instance', sa.Column('active', sa.Boolean(), nullable=True))
    # ### end Alembic commands ###


def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('asset_instance', 'active')
    op.drop_index(op.f('ix_desktop_login_log_person_id'), table_name='desktop_login_log')
    op.drop_table('desktop_login_log')
    # ### end Alembic commands ###
466622e95e5243a2d5a9d6a12f1b7735c4fb5637
test/test_automated_analysis.py
test/test_automated_analysis.py
"""This directory is setup with configurations to run the main functional test. It exercises a full analysis pipeline on a smaller subset of data. """ import os import subprocess import unittest import shutil import contextlib import collections import functools from nose import SkipTest from nose.plugins.attrib import attr import yaml @contextlib.contextmanager def make_workdir(): remove_old_dir = True dirname = os.path.join(os.path.dirname(__file__), "test_automated_output") if remove_old_dir: if os.path.exists(dirname): shutil.rmtree(dirname) os.makedirs(dirname) orig_dir = os.getcwd() try: os.chdir(dirname) yield dirname finally: os.chdir(orig_dir) def expected_failure(test): """Small decorator to mark tests as expected failure. Useful for tests that are work-in-progress. """ @functools.wraps(test) def inner(*args, **kwargs): try: test(*args, **kwargs) except Exception: raise SkipTest else: raise AssertionError('Failure expected') return inner class AutomatedAnalysisTest(unittest.TestCase): """Setup a full automated analysis and run the pipeline. """ def setUp(self): self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated") def _install_test_files(self, data_dir): """Download required sequence and reference files. """ # self._download_to_dir(url, dirname) def _download_to_dir(self, url, dirname): print dirname cl = ["wget", url] subprocess.check_call(cl) cl = ["tar", "-xzvpf", os.path.basename(url)] subprocess.check_call(cl) shutil.move(os.path.basename(dirname), dirname) os.remove(os.path.basename(url)) @attr(complete=True) @attr(cluster=True) def test_srnaseq_star(self): """Run cluster analysis """ # self._install_test_files(self.data_dir) with make_workdir() as workdir: cl = ["seqcluster", "cluster", "-m", "../../data/examples/cluster/seqs.ma", "-a", "../../data/examples/clusters/seqs.bam", "-gtf", "../../data/examples/cluster/annotation_red.gtf", "-o", "test_out_res"] subprocess.check_call(cl)
Add automated test inspired in bcbio-nextgen code
Add automated test inspired in bcbio-nextgen code
Python
mit
lpantano/seqcluster,lpantano/seqcluster,lpantano/seqcluster,lpantano/seqcluster,lpantano/seqcluster
Add automated test inspired in bcbio-nextgen code
"""This directory is setup with configurations to run the main functional test. It exercises a full analysis pipeline on a smaller subset of data. """ import os import subprocess import unittest import shutil import contextlib import collections import functools from nose import SkipTest from nose.plugins.attrib import attr import yaml @contextlib.contextmanager def make_workdir(): remove_old_dir = True dirname = os.path.join(os.path.dirname(__file__), "test_automated_output") if remove_old_dir: if os.path.exists(dirname): shutil.rmtree(dirname) os.makedirs(dirname) orig_dir = os.getcwd() try: os.chdir(dirname) yield dirname finally: os.chdir(orig_dir) def expected_failure(test): """Small decorator to mark tests as expected failure. Useful for tests that are work-in-progress. """ @functools.wraps(test) def inner(*args, **kwargs): try: test(*args, **kwargs) except Exception: raise SkipTest else: raise AssertionError('Failure expected') return inner class AutomatedAnalysisTest(unittest.TestCase): """Setup a full automated analysis and run the pipeline. """ def setUp(self): self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated") def _install_test_files(self, data_dir): """Download required sequence and reference files. """ # self._download_to_dir(url, dirname) def _download_to_dir(self, url, dirname): print dirname cl = ["wget", url] subprocess.check_call(cl) cl = ["tar", "-xzvpf", os.path.basename(url)] subprocess.check_call(cl) shutil.move(os.path.basename(dirname), dirname) os.remove(os.path.basename(url)) @attr(complete=True) @attr(cluster=True) def test_srnaseq_star(self): """Run cluster analysis """ # self._install_test_files(self.data_dir) with make_workdir() as workdir: cl = ["seqcluster", "cluster", "-m", "../../data/examples/cluster/seqs.ma", "-a", "../../data/examples/clusters/seqs.bam", "-gtf", "../../data/examples/cluster/annotation_red.gtf", "-o", "test_out_res"] subprocess.check_call(cl)
<commit_before><commit_msg>Add automated test inspired in bcbio-nextgen code<commit_after>
"""This directory is setup with configurations to run the main functional test. It exercises a full analysis pipeline on a smaller subset of data. """ import os import subprocess import unittest import shutil import contextlib import collections import functools from nose import SkipTest from nose.plugins.attrib import attr import yaml @contextlib.contextmanager def make_workdir(): remove_old_dir = True dirname = os.path.join(os.path.dirname(__file__), "test_automated_output") if remove_old_dir: if os.path.exists(dirname): shutil.rmtree(dirname) os.makedirs(dirname) orig_dir = os.getcwd() try: os.chdir(dirname) yield dirname finally: os.chdir(orig_dir) def expected_failure(test): """Small decorator to mark tests as expected failure. Useful for tests that are work-in-progress. """ @functools.wraps(test) def inner(*args, **kwargs): try: test(*args, **kwargs) except Exception: raise SkipTest else: raise AssertionError('Failure expected') return inner class AutomatedAnalysisTest(unittest.TestCase): """Setup a full automated analysis and run the pipeline. """ def setUp(self): self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated") def _install_test_files(self, data_dir): """Download required sequence and reference files. """ # self._download_to_dir(url, dirname) def _download_to_dir(self, url, dirname): print dirname cl = ["wget", url] subprocess.check_call(cl) cl = ["tar", "-xzvpf", os.path.basename(url)] subprocess.check_call(cl) shutil.move(os.path.basename(dirname), dirname) os.remove(os.path.basename(url)) @attr(complete=True) @attr(cluster=True) def test_srnaseq_star(self): """Run cluster analysis """ # self._install_test_files(self.data_dir) with make_workdir() as workdir: cl = ["seqcluster", "cluster", "-m", "../../data/examples/cluster/seqs.ma", "-a", "../../data/examples/clusters/seqs.bam", "-gtf", "../../data/examples/cluster/annotation_red.gtf", "-o", "test_out_res"] subprocess.check_call(cl)
Add automated test inspired in bcbio-nextgen code
"""This directory is setup with configurations to run the main functional test.

It exercises a full analysis pipeline on a smaller subset of data.
"""
import os
import subprocess
import unittest
import shutil
import contextlib
import collections
import functools

from nose import SkipTest
from nose.plugins.attrib import attr
import yaml


@contextlib.contextmanager
def make_workdir():
    remove_old_dir = True
    dirname = os.path.join(os.path.dirname(__file__), "test_automated_output")
    if remove_old_dir:
        if os.path.exists(dirname):
            shutil.rmtree(dirname)
        os.makedirs(dirname)
    orig_dir = os.getcwd()
    try:
        os.chdir(dirname)
        yield dirname
    finally:
        os.chdir(orig_dir)


def expected_failure(test):
    """Small decorator to mark tests as expected failure.
    Useful for tests that are work-in-progress.
    """
    @functools.wraps(test)
    def inner(*args, **kwargs):
        try:
            test(*args, **kwargs)
        except Exception:
            raise SkipTest
        else:
            raise AssertionError('Failure expected')
    return inner


class AutomatedAnalysisTest(unittest.TestCase):
    """Setup a full automated analysis and run the pipeline.
    """
    def setUp(self):
        self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated")

    def _install_test_files(self, data_dir):
        """Download required sequence and reference files.
        """
        # self._download_to_dir(url, dirname)

    def _download_to_dir(self, url, dirname):
        print dirname
        cl = ["wget", url]
        subprocess.check_call(cl)
        cl = ["tar", "-xzvpf", os.path.basename(url)]
        subprocess.check_call(cl)
        shutil.move(os.path.basename(dirname), dirname)
        os.remove(os.path.basename(url))

    @attr(complete=True)
    @attr(cluster=True)
    def test_srnaseq_star(self):
        """Run cluster analysis
        """
        # self._install_test_files(self.data_dir)
        with make_workdir() as workdir:
            cl = ["seqcluster", "cluster",
                  "-m", "../../data/examples/cluster/seqs.ma",
                  "-a", "../../data/examples/clusters/seqs.bam",
                  "-gtf", "../../data/examples/cluster/annotation_red.gtf",
                  "-o", "test_out_res"]
            subprocess.check_call(cl)
<commit_before><commit_msg>Add automated test inspired in bcbio-nextgen code<commit_after>
"""This directory is setup with configurations to run the main functional test.

It exercises a full analysis pipeline on a smaller subset of data.
"""
import os
import subprocess
import unittest
import shutil
import contextlib
import collections
import functools

from nose import SkipTest
from nose.plugins.attrib import attr
import yaml


@contextlib.contextmanager
def make_workdir():
    remove_old_dir = True
    dirname = os.path.join(os.path.dirname(__file__), "test_automated_output")
    if remove_old_dir:
        if os.path.exists(dirname):
            shutil.rmtree(dirname)
        os.makedirs(dirname)
    orig_dir = os.getcwd()
    try:
        os.chdir(dirname)
        yield dirname
    finally:
        os.chdir(orig_dir)


def expected_failure(test):
    """Small decorator to mark tests as expected failure.
    Useful for tests that are work-in-progress.
    """
    @functools.wraps(test)
    def inner(*args, **kwargs):
        try:
            test(*args, **kwargs)
        except Exception:
            raise SkipTest
        else:
            raise AssertionError('Failure expected')
    return inner


class AutomatedAnalysisTest(unittest.TestCase):
    """Setup a full automated analysis and run the pipeline.
    """
    def setUp(self):
        self.data_dir = os.path.join(os.path.dirname(__file__), "data", "automated")

    def _install_test_files(self, data_dir):
        """Download required sequence and reference files.
        """
        # self._download_to_dir(url, dirname)

    def _download_to_dir(self, url, dirname):
        print dirname
        cl = ["wget", url]
        subprocess.check_call(cl)
        cl = ["tar", "-xzvpf", os.path.basename(url)]
        subprocess.check_call(cl)
        shutil.move(os.path.basename(dirname), dirname)
        os.remove(os.path.basename(url))

    @attr(complete=True)
    @attr(cluster=True)
    def test_srnaseq_star(self):
        """Run cluster analysis
        """
        # self._install_test_files(self.data_dir)
        with make_workdir() as workdir:
            cl = ["seqcluster", "cluster",
                  "-m", "../../data/examples/cluster/seqs.ma",
                  "-a", "../../data/examples/clusters/seqs.bam",
                  "-gtf", "../../data/examples/cluster/annotation_red.gtf",
                  "-o", "test_out_res"]
            subprocess.check_call(cl)
69aeb3afafc79bd9f7f85231f25ca198d4e23367
AutoTheBoringStuffProjects/Chp6Password/pw.py
AutoTheBoringStuffProjects/Chp6Password/pw.py
#!/urs/local/bin/python # pw.py - an insecure password locker program import sys import pyperclip PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6', 'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt', 'luggage': '12345'} if len(sys.argv) < 2: print('Usage: python pw.py [account] - copy account password') sys.exit() account = sys.argv[1] # first command line arg is the account name if account in PASSWORDS: pyperclip.copy(PASSWORDS[account]) print('Password for ' + account + ' copied to clipboard.') else: print('There is no account named ' + account)
Add project for chapter 6 Automate the Boring Stuff
Add project for chapter 6 Automate the Boring Stuff
Python
mit
HKuz/Test_Code
Add project for chapter 6 Automate the Boring Stuff
#!/urs/local/bin/python # pw.py - an insecure password locker program import sys import pyperclip PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6', 'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt', 'luggage': '12345'} if len(sys.argv) < 2: print('Usage: python pw.py [account] - copy account password') sys.exit() account = sys.argv[1] # first command line arg is the account name if account in PASSWORDS: pyperclip.copy(PASSWORDS[account]) print('Password for ' + account + ' copied to clipboard.') else: print('There is no account named ' + account)
<commit_before><commit_msg>Add project for chapter 6 Automate the Boring Stuff<commit_after>
#!/urs/local/bin/python # pw.py - an insecure password locker program import sys import pyperclip PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6', 'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt', 'luggage': '12345'} if len(sys.argv) < 2: print('Usage: python pw.py [account] - copy account password') sys.exit() account = sys.argv[1] # first command line arg is the account name if account in PASSWORDS: pyperclip.copy(PASSWORDS[account]) print('Password for ' + account + ' copied to clipboard.') else: print('There is no account named ' + account)
Add project for chapter 6 Automate the Boring Stuff#!/urs/local/bin/python # pw.py - an insecure password locker program import sys import pyperclip PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6', 'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt', 'luggage': '12345'} if len(sys.argv) < 2: print('Usage: python pw.py [account] - copy account password') sys.exit() account = sys.argv[1] # first command line arg is the account name if account in PASSWORDS: pyperclip.copy(PASSWORDS[account]) print('Password for ' + account + ' copied to clipboard.') else: print('There is no account named ' + account)
<commit_before><commit_msg>Add project for chapter 6 Automate the Boring Stuff<commit_after>#!/urs/local/bin/python # pw.py - an insecure password locker program import sys import pyperclip PASSWORDS = {'email': 'F7minlBDDuvMJuxESSKHFhTxFtjVB6', 'blog': 'VmALvQyKAxiVH5G8v01if1MLZF3sdt', 'luggage': '12345'} if len(sys.argv) < 2: print('Usage: python pw.py [account] - copy account password') sys.exit() account = sys.argv[1] # first command line arg is the account name if account in PASSWORDS: pyperclip.copy(PASSWORDS[account]) print('Password for ' + account + ' copied to clipboard.') else: print('There is no account named ' + account)
8d5b0bd8f50d7b0489ebbafd22c66cf5304d308f
auth0/v3/test/management/test_branding.py
auth0/v3/test/management/test_branding.py
import unittest

import mock

from ...management.branding import Branding


class TestBranding(unittest.TestCase):
    def test_init_with_optionals(self):
        branding = Branding(
            domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2)
        )
        self.assertEqual(branding.client.options.timeout, (10, 2))
        telemetry = branding.client.base_headers.get("Auth0-Client", None)
        self.assertEqual(telemetry, None)

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get(self, mock_rc):
        api = mock_rc.return_value

        branding = Branding(domain="domain", token="jwttoken")
        branding.get()

        api.get.assert_called_with(
            "https://domain/api/v2/branding",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update(self, mock_rc):
        api = mock_rc.return_value
        api.patch.return_value = {}

        branding = Branding(domain="domain", token="jwttoken")
        branding.update({"a": "b", "c": "d"})

        api.patch.assert_called_with(
            "https://domain/api/v2/branding", data={"a": "b", "c": "d"}
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get_template_universal_login(self, mock_rc):
        api = mock_rc.return_value

        branding = Branding(domain="domain", token="jwttoken")
        branding.get_template_universal_login()

        api.get.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_delete_template_universal_login(self, mock_rc):
        api = mock_rc.return_value

        branding = Branding(domain="domain", token="jwttoken")
        branding.delete_template_universal_login()

        api.delete.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        api.put.return_value = {}

        branding = Branding(domain="domain", token="jwttoken")
        branding.update_template_universal_login({"a": "b", "c": "d"})

        api.put.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
            type="put_universal-login_body",
            body={"template": {"a": "b", "c": "d"}},
        )
Add tests for new endpoints
Add tests for new endpoints
Python
mit
auth0/auth0-python,auth0/auth0-python
Add tests for new endpoints
import unittest

import mock

from ...management.branding import Branding


class TestBranding(unittest.TestCase):
    def test_init_with_optionals(self):
        branding = Branding(
            domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2)
        )
        self.assertEqual(branding.client.options.timeout, (10, 2))
        telemetry = branding.client.base_headers.get("Auth0-Client", None)
        self.assertEqual(telemetry, None)

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get(self, mock_rc):
        api = mock_rc.return_value

        branding = Branding(domain="domain", token="jwttoken")
        branding.get()

        api.get.assert_called_with(
            "https://domain/api/v2/branding",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update(self, mock_rc):
        api = mock_rc.return_value
        api.patch.return_value = {}

        branding = Branding(domain="domain", token="jwttoken")
        branding.update({"a": "b", "c": "d"})

        api.patch.assert_called_with(
            "https://domain/api/v2/branding", data={"a": "b", "c": "d"}
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get_template_universal_login(self, mock_rc):
        api = mock_rc.return_value

        branding = Branding(domain="domain", token="jwttoken")
        branding.get_template_universal_login()

        api.get.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_delete_template_universal_login(self, mock_rc):
        api = mock_rc.return_value

        branding = Branding(domain="domain", token="jwttoken")
        branding.delete_template_universal_login()

        api.delete.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        api.put.return_value = {}

        branding = Branding(domain="domain", token="jwttoken")
        branding.update_template_universal_login({"a": "b", "c": "d"})

        api.put.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
            type="put_universal-login_body",
            body={"template": {"a": "b", "c": "d"}},
        )
<commit_before><commit_msg>Add tests for new endpoints<commit_after>
import unittest
import mock
from ...management.branding import Branding


class TestBranding(unittest.TestCase):

    def test_init_with_optionals(self):
        branding = Branding(
            domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2)
        )
        self.assertEqual(branding.client.options.timeout, (10, 2))
        telemetry = branding.client.base_headers.get("Auth0-Client", None)
        self.assertEqual(telemetry, None)

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.get()
        api.get.assert_called_with(
            "https://domain/api/v2/branding",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update(self, mock_rc):
        api = mock_rc.return_value
        api.patch.return_value = {}
        branding = Branding(domain="domain", token="jwttoken")
        branding.update({"a": "b", "c": "d"})
        api.patch.assert_called_with(
            "https://domain/api/v2/branding", data={"a": "b", "c": "d"}
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.get_template_universal_login()
        api.get.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_delete_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.delete_template_universal_login()
        api.delete.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        api.put.return_value = {}
        branding = Branding(domain="domain", token="jwttoken")
        branding.update_template_universal_login({"a": "b", "c": "d"})
        api.put.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
            type="put_universal-login_body",
            body={"template": {"a": "b", "c": "d"}},
        )
Add tests for new endpoints
import unittest
import mock
from ...management.branding import Branding


class TestBranding(unittest.TestCase):

    def test_init_with_optionals(self):
        branding = Branding(
            domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2)
        )
        self.assertEqual(branding.client.options.timeout, (10, 2))
        telemetry = branding.client.base_headers.get("Auth0-Client", None)
        self.assertEqual(telemetry, None)

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.get()
        api.get.assert_called_with(
            "https://domain/api/v2/branding",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update(self, mock_rc):
        api = mock_rc.return_value
        api.patch.return_value = {}
        branding = Branding(domain="domain", token="jwttoken")
        branding.update({"a": "b", "c": "d"})
        api.patch.assert_called_with(
            "https://domain/api/v2/branding", data={"a": "b", "c": "d"}
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.get_template_universal_login()
        api.get.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_delete_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.delete_template_universal_login()
        api.delete.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        api.put.return_value = {}
        branding = Branding(domain="domain", token="jwttoken")
        branding.update_template_universal_login({"a": "b", "c": "d"})
        api.put.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
            type="put_universal-login_body",
            body={"template": {"a": "b", "c": "d"}},
        )
<commit_before><commit_msg>Add tests for new endpoints<commit_after>
import unittest
import mock
from ...management.branding import Branding


class TestBranding(unittest.TestCase):

    def test_init_with_optionals(self):
        branding = Branding(
            domain="domain", token="jwttoken", telemetry=False, timeout=(10, 2)
        )
        self.assertEqual(branding.client.options.timeout, (10, 2))
        telemetry = branding.client.base_headers.get("Auth0-Client", None)
        self.assertEqual(telemetry, None)

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.get()
        api.get.assert_called_with(
            "https://domain/api/v2/branding",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update(self, mock_rc):
        api = mock_rc.return_value
        api.patch.return_value = {}
        branding = Branding(domain="domain", token="jwttoken")
        branding.update({"a": "b", "c": "d"})
        api.patch.assert_called_with(
            "https://domain/api/v2/branding", data={"a": "b", "c": "d"}
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_get_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.get_template_universal_login()
        api.get.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_delete_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        branding = Branding(domain="domain", token="jwttoken")
        branding.delete_template_universal_login()
        api.delete.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
        )

    @mock.patch("auth0.v3.management.branding.RestClient")
    def test_update_template_universal_login(self, mock_rc):
        api = mock_rc.return_value
        api.put.return_value = {}
        branding = Branding(domain="domain", token="jwttoken")
        branding.update_template_universal_login({"a": "b", "c": "d"})
        api.put.assert_called_with(
            "https://domain/api/v2/branding/templates/universal-login",
            type="put_universal-login_body",
            body={"template": {"a": "b", "c": "d"}},
        )
a06cd308c9aaa908386d2a1390817049723f28af
contrib/trigger_rtd_build.py
contrib/trigger_rtd_build.py
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import urllib2

key = sys.argv[1]
url = 'http://readthedocs.org/build/%s' % (key)
req = urllib2.Request(url, '')
f = urllib2.urlopen(req)
print f.read()
Add script for triggering rtd build (curl is not available on build slave).
Add script for triggering rtd build (curl is not available on build slave).
Python
apache-2.0
Cloud-Elasticity-Services/as-libcloud,pquentin/libcloud,wido/libcloud,schaubl/libcloud,atsaki/libcloud,marcinzaremba/libcloud,mbrukman/libcloud,marcinzaremba/libcloud,carletes/libcloud,thesquelched/libcloud,Itxaka/libcloud,vongazman/libcloud,t-tran/libcloud,jerryblakley/libcloud,munkiat/libcloud,sgammon/libcloud,iPlantCollaborativeOpenSource/libcloud,Verizon/libcloud,Kami/libcloud,ClusterHQ/libcloud,wuyuewen/libcloud,DimensionDataCBUSydney/libcloud,JamesGuthrie/libcloud,NexusIS/libcloud,andrewsomething/libcloud,niteoweb/libcloud,Scalr/libcloud,smaffulli/libcloud,mgogoulos/libcloud,sahildua2305/libcloud,aleGpereira/libcloud,StackPointCloud/libcloud,sahildua2305/libcloud,pantheon-systems/libcloud,t-tran/libcloud,illfelder/libcloud,wrigri/libcloud,Cloud-Elasticity-Services/as-libcloud,illfelder/libcloud,pquentin/libcloud,ClusterHQ/libcloud,MrBasset/libcloud,sahildua2305/libcloud,watermelo/libcloud,briancurtin/libcloud,cryptickp/libcloud,dcorbacho/libcloud,Scalr/libcloud,marcinzaremba/libcloud,Scalr/libcloud,sergiorua/libcloud,mgogoulos/libcloud,supertom/libcloud,jimbobhickville/libcloud,munkiat/libcloud,apache/libcloud,thesquelched/libcloud,Jc2k/libcloud,cloudControl/libcloud,vongazman/libcloud,smaffulli/libcloud,briancurtin/libcloud,DimensionDataCBUSydney/libcloud,niteoweb/libcloud,aviweit/libcloud,niteoweb/libcloud,cryptickp/libcloud,wrigri/libcloud,mtekel/libcloud,samuelchong/libcloud,dcorbacho/libcloud,erjohnso/libcloud,Verizon/libcloud,mathspace/libcloud,JamesGuthrie/libcloud,mtekel/libcloud,supertom/libcloud,Jc2k/libcloud,briancurtin/libcloud,mathspace/libcloud,mbrukman/libcloud,SecurityCompass/libcloud,atsaki/libcloud,NexusIS/libcloud,sfriesel/libcloud,DimensionDataCBUSydney/libcloud,SecurityCompass/libcloud,carletes/libcloud,t-tran/libcloud,Cloud-Elasticity-Services/as-libcloud,pantheon-systems/libcloud,sergiorua/libcloud,MrBasset/libcloud,mistio/libcloud,wuyuewen/libcloud,mbrukman/libcloud,jimbobhickville/libcloud,erjohnso/libcloud,apache/libcloud,mistio/libcloud,pantheon-systems/libcloud,techhat/libcloud,wido/libcloud,StackPointCloud/libcloud,cloudControl/libcloud,sfriesel/libcloud,carletes/libcloud,mgogoulos/libcloud,lochiiconnectivity/libcloud,cloudControl/libcloud,MrBasset/libcloud,thesquelched/libcloud,wido/libcloud,Kami/libcloud,aviweit/libcloud,techhat/libcloud,ByteInternet/libcloud,JamesGuthrie/libcloud,smaffulli/libcloud,ByteInternet/libcloud,curoverse/libcloud,schaubl/libcloud,techhat/libcloud,lochiiconnectivity/libcloud,apache/libcloud,Itxaka/libcloud,samuelchong/libcloud,ZuluPro/libcloud,cryptickp/libcloud,watermelo/libcloud,kater169/libcloud,samuelchong/libcloud,erjohnso/libcloud,watermelo/libcloud,NexusIS/libcloud,mistio/libcloud,vongazman/libcloud,StackPointCloud/libcloud,aleGpereira/libcloud,SecurityCompass/libcloud,curoverse/libcloud,jerryblakley/libcloud,Itxaka/libcloud,ByteInternet/libcloud,wrigri/libcloud,iPlantCollaborativeOpenSource/libcloud,ZuluPro/libcloud,kater169/libcloud,mathspace/libcloud,sergiorua/libcloud,sgammon/libcloud,lochiiconnectivity/libcloud,mtekel/libcloud,kater169/libcloud,atsaki/libcloud,supertom/libcloud,wuyuewen/libcloud,illfelder/libcloud,curoverse/libcloud,iPlantCollaborativeOpenSource/libcloud,Verizon/libcloud,ZuluPro/libcloud,pquentin/libcloud,dcorbacho/libcloud,munkiat/libcloud,sfriesel/libcloud,aviweit/libcloud,andrewsomething/libcloud,jimbobhickville/libcloud,andrewsomething/libcloud,aleGpereira/libcloud,jerryblakley/libcloud,schaubl/libcloud,Kami/libcloud
Add script for triggering rtd build (curl is not available on build slave).
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import urllib2

key = sys.argv[1]
url = 'http://readthedocs.org/build/%s' % (key)
req = urllib2.Request(url, '')
f = urllib2.urlopen(req)
print f.read()
<commit_before><commit_msg>Add script for triggering rtd build (curl is not available on build slave).<commit_after>
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import urllib2

key = sys.argv[1]
url = 'http://readthedocs.org/build/%s' % (key)
req = urllib2.Request(url, '')
f = urllib2.urlopen(req)
print f.read()
Add script for triggering rtd build (curl is not available on build slave).
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import urllib2

key = sys.argv[1]
url = 'http://readthedocs.org/build/%s' % (key)
req = urllib2.Request(url, '')
f = urllib2.urlopen(req)
print f.read()
<commit_before><commit_msg>Add script for triggering rtd build (curl is not available on build slave).<commit_after>
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import urllib2

key = sys.argv[1]
url = 'http://readthedocs.org/build/%s' % (key)
req = urllib2.Request(url, '')
f = urllib2.urlopen(req)
print f.read()
7bf47ceb1872db834a91cbcc2193366bff1abb58
dask/array/tests/test_fft.py
dask/array/tests/test_fft.py
import unittest

import numpy as np
import numpy.fft as npfft

import dask
from dask.array.core import Array
from dask.utils import raises
import dask.array as da
from dask.array.fft import fft, ifft


def eq(a, b):
    if isinstance(a, Array):
        adt = a._dtype
        a = a.compute(get=dask.get)
    else:
        adt = getattr(a, 'dtype', None)
    if isinstance(b, Array):
        bdt = b._dtype
        b = b.compute(get=dask.get)
    else:
        bdt = getattr(b, 'dtype', None)
    if not str(adt) == str(bdt):
        return False
    try:
        return np.allclose(a, b)
    except TypeError:
        pass
    c = a == b
    if isinstance(c, np.ndarray):
        return c.all()
    else:
        return c


class TestFFT(unittest.TestCase):

    def test_cant_fft_chunked_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(5, 5))
        assert raises(ValueError, lambda: fft(darr))
        assert raises(ValueError, lambda: fft(darr, axis=0))

    def test_fft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = fft(darr)
        expected = npfft.fft(nparr)
        assert eq(res, expected)

    def test_fft_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(10, 1))
        res = fft(darr, axis=0)
        expected = npfft.fft(nparr, axis=0)
        assert eq(res, expected)

    def test_ifft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = ifft(darr)
        expected = npfft.ifft(nparr)
        assert eq(res, expected)
        darr2 = da.from_array(nparr, chunks=(10, 1))
        res2 = ifft(darr2, axis=0)
        expected2 = npfft.ifft(nparr, axis=0)
        assert eq(res2, expected2)


if __name__ == '__main__':
    unittest.main()
Add tests for fft and ifft
Add tests for fft and ifft
Python
bsd-3-clause
ssanderson/dask,mrocklin/dask,chrisbarber/dask,cowlicks/dask,PhE/dask,mrocklin/dask,mikegraham/dask,gameduell/dask,dask/dask,jcrist/dask,jakirkham/dask,vikhyat/dask,ssanderson/dask,wiso/dask,ContinuumIO/dask,ContinuumIO/dask,blaze/dask,pombredanne/dask,wiso/dask,mraspaud/dask,clarkfitzg/dask,vikhyat/dask,dask/dask,PhE/dask,jakirkham/dask,cpcloud/dask,mraspaud/dask,blaze/dask,jcrist/dask,clarkfitzg/dask,pombredanne/dask
Add tests for fft and ifft
import unittest

import numpy as np
import numpy.fft as npfft

import dask
from dask.array.core import Array
from dask.utils import raises
import dask.array as da
from dask.array.fft import fft, ifft


def eq(a, b):
    if isinstance(a, Array):
        adt = a._dtype
        a = a.compute(get=dask.get)
    else:
        adt = getattr(a, 'dtype', None)
    if isinstance(b, Array):
        bdt = b._dtype
        b = b.compute(get=dask.get)
    else:
        bdt = getattr(b, 'dtype', None)
    if not str(adt) == str(bdt):
        return False
    try:
        return np.allclose(a, b)
    except TypeError:
        pass
    c = a == b
    if isinstance(c, np.ndarray):
        return c.all()
    else:
        return c


class TestFFT(unittest.TestCase):

    def test_cant_fft_chunked_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(5, 5))
        assert raises(ValueError, lambda: fft(darr))
        assert raises(ValueError, lambda: fft(darr, axis=0))

    def test_fft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = fft(darr)
        expected = npfft.fft(nparr)
        assert eq(res, expected)

    def test_fft_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(10, 1))
        res = fft(darr, axis=0)
        expected = npfft.fft(nparr, axis=0)
        assert eq(res, expected)

    def test_ifft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = ifft(darr)
        expected = npfft.ifft(nparr)
        assert eq(res, expected)
        darr2 = da.from_array(nparr, chunks=(10, 1))
        res2 = ifft(darr2, axis=0)
        expected2 = npfft.ifft(nparr, axis=0)
        assert eq(res2, expected2)


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Add tests for fft and ifft<commit_after>
import unittest

import numpy as np
import numpy.fft as npfft

import dask
from dask.array.core import Array
from dask.utils import raises
import dask.array as da
from dask.array.fft import fft, ifft


def eq(a, b):
    if isinstance(a, Array):
        adt = a._dtype
        a = a.compute(get=dask.get)
    else:
        adt = getattr(a, 'dtype', None)
    if isinstance(b, Array):
        bdt = b._dtype
        b = b.compute(get=dask.get)
    else:
        bdt = getattr(b, 'dtype', None)
    if not str(adt) == str(bdt):
        return False
    try:
        return np.allclose(a, b)
    except TypeError:
        pass
    c = a == b
    if isinstance(c, np.ndarray):
        return c.all()
    else:
        return c


class TestFFT(unittest.TestCase):

    def test_cant_fft_chunked_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(5, 5))
        assert raises(ValueError, lambda: fft(darr))
        assert raises(ValueError, lambda: fft(darr, axis=0))

    def test_fft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = fft(darr)
        expected = npfft.fft(nparr)
        assert eq(res, expected)

    def test_fft_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(10, 1))
        res = fft(darr, axis=0)
        expected = npfft.fft(nparr, axis=0)
        assert eq(res, expected)

    def test_ifft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = ifft(darr)
        expected = npfft.ifft(nparr)
        assert eq(res, expected)
        darr2 = da.from_array(nparr, chunks=(10, 1))
        res2 = ifft(darr2, axis=0)
        expected2 = npfft.ifft(nparr, axis=0)
        assert eq(res2, expected2)


if __name__ == '__main__':
    unittest.main()
Add tests for fft and ifft
import unittest

import numpy as np
import numpy.fft as npfft

import dask
from dask.array.core import Array
from dask.utils import raises
import dask.array as da
from dask.array.fft import fft, ifft


def eq(a, b):
    if isinstance(a, Array):
        adt = a._dtype
        a = a.compute(get=dask.get)
    else:
        adt = getattr(a, 'dtype', None)
    if isinstance(b, Array):
        bdt = b._dtype
        b = b.compute(get=dask.get)
    else:
        bdt = getattr(b, 'dtype', None)
    if not str(adt) == str(bdt):
        return False
    try:
        return np.allclose(a, b)
    except TypeError:
        pass
    c = a == b
    if isinstance(c, np.ndarray):
        return c.all()
    else:
        return c


class TestFFT(unittest.TestCase):

    def test_cant_fft_chunked_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(5, 5))
        assert raises(ValueError, lambda: fft(darr))
        assert raises(ValueError, lambda: fft(darr, axis=0))

    def test_fft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = fft(darr)
        expected = npfft.fft(nparr)
        assert eq(res, expected)

    def test_fft_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(10, 1))
        res = fft(darr, axis=0)
        expected = npfft.fft(nparr, axis=0)
        assert eq(res, expected)

    def test_ifft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = ifft(darr)
        expected = npfft.ifft(nparr)
        assert eq(res, expected)
        darr2 = da.from_array(nparr, chunks=(10, 1))
        res2 = ifft(darr2, axis=0)
        expected2 = npfft.ifft(nparr, axis=0)
        assert eq(res2, expected2)


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Add tests for fft and ifft<commit_after>
import unittest

import numpy as np
import numpy.fft as npfft

import dask
from dask.array.core import Array
from dask.utils import raises
import dask.array as da
from dask.array.fft import fft, ifft


def eq(a, b):
    if isinstance(a, Array):
        adt = a._dtype
        a = a.compute(get=dask.get)
    else:
        adt = getattr(a, 'dtype', None)
    if isinstance(b, Array):
        bdt = b._dtype
        b = b.compute(get=dask.get)
    else:
        bdt = getattr(b, 'dtype', None)
    if not str(adt) == str(bdt):
        return False
    try:
        return np.allclose(a, b)
    except TypeError:
        pass
    c = a == b
    if isinstance(c, np.ndarray):
        return c.all()
    else:
        return c


class TestFFT(unittest.TestCase):

    def test_cant_fft_chunked_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(5, 5))
        assert raises(ValueError, lambda: fft(darr))
        assert raises(ValueError, lambda: fft(darr, axis=0))

    def test_fft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = fft(darr)
        expected = npfft.fft(nparr)
        assert eq(res, expected)

    def test_fft_axis(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(10, 1))
        res = fft(darr, axis=0)
        expected = npfft.fft(nparr, axis=0)
        assert eq(res, expected)

    def test_ifft(self):
        nparr = np.arange(100).reshape(10, 10)
        darr = da.from_array(nparr, chunks=(1, 10))
        res = ifft(darr)
        expected = npfft.ifft(nparr)
        assert eq(res, expected)
        darr2 = da.from_array(nparr, chunks=(10, 1))
        res2 = ifft(darr2, axis=0)
        expected2 = npfft.ifft(nparr, axis=0)
        assert eq(res2, expected2)


if __name__ == '__main__':
    unittest.main()
ebc1c3ddb2adca3205d1976cf0def5145a31a6dc
bliski_publikator/contrib/sites/migrations/0003_auto_20160414_0903.py
bliski_publikator/contrib/sites/migrations/0003_auto_20160414_0903.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 09:03
from __future__ import unicode_literals

import django.contrib.sites.models
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0002_set_site_domain_and_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='domain',
            field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
        ),
    ]
Add standard migrations to contrib.sites
Add standard migrations to contrib.sites
Python
mit
watchdogpolska/bliski_publikator,watchdogpolska/bliski_publikator,watchdogpolska/bliski_publikator,watchdogpolska/bliski_publikator
Add standard migrations to contrib.sites
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 09:03
from __future__ import unicode_literals

import django.contrib.sites.models
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0002_set_site_domain_and_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='domain',
            field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
        ),
    ]
<commit_before><commit_msg>Add standard migrations to contrib.sites<commit_after>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 09:03
from __future__ import unicode_literals

import django.contrib.sites.models
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0002_set_site_domain_and_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='domain',
            field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
        ),
    ]
Add standard migrations to contrib.sites
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 09:03
from __future__ import unicode_literals

import django.contrib.sites.models
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0002_set_site_domain_and_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='domain',
            field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
        ),
    ]
<commit_before><commit_msg>Add standard migrations to contrib.sites<commit_after>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-14 09:03
from __future__ import unicode_literals

import django.contrib.sites.models
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('sites', '0002_set_site_domain_and_name'),
    ]

    operations = [
        migrations.AlterField(
            model_name='site',
            name='domain',
            field=models.CharField(max_length=100, unique=True, validators=[django.contrib.sites.models._simple_domain_name_validator], verbose_name='domain name'),
        ),
    ]
4ae97e9d15b28dd2eba5c5e404b071ee87773f77
coverage_score_viewer/migrations/0001_initial.py
coverage_score_viewer/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CoverageBoundary',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('admin_level', models.IntegerField()),
                ('name', models.TextField()),
                ('rank', models.IntegerField()),
                ('latest_timestamp', models.DateTimeField()),
                ('oldest_timestamp', models.DateTimeField()),
                ('coverage', models.FloatField()),
                ('original_coverage', models.FloatField()),
                ('total_coverage_gain', models.FloatField()),
                ('polygon', models.TextField()),
                ('bbox', models.TextField()),
                ('parent', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
                'db_table': 'coverage_boundary',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CoverageScore',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('coverage', models.FloatField()),
                ('coverage_boundary', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
Add migration script for coverage scores.
Add migration script for coverage scores.
Python
mit
thomaskonrad/osm_austria_building_coverage,thomaskonrad/osm_austria_building_coverage,thomaskonrad/osm-austria-building-coverage,thomaskonrad/osm_austria_building_coverage,thomaskonrad/osm-austria-building-coverage,thomaskonrad/osm_austria_building_coverage,thomaskonrad/osm-austria-building-coverage,thomaskonrad/osm-austria-building-coverage
Add migration script for coverage scores.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CoverageBoundary',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('admin_level', models.IntegerField()),
                ('name', models.TextField()),
                ('rank', models.IntegerField()),
                ('latest_timestamp', models.DateTimeField()),
                ('oldest_timestamp', models.DateTimeField()),
                ('coverage', models.FloatField()),
                ('original_coverage', models.FloatField()),
                ('total_coverage_gain', models.FloatField()),
                ('polygon', models.TextField()),
                ('bbox', models.TextField()),
                ('parent', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
                'db_table': 'coverage_boundary',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CoverageScore',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('coverage', models.FloatField()),
                ('coverage_boundary', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
<commit_before><commit_msg>Add migration script for coverage scores.<commit_after>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CoverageBoundary',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('admin_level', models.IntegerField()),
                ('name', models.TextField()),
                ('rank', models.IntegerField()),
                ('latest_timestamp', models.DateTimeField()),
                ('oldest_timestamp', models.DateTimeField()),
                ('coverage', models.FloatField()),
                ('original_coverage', models.FloatField()),
                ('total_coverage_gain', models.FloatField()),
                ('polygon', models.TextField()),
                ('bbox', models.TextField()),
                ('parent', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
                'db_table': 'coverage_boundary',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CoverageScore',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('coverage', models.FloatField()),
                ('coverage_boundary', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
Add migration script for coverage scores.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CoverageBoundary',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('admin_level', models.IntegerField()),
                ('name', models.TextField()),
                ('rank', models.IntegerField()),
                ('latest_timestamp', models.DateTimeField()),
                ('oldest_timestamp', models.DateTimeField()),
                ('coverage', models.FloatField()),
                ('original_coverage', models.FloatField()),
                ('total_coverage_gain', models.FloatField()),
                ('polygon', models.TextField()),
                ('bbox', models.TextField()),
                ('parent', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
                'db_table': 'coverage_boundary',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CoverageScore',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('coverage', models.FloatField()),
                ('coverage_boundary', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
<commit_before><commit_msg>Add migration script for coverage scores.<commit_after>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='CoverageBoundary',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('admin_level', models.IntegerField()),
                ('name', models.TextField()),
                ('rank', models.IntegerField()),
                ('latest_timestamp', models.DateTimeField()),
                ('oldest_timestamp', models.DateTimeField()),
                ('coverage', models.FloatField()),
                ('original_coverage', models.FloatField()),
                ('total_coverage_gain', models.FloatField()),
                ('polygon', models.TextField()),
                ('bbox', models.TextField()),
                ('parent', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
                'db_table': 'coverage_boundary',
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='CoverageScore',
            fields=[
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('date', models.DateField()),
                ('coverage', models.FloatField()),
                ('coverage_boundary', models.ForeignKey(to='coverage_score_viewer.CoverageBoundary')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
e10a5979bc4cbe5b6ff89ae7af6ea726eb1a0000
functional/tests/compute/v2/test_server_group.py
functional/tests/compute/v2/test_server_group.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from functional.common import test


class ServerGroupTests(test.TestCase):
    """Functional tests for servergroup. """

    NAME = uuid.uuid4().hex
    HEADERS = ['Name']
    FIELDS = ['name']

    @classmethod
    def setUpClass(cls):
        opts = cls.get_show_opts(cls.FIELDS)
        raw_output = cls.openstack(
            'server group create --policy affinity ' + cls.NAME + opts)
        expected = cls.NAME + '\n'
        cls.assertOutput(expected, raw_output)

    @classmethod
    def tearDownClass(cls):
        raw_output = cls.openstack('server group delete ' + cls.NAME)
        cls.assertOutput('', raw_output)

    def test_server_group_list(self):
        opts = self.get_list_opts(self.HEADERS)
        raw_output = self.openstack('server group list' + opts)
        self.assertIn(self.NAME, raw_output)

    def test_server_group_show(self):
        opts = self.get_show_opts(self.FIELDS)
        raw_output = self.openstack('server group show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n", raw_output)
Add functional tests for server group in ComputeV2
Add functional tests for server group in ComputeV2

Change-Id: I43a6ce3a6d976f3d1bd68c0483c929977b660f0d
Python
apache-2.0
dtroyer/python-openstackclient,redhat-openstack/python-openstackclient,dtroyer/python-openstackclient,openstack/python-openstackclient,openstack/python-openstackclient,redhat-openstack/python-openstackclient
Add functional tests for server group in ComputeV2

Change-Id: I43a6ce3a6d976f3d1bd68c0483c929977b660f0d
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from functional.common import test


class ServerGroupTests(test.TestCase):
    """Functional tests for servergroup. """

    NAME = uuid.uuid4().hex
    HEADERS = ['Name']
    FIELDS = ['name']

    @classmethod
    def setUpClass(cls):
        opts = cls.get_show_opts(cls.FIELDS)
        raw_output = cls.openstack(
            'server group create --policy affinity ' + cls.NAME + opts)
        expected = cls.NAME + '\n'
        cls.assertOutput(expected, raw_output)

    @classmethod
    def tearDownClass(cls):
        raw_output = cls.openstack('server group delete ' + cls.NAME)
        cls.assertOutput('', raw_output)

    def test_server_group_list(self):
        opts = self.get_list_opts(self.HEADERS)
        raw_output = self.openstack('server group list' + opts)
        self.assertIn(self.NAME, raw_output)

    def test_server_group_show(self):
        opts = self.get_show_opts(self.FIELDS)
        raw_output = self.openstack('server group show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n", raw_output)
<commit_before><commit_msg>Add functional tests for server group in ComputeV2 Change-Id: I43a6ce3a6d976f3d1bd68c0483c929977b660f0d<commit_after>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from functional.common import test


class ServerGroupTests(test.TestCase):
    """Functional tests for servergroup. """

    NAME = uuid.uuid4().hex
    HEADERS = ['Name']
    FIELDS = ['name']

    @classmethod
    def setUpClass(cls):
        opts = cls.get_show_opts(cls.FIELDS)
        raw_output = cls.openstack(
            'server group create --policy affinity ' + cls.NAME + opts)
        expected = cls.NAME + '\n'
        cls.assertOutput(expected, raw_output)

    @classmethod
    def tearDownClass(cls):
        raw_output = cls.openstack('server group delete ' + cls.NAME)
        cls.assertOutput('', raw_output)

    def test_server_group_list(self):
        opts = self.get_list_opts(self.HEADERS)
        raw_output = self.openstack('server group list' + opts)
        self.assertIn(self.NAME, raw_output)

    def test_server_group_show(self):
        opts = self.get_show_opts(self.FIELDS)
        raw_output = self.openstack('server group show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n", raw_output)
Add functional tests for server group in ComputeV2

Change-Id: I43a6ce3a6d976f3d1bd68c0483c929977b660f0d
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from functional.common import test


class ServerGroupTests(test.TestCase):
    """Functional tests for servergroup. """

    NAME = uuid.uuid4().hex
    HEADERS = ['Name']
    FIELDS = ['name']

    @classmethod
    def setUpClass(cls):
        opts = cls.get_show_opts(cls.FIELDS)
        raw_output = cls.openstack(
            'server group create --policy affinity ' + cls.NAME + opts)
        expected = cls.NAME + '\n'
        cls.assertOutput(expected, raw_output)

    @classmethod
    def tearDownClass(cls):
        raw_output = cls.openstack('server group delete ' + cls.NAME)
        cls.assertOutput('', raw_output)

    def test_server_group_list(self):
        opts = self.get_list_opts(self.HEADERS)
        raw_output = self.openstack('server group list' + opts)
        self.assertIn(self.NAME, raw_output)

    def test_server_group_show(self):
        opts = self.get_show_opts(self.FIELDS)
        raw_output = self.openstack('server group show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n", raw_output)
<commit_before><commit_msg>Add functional tests for server group in ComputeV2 Change-Id: I43a6ce3a6d976f3d1bd68c0483c929977b660f0d<commit_after>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from functional.common import test


class ServerGroupTests(test.TestCase):
    """Functional tests for servergroup. """

    NAME = uuid.uuid4().hex
    HEADERS = ['Name']
    FIELDS = ['name']

    @classmethod
    def setUpClass(cls):
        opts = cls.get_show_opts(cls.FIELDS)
        raw_output = cls.openstack(
            'server group create --policy affinity ' + cls.NAME + opts)
        expected = cls.NAME + '\n'
        cls.assertOutput(expected, raw_output)

    @classmethod
    def tearDownClass(cls):
        raw_output = cls.openstack('server group delete ' + cls.NAME)
        cls.assertOutput('', raw_output)

    def test_server_group_list(self):
        opts = self.get_list_opts(self.HEADERS)
        raw_output = self.openstack('server group list' + opts)
        self.assertIn(self.NAME, raw_output)

    def test_server_group_show(self):
        opts = self.get_show_opts(self.FIELDS)
        raw_output = self.openstack('server group show ' + self.NAME + opts)
        self.assertEqual(self.NAME + "\n", raw_output)
cb7631303a73e69ffa62811be2ea79e2d4a6d64b
src/main/resources/script_templates/Python/Process_Folder.py
src/main/resources/script_templates/Python/Process_Folder.py
import os
from ij import IJ, ImagePlus
from ij.gui import GenericDialog


def run():
    srcDir = IJ.getDirectory("Input_directory")
    if not srcDir:
        return
    dstDir = IJ.getDirectory("Output_directory")
    if not dstDir:
        return
    gd = GenericDialog("Process Folder")
    gd.addStringField("File_extension", ".tif")
    gd.addStringField("File_name_contains", "")
    gd.addCheckbox("Keep directory structure when saving", True)
    gd.showDialog()
    if gd.wasCanceled():
        return
    ext = gd.getNextString()
    containString = gd.getNextString()
    keepDirectories = gd.getNextBoolean()
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Check for file extension
            if not filename.endswith(ext):
                continue
            # Check for file name pattern
            if containString not in filename:
                continue
            process(srcDir, dstDir, root, filename, keepDirectories)


def process(srcDir, dstDir, currentDir, fileName, keepDirectories):
    print "Processing:"
    # Opening the image
    print "Open image file", fileName
    imp = IJ.openImage(os.path.join(currentDir, fileName))
    # Put your processing commands here!
    # Saving the image
    saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    print "Saving to", saveDir
    IJ.saveAs(imp, "Tiff", os.path.join(saveDir, fileName));
    imp.close()


run()
Add python template to process a folder of images
Add python template to process a folder of images
Python
bsd-2-clause
imagej/imagej-legacy,imagej/imagej-legacy,imagej/imagej-legacy,imagej/imagej-legacy
Add python template to process a folder of images
import os
from ij import IJ, ImagePlus
from ij.gui import GenericDialog


def run():
    srcDir = IJ.getDirectory("Input_directory")
    if not srcDir:
        return
    dstDir = IJ.getDirectory("Output_directory")
    if not dstDir:
        return
    gd = GenericDialog("Process Folder")
    gd.addStringField("File_extension", ".tif")
    gd.addStringField("File_name_contains", "")
    gd.addCheckbox("Keep directory structure when saving", True)
    gd.showDialog()
    if gd.wasCanceled():
        return
    ext = gd.getNextString()
    containString = gd.getNextString()
    keepDirectories = gd.getNextBoolean()
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Check for file extension
            if not filename.endswith(ext):
                continue
            # Check for file name pattern
            if containString not in filename:
                continue
            process(srcDir, dstDir, root, filename, keepDirectories)


def process(srcDir, dstDir, currentDir, fileName, keepDirectories):
    print "Processing:"
    # Opening the image
    print "Open image file", fileName
    imp = IJ.openImage(os.path.join(currentDir, fileName))
    # Put your processing commands here!
    # Saving the image
    saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    print "Saving to", saveDir
    IJ.saveAs(imp, "Tiff", os.path.join(saveDir, fileName));
    imp.close()


run()
<commit_before><commit_msg>Add python template to process a folder of images<commit_after>
import os
from ij import IJ, ImagePlus
from ij.gui import GenericDialog


def run():
    srcDir = IJ.getDirectory("Input_directory")
    if not srcDir:
        return
    dstDir = IJ.getDirectory("Output_directory")
    if not dstDir:
        return
    gd = GenericDialog("Process Folder")
    gd.addStringField("File_extension", ".tif")
    gd.addStringField("File_name_contains", "")
    gd.addCheckbox("Keep directory structure when saving", True)
    gd.showDialog()
    if gd.wasCanceled():
        return
    ext = gd.getNextString()
    containString = gd.getNextString()
    keepDirectories = gd.getNextBoolean()
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Check for file extension
            if not filename.endswith(ext):
                continue
            # Check for file name pattern
            if containString not in filename:
                continue
            process(srcDir, dstDir, root, filename, keepDirectories)


def process(srcDir, dstDir, currentDir, fileName, keepDirectories):
    print "Processing:"
    # Opening the image
    print "Open image file", fileName
    imp = IJ.openImage(os.path.join(currentDir, fileName))
    # Put your processing commands here!
    # Saving the image
    saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    print "Saving to", saveDir
    IJ.saveAs(imp, "Tiff", os.path.join(saveDir, fileName));
    imp.close()


run()
Add python template to process a folder of images
import os
from ij import IJ, ImagePlus
from ij.gui import GenericDialog


def run():
    srcDir = IJ.getDirectory("Input_directory")
    if not srcDir:
        return
    dstDir = IJ.getDirectory("Output_directory")
    if not dstDir:
        return
    gd = GenericDialog("Process Folder")
    gd.addStringField("File_extension", ".tif")
    gd.addStringField("File_name_contains", "")
    gd.addCheckbox("Keep directory structure when saving", True)
    gd.showDialog()
    if gd.wasCanceled():
        return
    ext = gd.getNextString()
    containString = gd.getNextString()
    keepDirectories = gd.getNextBoolean()
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Check for file extension
            if not filename.endswith(ext):
                continue
            # Check for file name pattern
            if containString not in filename:
                continue
            process(srcDir, dstDir, root, filename, keepDirectories)


def process(srcDir, dstDir, currentDir, fileName, keepDirectories):
    print "Processing:"
    # Opening the image
    print "Open image file", fileName
    imp = IJ.openImage(os.path.join(currentDir, fileName))
    # Put your processing commands here!
    # Saving the image
    saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    print "Saving to", saveDir
    IJ.saveAs(imp, "Tiff", os.path.join(saveDir, fileName));
    imp.close()


run()
<commit_before><commit_msg>Add python template to process a folder of images<commit_after>
import os
from ij import IJ, ImagePlus
from ij.gui import GenericDialog


def run():
    srcDir = IJ.getDirectory("Input_directory")
    if not srcDir:
        return
    dstDir = IJ.getDirectory("Output_directory")
    if not dstDir:
        return
    gd = GenericDialog("Process Folder")
    gd.addStringField("File_extension", ".tif")
    gd.addStringField("File_name_contains", "")
    gd.addCheckbox("Keep directory structure when saving", True)
    gd.showDialog()
    if gd.wasCanceled():
        return
    ext = gd.getNextString()
    containString = gd.getNextString()
    keepDirectories = gd.getNextBoolean()
    for root, directories, filenames in os.walk(srcDir):
        for filename in filenames:
            # Check for file extension
            if not filename.endswith(ext):
                continue
            # Check for file name pattern
            if containString not in filename:
                continue
            process(srcDir, dstDir, root, filename, keepDirectories)


def process(srcDir, dstDir, currentDir, fileName, keepDirectories):
    print "Processing:"
    # Opening the image
    print "Open image file", fileName
    imp = IJ.openImage(os.path.join(currentDir, fileName))
    # Put your processing commands here!
    # Saving the image
    saveDir = currentDir.replace(srcDir, dstDir) if keepDirectories else dstDir
    if not os.path.exists(saveDir):
        os.makedirs(saveDir)
    print "Saving to", saveDir
    IJ.saveAs(imp, "Tiff", os.path.join(saveDir, fileName));
    imp.close()


run()
a558b9efe5f20af2b5b4e588add8648bfba22c2c
app/scripts/benchmark_match_accuracy.py
app/scripts/benchmark_match_accuracy.py
from tabulate import tabulate
import csv
import yaml
import sys
import os.path as path

base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)

import deparse
from segment import Segment


def load_segments(filename):
    '''Load a segment feature matrix from a CSV file, returning a list of
    dictionaries with information about each segment.'''
    with open(filename, 'r') as f:
        return [segment for segment in csv.DictReader(f)]


def load_feature_strings(filename):
    '''Load a feature string list from a CSV file, returning a list of lists
    where the first item in each is the IPA string and the second is the
    corresponding feature string.'''
    with open(filename, 'r') as f:
        return [line for line in csv.reader(f)]


def load_diacritics(filename):
    '''Load diacritic data from a YAML file, returning a list of dictionaries
    with information about each diacritic.'''
    with open(filename, 'r') as f:
        return yaml.load(f)


def main():
    segments = load_segments(path.join(base_directory, 'data', 'features.csv'))
    diacritics = load_diacritics(path.join(base_directory, 'data', 'diacritics.yaml'))
    datasets = ['feature-strings', 'hayes-feature-strings', 'feature-strings-with-diacritics']
    print('Beginning benchmark\n===================\n')
    results = []
    for dataset in datasets:
        print('Running dataset: {0}'.format(dataset))
        filename = '{0}.csv'.format(dataset)
        accuracy = benchmark_match_accuracy(segments, diacritics, filename)
        results.append([dataset, accuracy])
    print('Finished!\n')
    print(tabulate(results, headers=['Dataset', 'Accuracy']))


def benchmark_match_accuracy(segments, diacritics, filename):
    '''Convert all given segments to feature strings, then convert back to
    segments. Use the given feature string file. Return the percentage
    accuracy of the conversion.'''
    feature_strings = load_feature_strings(path.join(base_directory, 'data', filename))
    matches = []
    for segment in segments:
        base_segment = Segment.from_dictionary(segment)
        matches.append((segment['IPA'], deparse.segment_match(feature_strings, base_segment)))
        for diacritic in diacritics:
            IPA_representation = segment['IPA'] + diacritic['IPA']
            if base_segment.meets_conditions(diacritic['conditions']):
                diacritic_segment = base_segment + Segment(diacritic['applies'].get('positive', []),
                                                           diacritic['applies'].get('negative', []))
                matches.append((IPA_representation, deparse.segment_match(feature_strings, diacritic_segment)))
    successes = len([match for match in matches if match[0] == match[1]])
    return (successes / len(matches))


if __name__ == '__main__':
    main()
Add match accuracy benchmark script
Add match accuracy benchmark script
Python
mit
kdelwat/LangEvolve,kdelwat/LangEvolve,kdelwat/LangEvolve
Add match accuracy benchmark script
from tabulate import tabulate
import csv
import yaml
import sys
import os.path as path

base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)

import deparse
from segment import Segment


def load_segments(filename):
    '''Load a segment feature matrix from a CSV file, returning a list of
    dictionaries with information about each segment.'''
    with open(filename, 'r') as f:
        return [segment for segment in csv.DictReader(f)]


def load_feature_strings(filename):
    '''Load a feature string list from a CSV file, returning a list of lists
    where the first item in each is the IPA string and the second is the
    corresponding feature string.'''
    with open(filename, 'r') as f:
        return [line for line in csv.reader(f)]


def load_diacritics(filename):
    '''Load diacritic data from a YAML file, returning a list of dictionaries
    with information about each diacritic.'''
    with open(filename, 'r') as f:
        return yaml.load(f)


def main():
    segments = load_segments(path.join(base_directory, 'data', 'features.csv'))
    diacritics = load_diacritics(path.join(base_directory, 'data', 'diacritics.yaml'))
    datasets = ['feature-strings', 'hayes-feature-strings', 'feature-strings-with-diacritics']
    print('Beginning benchmark\n===================\n')
    results = []
    for dataset in datasets:
        print('Running dataset: {0}'.format(dataset))
        filename = '{0}.csv'.format(dataset)
        accuracy = benchmark_match_accuracy(segments, diacritics, filename)
        results.append([dataset, accuracy])
    print('Finished!\n')
    print(tabulate(results, headers=['Dataset', 'Accuracy']))


def benchmark_match_accuracy(segments, diacritics, filename):
    '''Convert all given segments to feature strings, then convert back to
    segments. Use the given feature string file. Return the percentage
    accuracy of the conversion.'''
    feature_strings = load_feature_strings(path.join(base_directory, 'data', filename))
    matches = []
    for segment in segments:
        base_segment = Segment.from_dictionary(segment)
        matches.append((segment['IPA'], deparse.segment_match(feature_strings, base_segment)))
        for diacritic in diacritics:
            IPA_representation = segment['IPA'] + diacritic['IPA']
            if base_segment.meets_conditions(diacritic['conditions']):
                diacritic_segment = base_segment + Segment(diacritic['applies'].get('positive', []),
                                                           diacritic['applies'].get('negative', []))
                matches.append((IPA_representation, deparse.segment_match(feature_strings, diacritic_segment)))
    successes = len([match for match in matches if match[0] == match[1]])
    return (successes / len(matches))


if __name__ == '__main__':
    main()
<commit_before><commit_msg>Add match accuracy benchmark script<commit_after>
from tabulate import tabulate
import csv
import yaml
import sys
import os.path as path

base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)

import deparse
from segment import Segment


def load_segments(filename):
    '''Load a segment feature matrix from a CSV file, returning a list of
    dictionaries with information about each segment.'''
    with open(filename, 'r') as f:
        return [segment for segment in csv.DictReader(f)]


def load_feature_strings(filename):
    '''Load a feature string list from a CSV file, returning a list of lists
    where the first item in each is the IPA string and the second is the
    corresponding feature string.'''
    with open(filename, 'r') as f:
        return [line for line in csv.reader(f)]


def load_diacritics(filename):
    '''Load diacritic data from a YAML file, returning a list of dictionaries
    with information about each diacritic.'''
    with open(filename, 'r') as f:
        return yaml.load(f)


def main():
    segments = load_segments(path.join(base_directory, 'data', 'features.csv'))
    diacritics = load_diacritics(path.join(base_directory, 'data', 'diacritics.yaml'))
    datasets = ['feature-strings', 'hayes-feature-strings', 'feature-strings-with-diacritics']
    print('Beginning benchmark\n===================\n')
    results = []
    for dataset in datasets:
        print('Running dataset: {0}'.format(dataset))
        filename = '{0}.csv'.format(dataset)
        accuracy = benchmark_match_accuracy(segments, diacritics, filename)
        results.append([dataset, accuracy])
    print('Finished!\n')
    print(tabulate(results, headers=['Dataset', 'Accuracy']))


def benchmark_match_accuracy(segments, diacritics, filename):
    '''Convert all given segments to feature strings, then convert back to
    segments. Use the given feature string file. Return the percentage
    accuracy of the conversion.'''
    feature_strings = load_feature_strings(path.join(base_directory, 'data', filename))
    matches = []
    for segment in segments:
        base_segment = Segment.from_dictionary(segment)
        matches.append((segment['IPA'], deparse.segment_match(feature_strings, base_segment)))
        for diacritic in diacritics:
            IPA_representation = segment['IPA'] + diacritic['IPA']
            if base_segment.meets_conditions(diacritic['conditions']):
                diacritic_segment = base_segment + Segment(diacritic['applies'].get('positive', []),
                                                           diacritic['applies'].get('negative', []))
                matches.append((IPA_representation, deparse.segment_match(feature_strings, diacritic_segment)))
    successes = len([match for match in matches if match[0] == match[1]])
    return (successes / len(matches))


if __name__ == '__main__':
    main()
Add match accuracy benchmark scriptfrom tabulate import tabulate import csv import yaml import sys import os.path as path base_directory = path.dirname(path.dirname(path.abspath(__file__))) sys.path.append(base_directory) import deparse from segment import Segment def load_segments(filename): '''Load a segment feature matrix from a CSV file, returning a list of dictionaries with information about each segment. ''' with open(filename, 'r') as f: return [segment for segment in csv.DictReader(f)] def load_feature_strings(filename): '''Load a feature string list from a CSV file, returning a list of lists where the first item in each is the IPA string and the second is the corresponding feature string. ''' with open(filename, 'r') as f: return [line for line in csv.reader(f)] def load_diacritics(filename): '''Load diacritic data from a YAML file, returning a list of dictionaries with information about each diacritic. ''' with open(filename, 'r') as f: return yaml.load(f) def main(): segments = load_segments(path.join(base_directory, 'data', 'features.csv')) diacritics = load_diacritics(path.join(base_directory, 'data', 'diacritics.yaml')) datasets = ['feature-strings', 'hayes-feature-strings', 'feature-strings-with-diacritics'] print('Beginning benchmark\n===================\n') results = [] for dataset in datasets: print('Running dataset: {0}'.format(dataset)) filename = '{0}.csv'.format(dataset) accuracy = benchmark_match_accuracy(segments, diacritics, filename) results.append([dataset, accuracy]) print('Finished!\n') print(tabulate(results, headers=['Dataset', 'Accuracy'])) def benchmark_match_accuracy(segments, diacritics, filename): '''Convert all given segments to feature strings, then convert back to segments. Use the given feature string file. Return the percentage accuracy of the conversion. ''' feature_strings = load_feature_strings(path.join(base_directory, 'data', filename)) matches = [] for segment in segments: base_segment = Segment.from_dictionary(segment) matches.append((segment['IPA'], deparse.segment_match(feature_strings, base_segment))) for diacritic in diacritics: IPA_representation = segment['IPA'] + diacritic['IPA'] if base_segment.meets_conditions(diacritic['conditions']): diacritic_segment = base_segment + Segment(diacritic['applies'].get('positive', []), diacritic['applies'].get('negative', [])) matches.append((IPA_representation, deparse.segment_match(feature_strings, diacritic_segment))) successes = len([match for match in matches if match[0] == match[1]]) return (successes / len(matches)) if __name__ == '__main__': main()
<commit_before><commit_msg>Add match accuracy benchmark script<commit_after>from tabulate import tabulate import csv import yaml import sys import os.path as path base_directory = path.dirname(path.dirname(path.abspath(__file__))) sys.path.append(base_directory) import deparse from segment import Segment def load_segments(filename): '''Load a segment feature matrix from a CSV file, returning a list of dictionaries with information about each segment. ''' with open(filename, 'r') as f: return [segment for segment in csv.DictReader(f)] def load_feature_strings(filename): '''Load a feature string list from a CSV file, returning a list of lists where the first item in each is the IPA string and the second is the corresponding feature string. ''' with open(filename, 'r') as f: return [line for line in csv.reader(f)] def load_diacritics(filename): '''Load diacritic data from a YAML file, returning a list of dictionaries with information about each diacritic. ''' with open(filename, 'r') as f: return yaml.load(f) def main(): segments = load_segments(path.join(base_directory, 'data', 'features.csv')) diacritics = load_diacritics(path.join(base_directory, 'data', 'diacritics.yaml')) datasets = ['feature-strings', 'hayes-feature-strings', 'feature-strings-with-diacritics'] print('Beginning benchmark\n===================\n') results = [] for dataset in datasets: print('Running dataset: {0}'.format(dataset)) filename = '{0}.csv'.format(dataset) accuracy = benchmark_match_accuracy(segments, diacritics, filename) results.append([dataset, accuracy]) print('Finished!\n') print(tabulate(results, headers=['Dataset', 'Accuracy'])) def benchmark_match_accuracy(segments, diacritics, filename): '''Convert all given segments to feature strings, then convert back to segments. Use the given feature string file. Return the percentage accuracy of the conversion. ''' feature_strings = load_feature_strings(path.join(base_directory, 'data', filename)) matches = [] for segment in segments: base_segment = Segment.from_dictionary(segment) matches.append((segment['IPA'], deparse.segment_match(feature_strings, base_segment))) for diacritic in diacritics: IPA_representation = segment['IPA'] + diacritic['IPA'] if base_segment.meets_conditions(diacritic['conditions']): diacritic_segment = base_segment + Segment(diacritic['applies'].get('positive', []), diacritic['applies'].get('negative', [])) matches.append((IPA_representation, deparse.segment_match(feature_strings, diacritic_segment))) successes = len([match for match in matches if match[0] == match[1]]) return (successes / len(matches)) if __name__ == '__main__': main()
d627397fa4c7544a39c6be648ffaf82536ac06f0
tests/rules_tests/clearAfterNonTermRemove/WithEpsilonTest.py
tests/rules_tests/clearAfterNonTermRemove/WithEpsilonTest.py
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 17:25
:Licence GNUv3
Part of grammpy

"""

from unittest import TestCase, main
from grammpy import *


class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass


class Rules(Rule):
    rules = [
        ([A], [B, C]),
        ([A], [EPS]),
        ([B], [0, 1])]


class WithEpsilonTest(TestCase):
    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        self.g = Grammar()

    def setUp(self):
        self.g = Grammar(terminals=[0, 1],
                         nonterminals=[A, B, C],
                         rules=[Rules])

    def test_removeB(self):
        self.assertEqual(self.g.rules_count(), 3)
        self.g.remove_nonterm(B)
        self.assertEqual(self.g.rules_count(), 1)


if __name__ == '__main__':
    main()
Add test of rules with EPS
Add test of rules with EPS
Python
mit
PatrikValkovic/grammpy
Add test of rules with EPS
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 17:25
:Licence GNUv3
Part of grammpy

"""

from unittest import TestCase, main
from grammpy import *


class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass


class Rules(Rule):
    rules = [
        ([A], [B, C]),
        ([A], [EPS]),
        ([B], [0, 1])]


class WithEpsilonTest(TestCase):
    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        self.g = Grammar()

    def setUp(self):
        self.g = Grammar(terminals=[0, 1],
                         nonterminals=[A, B, C],
                         rules=[Rules])

    def test_removeB(self):
        self.assertEqual(self.g.rules_count(), 3)
        self.g.remove_nonterm(B)
        self.assertEqual(self.g.rules_count(), 1)


if __name__ == '__main__':
    main()
<commit_before><commit_msg>Add test of rules with EPS<commit_after>
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 17:25
:Licence GNUv3
Part of grammpy

"""

from unittest import TestCase, main
from grammpy import *


class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass


class Rules(Rule):
    rules = [
        ([A], [B, C]),
        ([A], [EPS]),
        ([B], [0, 1])]


class WithEpsilonTest(TestCase):
    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        self.g = Grammar()

    def setUp(self):
        self.g = Grammar(terminals=[0, 1],
                         nonterminals=[A, B, C],
                         rules=[Rules])

    def test_removeB(self):
        self.assertEqual(self.g.rules_count(), 3)
        self.g.remove_nonterm(B)
        self.assertEqual(self.g.rules_count(), 1)


if __name__ == '__main__':
    main()

Add test of rules with EPS#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 17:25
:Licence GNUv3
Part of grammpy

"""

from unittest import TestCase, main
from grammpy import *


class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass


class Rules(Rule):
    rules = [
        ([A], [B, C]),
        ([A], [EPS]),
        ([B], [0, 1])]


class WithEpsilonTest(TestCase):
    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        self.g = Grammar()

    def setUp(self):
        self.g = Grammar(terminals=[0, 1],
                         nonterminals=[A, B, C],
                         rules=[Rules])

    def test_removeB(self):
        self.assertEqual(self.g.rules_count(), 3)
        self.g.remove_nonterm(B)
        self.assertEqual(self.g.rules_count(), 1)


if __name__ == '__main__':
    main()

<commit_before><commit_msg>Add test of rules with EPS<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 19.08.2017 17:25
:Licence GNUv3
Part of grammpy

"""

from unittest import TestCase, main
from grammpy import *


class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass


class Rules(Rule):
    rules = [
        ([A], [B, C]),
        ([A], [EPS]),
        ([B], [0, 1])]


class WithEpsilonTest(TestCase):
    def __init__(self, methodName='runTest'):
        super().__init__(methodName)
        self.g = Grammar()

    def setUp(self):
        self.g = Grammar(terminals=[0, 1],
                         nonterminals=[A, B, C],
                         rules=[Rules])

    def test_removeB(self):
        self.assertEqual(self.g.rules_count(), 3)
        self.g.remove_nonterm(B)
        self.assertEqual(self.g.rules_count(), 1)


if __name__ == '__main__':
    main()
6dd95098cd65fc31e07ae6339d9c86ec3028cf5e
examples/load_save.py
examples/load_save.py
from __future__ import print_function

import numpy

from molml.features import CoulombMatrix
from molml.crystal import GenerallizedCrystal
from molml.utils import load_json


# Define some base data
H2_ELES = ['H', 'H']
H2_COORDS = [
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
H2_UNIT = numpy.array([
    [2., .5, 0.],
    [.25, 1., 0.],
    [0., .3, 1.],
])
H2 = (H2_ELES, H2_COORDS)
H2_FULL = (H2_ELES, H2_COORDS, H2_UNIT)

HCN_ELES = ['H', 'C', 'N']
HCN_COORDS = [
    [-1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
HCN = (HCN_ELES, HCN_COORDS)


if __name__ == "__main__":
    # Example of fitting the Coulomb matrix and then saving it
    feat = CoulombMatrix()
    feat.fit([H2, HCN])
    print("Saving Model")
    feat.save_json("coulomb_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_model.json")
    print(feat2.transform([H2, HCN]))

    # Example of fitting a generallized crystal with the Coulomb matrix and
    # then saving it
    input_type = ("elements", "coords", "unit_cell")
    radius = 4.1
    feat = CoulombMatrix(input_type=input_type)
    crystal = GenerallizedCrystal(transformer=feat, radius=radius)
    crystal.fit([H2_FULL])

    print("Saving Model")
    crystal.save_json("coulomb_crystal_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_crystal_model.json")
    print(feat2.transform([H2_FULL]))
Add a script to show loading/saving models
Add a script to show loading/saving models
Python
mit
crcollins/molml
Add a script to show loading/saving models
from __future__ import print_function

import numpy

from molml.features import CoulombMatrix
from molml.crystal import GenerallizedCrystal
from molml.utils import load_json


# Define some base data
H2_ELES = ['H', 'H']
H2_COORDS = [
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
H2_UNIT = numpy.array([
    [2., .5, 0.],
    [.25, 1., 0.],
    [0., .3, 1.],
])
H2 = (H2_ELES, H2_COORDS)
H2_FULL = (H2_ELES, H2_COORDS, H2_UNIT)

HCN_ELES = ['H', 'C', 'N']
HCN_COORDS = [
    [-1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
HCN = (HCN_ELES, HCN_COORDS)


if __name__ == "__main__":
    # Example of fitting the Coulomb matrix and then saving it
    feat = CoulombMatrix()
    feat.fit([H2, HCN])
    print("Saving Model")
    feat.save_json("coulomb_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_model.json")
    print(feat2.transform([H2, HCN]))

    # Example of fitting a generallized crystal with the Coulomb matrix and
    # then saving it
    input_type = ("elements", "coords", "unit_cell")
    radius = 4.1
    feat = CoulombMatrix(input_type=input_type)
    crystal = GenerallizedCrystal(transformer=feat, radius=radius)
    crystal.fit([H2_FULL])

    print("Saving Model")
    crystal.save_json("coulomb_crystal_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_crystal_model.json")
    print(feat2.transform([H2_FULL]))
<commit_before><commit_msg>Add a script to show loading/saving models<commit_after>
from __future__ import print_function

import numpy

from molml.features import CoulombMatrix
from molml.crystal import GenerallizedCrystal
from molml.utils import load_json


# Define some base data
H2_ELES = ['H', 'H']
H2_COORDS = [
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
H2_UNIT = numpy.array([
    [2., .5, 0.],
    [.25, 1., 0.],
    [0., .3, 1.],
])
H2 = (H2_ELES, H2_COORDS)
H2_FULL = (H2_ELES, H2_COORDS, H2_UNIT)

HCN_ELES = ['H', 'C', 'N']
HCN_COORDS = [
    [-1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
HCN = (HCN_ELES, HCN_COORDS)


if __name__ == "__main__":
    # Example of fitting the Coulomb matrix and then saving it
    feat = CoulombMatrix()
    feat.fit([H2, HCN])
    print("Saving Model")
    feat.save_json("coulomb_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_model.json")
    print(feat2.transform([H2, HCN]))

    # Example of fitting a generallized crystal with the Coulomb matrix and
    # then saving it
    input_type = ("elements", "coords", "unit_cell")
    radius = 4.1
    feat = CoulombMatrix(input_type=input_type)
    crystal = GenerallizedCrystal(transformer=feat, radius=radius)
    crystal.fit([H2_FULL])

    print("Saving Model")
    crystal.save_json("coulomb_crystal_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_crystal_model.json")
    print(feat2.transform([H2_FULL]))

Add a script to show loading/saving modelsfrom __future__ import print_function

import numpy

from molml.features import CoulombMatrix
from molml.crystal import GenerallizedCrystal
from molml.utils import load_json


# Define some base data
H2_ELES = ['H', 'H']
H2_COORDS = [
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
H2_UNIT = numpy.array([
    [2., .5, 0.],
    [.25, 1., 0.],
    [0., .3, 1.],
])
H2 = (H2_ELES, H2_COORDS)
H2_FULL = (H2_ELES, H2_COORDS, H2_UNIT)

HCN_ELES = ['H', 'C', 'N']
HCN_COORDS = [
    [-1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
HCN = (HCN_ELES, HCN_COORDS)


if __name__ == "__main__":
    # Example of fitting the Coulomb matrix and then saving it
    feat = CoulombMatrix()
    feat.fit([H2, HCN])
    print("Saving Model")
    feat.save_json("coulomb_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_model.json")
    print(feat2.transform([H2, HCN]))

    # Example of fitting a generallized crystal with the Coulomb matrix and
    # then saving it
    input_type = ("elements", "coords", "unit_cell")
    radius = 4.1
    feat = CoulombMatrix(input_type=input_type)
    crystal = GenerallizedCrystal(transformer=feat, radius=radius)
    crystal.fit([H2_FULL])

    print("Saving Model")
    crystal.save_json("coulomb_crystal_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_crystal_model.json")
    print(feat2.transform([H2_FULL]))

<commit_before><commit_msg>Add a script to show loading/saving models<commit_after>from __future__ import print_function

import numpy

from molml.features import CoulombMatrix
from molml.crystal import GenerallizedCrystal
from molml.utils import load_json


# Define some base data
H2_ELES = ['H', 'H']
H2_COORDS = [
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
H2_UNIT = numpy.array([
    [2., .5, 0.],
    [.25, 1., 0.],
    [0., .3, 1.],
])
H2 = (H2_ELES, H2_COORDS)
H2_FULL = (H2_ELES, H2_COORDS, H2_UNIT)

HCN_ELES = ['H', 'C', 'N']
HCN_COORDS = [
    [-1.0, 0.0, 0.0],
    [0.0, 0.0, 0.0],
    [1.0, 0.0, 0.0],
]
HCN = (HCN_ELES, HCN_COORDS)


if __name__ == "__main__":
    # Example of fitting the Coulomb matrix and then saving it
    feat = CoulombMatrix()
    feat.fit([H2, HCN])
    print("Saving Model")
    feat.save_json("coulomb_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_model.json")
    print(feat2.transform([H2, HCN]))

    # Example of fitting a generallized crystal with the Coulomb matrix and
    # then saving it
    input_type = ("elements", "coords", "unit_cell")
    radius = 4.1
    feat = CoulombMatrix(input_type=input_type)
    crystal = GenerallizedCrystal(transformer=feat, radius=radius)
    crystal.fit([H2_FULL])

    print("Saving Model")
    crystal.save_json("coulomb_crystal_model.json")

    print("Loading Model")
    feat2 = load_json("coulomb_crystal_model.json")
    print(feat2.transform([H2_FULL]))
10e9fd1dd93cee2fd96fcb50f9a5ec1607fb9c66
jacquard/directory/django.py
jacquard/directory/django.py
import sqlalchemy

from .base import Directory, User


class DjangoDirectory(Directory):
    query = """
    SELECT auth_user.id, auth_user.date_joined, auth_user.is_superuser
    FROM auth_user
    """

    def __init__(self, url):
        self.engine = sqlalchemy.create_engine(url)

    def describe_user(self, row):
        tags = []
        if row.is_superuser:
            tags.append('superuser')
        return User(
            id=row.id,
            join_date=row.date_joined,
            tags=tuple(tags),
        )

    def lookup_user(self, user_id):
        query = self.query + " WHERE id = ?"
        result = self.engine.execute(query, int(user_id))
        return self.describe_user(next(iter(result)))

    def all_users(self):
        result = self.engine.execute(self.query)
        for row in result:
            yield self.describe_user(row)
Add base Django user directory
Add base Django user directory
Python
mit
prophile/jacquard,prophile/jacquard
Add base Django user directory
import sqlalchemy

from .base import Directory, User


class DjangoDirectory(Directory):
    query = """
    SELECT auth_user.id, auth_user.date_joined, auth_user.is_superuser
    FROM auth_user
    """

    def __init__(self, url):
        self.engine = sqlalchemy.create_engine(url)

    def describe_user(self, row):
        tags = []
        if row.is_superuser:
            tags.append('superuser')
        return User(
            id=row.id,
            join_date=row.date_joined,
            tags=tuple(tags),
        )

    def lookup_user(self, user_id):
        query = self.query + " WHERE id = ?"
        result = self.engine.execute(query, int(user_id))
        return self.describe_user(next(iter(result)))

    def all_users(self):
        result = self.engine.execute(self.query)
        for row in result:
            yield self.describe_user(row)
<commit_before><commit_msg>Add base Django user directory<commit_after>
import sqlalchemy

from .base import Directory, User


class DjangoDirectory(Directory):
    query = """
    SELECT auth_user.id, auth_user.date_joined, auth_user.is_superuser
    FROM auth_user
    """

    def __init__(self, url):
        self.engine = sqlalchemy.create_engine(url)

    def describe_user(self, row):
        tags = []
        if row.is_superuser:
            tags.append('superuser')
        return User(
            id=row.id,
            join_date=row.date_joined,
            tags=tuple(tags),
        )

    def lookup_user(self, user_id):
        query = self.query + " WHERE id = ?"
        result = self.engine.execute(query, int(user_id))
        return self.describe_user(next(iter(result)))

    def all_users(self):
        result = self.engine.execute(self.query)
        for row in result:
            yield self.describe_user(row)

Add base Django user directoryimport sqlalchemy

from .base import Directory, User


class DjangoDirectory(Directory):
    query = """
    SELECT auth_user.id, auth_user.date_joined, auth_user.is_superuser
    FROM auth_user
    """

    def __init__(self, url):
        self.engine = sqlalchemy.create_engine(url)

    def describe_user(self, row):
        tags = []
        if row.is_superuser:
            tags.append('superuser')
        return User(
            id=row.id,
            join_date=row.date_joined,
            tags=tuple(tags),
        )

    def lookup_user(self, user_id):
        query = self.query + " WHERE id = ?"
        result = self.engine.execute(query, int(user_id))
        return self.describe_user(next(iter(result)))

    def all_users(self):
        result = self.engine.execute(self.query)
        for row in result:
            yield self.describe_user(row)

<commit_before><commit_msg>Add base Django user directory<commit_after>import sqlalchemy

from .base import Directory, User


class DjangoDirectory(Directory):
    query = """
    SELECT auth_user.id, auth_user.date_joined, auth_user.is_superuser
    FROM auth_user
    """

    def __init__(self, url):
        self.engine = sqlalchemy.create_engine(url)

    def describe_user(self, row):
        tags = []
        if row.is_superuser:
            tags.append('superuser')
        return User(
            id=row.id,
            join_date=row.date_joined,
            tags=tuple(tags),
        )

    def lookup_user(self, user_id):
        query = self.query + " WHERE id = ?"
        result = self.engine.execute(query, int(user_id))
        return self.describe_user(next(iter(result)))

    def all_users(self):
        result = self.engine.execute(self.query)
        for row in result:
            yield self.describe_user(row)
38eafc8a4a78da04b0260d2bad6a38addb82ee4a
admin_honeypot/migrations/0002_add_field_LoginAttempt_path.py
admin_honeypot/migrations/0002_add_field_LoginAttempt_path.py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'LoginAttempt.path'
        db.add_column('admin_honeypot_loginattempt', 'path',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'LoginAttempt.path'
        db.delete_column('admin_honeypot_loginattempt', 'path')

    models = {
        'admin_honeypot.loginattempt': {
            'Meta': {'ordering': "('timestamp',)", 'object_name': 'LoginAttempt'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['admin_honeypot']
Add South migration for LoginAttempt.path
Add South migration for LoginAttempt.path
Python
mit
wujuguang/django-admin-honeypot,ajostergaard/django-admin-honeypot,dmpayton/django-admin-honeypot,Samael500/django-admin-honeypot,Samael500/django-admin-honeypot,ajostergaard/django-admin-honeypot,wujuguang/django-admin-honeypot,javierchavez/django-admin-honeypot,javierchavez/django-admin-honeypot,dmpayton/django-admin-honeypot
Add South migration for LoginAttempt.path
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'LoginAttempt.path'
        db.add_column('admin_honeypot_loginattempt', 'path',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'LoginAttempt.path'
        db.delete_column('admin_honeypot_loginattempt', 'path')

    models = {
        'admin_honeypot.loginattempt': {
            'Meta': {'ordering': "('timestamp',)", 'object_name': 'LoginAttempt'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['admin_honeypot']
<commit_before><commit_msg>Add South migration for LoginAttempt.path<commit_after>
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'LoginAttempt.path'
        db.add_column('admin_honeypot_loginattempt', 'path',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'LoginAttempt.path'
        db.delete_column('admin_honeypot_loginattempt', 'path')

    models = {
        'admin_honeypot.loginattempt': {
            'Meta': {'ordering': "('timestamp',)", 'object_name': 'LoginAttempt'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['admin_honeypot']

Add South migration for LoginAttempt.path# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'LoginAttempt.path'
        db.add_column('admin_honeypot_loginattempt', 'path',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'LoginAttempt.path'
        db.delete_column('admin_honeypot_loginattempt', 'path')

    models = {
        'admin_honeypot.loginattempt': {
            'Meta': {'ordering': "('timestamp',)", 'object_name': 'LoginAttempt'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['admin_honeypot']

<commit_before><commit_msg>Add South migration for LoginAttempt.path<commit_after># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'LoginAttempt.path'
        db.add_column('admin_honeypot_loginattempt', 'path',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'LoginAttempt.path'
        db.delete_column('admin_honeypot_loginattempt', 'path')

    models = {
        'admin_honeypot.loginattempt': {
            'Meta': {'ordering': "('timestamp',)", 'object_name': 'LoginAttempt'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['admin_honeypot']
121e63c069a83411f15c66d42a9ade871ad9afab
dosagelib/plugins/studiokhimera.py
dosagelib/plugins/studiokhimera.py
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function

from ..scraper import _ParserScraper


class StudioKhimera(_ParserScraper):
    imageSearch = '//figure[@class="gallery-item"]//img/@data-src'
    prevSearch = '//a[@rel="prev"]'

    def __init__(self, name, sub, last=None, adult=False, fixNames=False):
        super(StudioKhimera, self).__init__('StudioKhimera/' + name)
        self.baseUrl = 'https://%s.studiokhimera.com/' % sub
        self.stripUrl = self.baseUrl + '%s/'
        self.url = self.baseUrl + 'category/comicChapter/?latest'
        self.multipleImagesPerStrip = True
        if last:
            self.last = True
            self.url = self.stripUrl % last
            self.endOfLife = True
        if adult:
            self.adult = True

    def starter(self):
        # Retrieve list of chapter links
        chapterPage = self.getPage(self.baseUrl + 'archive/')
        self.chapters = chapterPage.xpath('//main//a/@href')
        self.firstStripUrl = self.chapters[0]
        return self.chapters[-1]

    def getPrevUrl(self, url, data):
        # Select previous chapter from list
        index = [i for i, ch in enumerate(self.chapters) if ch == url][0]
        if index == 0:
            return None
        return self.chapters[index - 1]

    def namer(self, imageUrl, pageUrl):
        # Fix inconsistent filenames
        filename = imageUrl.rsplit('/', 1)[-1]
        if 'uberquest' in pageUrl:
            filename = filename.replace('Page', 'UberQuest')
            filename = filename.replace('UberQuest01.', 'UberQuest001.')
            filename = filename.replace('UberQuest98.', 'UberQuest098.')
            filename = filename.replace('UberQuest99.', 'UberQuest099.')
        return filename

    @classmethod
    def getmodules(cls):
        return (
            cls('Draconia', 'thedraconiachronicles', adult=True),
            cls('Eorah', 'eorah'),
            cls('Mousechievous', 'mousechievous'),
            cls('UberQuest', 'uberquest'),
        )
Add site engine for StudioKhimera
Add site engine for StudioKhimera
Python
mit
webcomics/dosage,peterjanes/dosage,webcomics/dosage,peterjanes/dosage
Add site engine for StudioKhimera
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function

from ..scraper import _ParserScraper


class StudioKhimera(_ParserScraper):
    imageSearch = '//figure[@class="gallery-item"]//img/@data-src'
    prevSearch = '//a[@rel="prev"]'

    def __init__(self, name, sub, last=None, adult=False, fixNames=False):
        super(StudioKhimera, self).__init__('StudioKhimera/' + name)
        self.baseUrl = 'https://%s.studiokhimera.com/' % sub
        self.stripUrl = self.baseUrl + '%s/'
        self.url = self.baseUrl + 'category/comicChapter/?latest'
        self.multipleImagesPerStrip = True
        if last:
            self.last = True
            self.url = self.stripUrl % last
            self.endOfLife = True
        if adult:
            self.adult = True

    def starter(self):
        # Retrieve list of chapter links
        chapterPage = self.getPage(self.baseUrl + 'archive/')
        self.chapters = chapterPage.xpath('//main//a/@href')
        self.firstStripUrl = self.chapters[0]
        return self.chapters[-1]

    def getPrevUrl(self, url, data):
        # Select previous chapter from list
        index = [i for i, ch in enumerate(self.chapters) if ch == url][0]
        if index == 0:
            return None
        return self.chapters[index - 1]

    def namer(self, imageUrl, pageUrl):
        # Fix inconsistent filenames
        filename = imageUrl.rsplit('/', 1)[-1]
        if 'uberquest' in pageUrl:
            filename = filename.replace('Page', 'UberQuest')
            filename = filename.replace('UberQuest01.', 'UberQuest001.')
            filename = filename.replace('UberQuest98.', 'UberQuest098.')
            filename = filename.replace('UberQuest99.', 'UberQuest099.')
        return filename

    @classmethod
    def getmodules(cls):
        return (
            cls('Draconia', 'thedraconiachronicles', adult=True),
            cls('Eorah', 'eorah'),
            cls('Mousechievous', 'mousechievous'),
            cls('UberQuest', 'uberquest'),
        )
<commit_before><commit_msg>Add site engine for StudioKhimera<commit_after>
# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function

from ..scraper import _ParserScraper


class StudioKhimera(_ParserScraper):
    imageSearch = '//figure[@class="gallery-item"]//img/@data-src'
    prevSearch = '//a[@rel="prev"]'

    def __init__(self, name, sub, last=None, adult=False, fixNames=False):
        super(StudioKhimera, self).__init__('StudioKhimera/' + name)
        self.baseUrl = 'https://%s.studiokhimera.com/' % sub
        self.stripUrl = self.baseUrl + '%s/'
        self.url = self.baseUrl + 'category/comicChapter/?latest'
        self.multipleImagesPerStrip = True
        if last:
            self.last = True
            self.url = self.stripUrl % last
            self.endOfLife = True
        if adult:
            self.adult = True

    def starter(self):
        # Retrieve list of chapter links
        chapterPage = self.getPage(self.baseUrl + 'archive/')
        self.chapters = chapterPage.xpath('//main//a/@href')
        self.firstStripUrl = self.chapters[0]
        return self.chapters[-1]

    def getPrevUrl(self, url, data):
        # Select previous chapter from list
        index = [i for i, ch in enumerate(self.chapters) if ch == url][0]
        if index == 0:
            return None
        return self.chapters[index - 1]

    def namer(self, imageUrl, pageUrl):
        # Fix inconsistent filenames
        filename = imageUrl.rsplit('/', 1)[-1]
        if 'uberquest' in pageUrl:
            filename = filename.replace('Page', 'UberQuest')
            filename = filename.replace('UberQuest01.', 'UberQuest001.')
            filename = filename.replace('UberQuest98.', 'UberQuest098.')
            filename = filename.replace('UberQuest99.', 'UberQuest099.')
        return filename

    @classmethod
    def getmodules(cls):
        return (
            cls('Draconia', 'thedraconiachronicles', adult=True),
            cls('Eorah', 'eorah'),
            cls('Mousechievous', 'mousechievous'),
            cls('UberQuest', 'uberquest'),
        )

Add site engine for StudioKhimera# -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function

from ..scraper import _ParserScraper


class StudioKhimera(_ParserScraper):
    imageSearch = '//figure[@class="gallery-item"]//img/@data-src'
    prevSearch = '//a[@rel="prev"]'

    def __init__(self, name, sub, last=None, adult=False, fixNames=False):
        super(StudioKhimera, self).__init__('StudioKhimera/' + name)
        self.baseUrl = 'https://%s.studiokhimera.com/' % sub
        self.stripUrl = self.baseUrl + '%s/'
        self.url = self.baseUrl + 'category/comicChapter/?latest'
        self.multipleImagesPerStrip = True
        if last:
            self.last = True
            self.url = self.stripUrl % last
            self.endOfLife = True
        if adult:
            self.adult = True

    def starter(self):
        # Retrieve list of chapter links
        chapterPage = self.getPage(self.baseUrl + 'archive/')
        self.chapters = chapterPage.xpath('//main//a/@href')
        self.firstStripUrl = self.chapters[0]
        return self.chapters[-1]

    def getPrevUrl(self, url, data):
        # Select previous chapter from list
        index = [i for i, ch in enumerate(self.chapters) if ch == url][0]
        if index == 0:
            return None
        return self.chapters[index - 1]

    def namer(self, imageUrl, pageUrl):
        # Fix inconsistent filenames
        filename = imageUrl.rsplit('/', 1)[-1]
        if 'uberquest' in pageUrl:
            filename = filename.replace('Page', 'UberQuest')
            filename = filename.replace('UberQuest01.', 'UberQuest001.')
            filename = filename.replace('UberQuest98.', 'UberQuest098.')
            filename = filename.replace('UberQuest99.', 'UberQuest099.')
        return filename

    @classmethod
    def getmodules(cls):
        return (
            cls('Draconia', 'thedraconiachronicles', adult=True),
            cls('Eorah', 'eorah'),
            cls('Mousechievous', 'mousechievous'),
            cls('UberQuest', 'uberquest'),
        )

<commit_before><commit_msg>Add site engine for StudioKhimera<commit_after># -*- coding: utf-8 -*-
# Copyright (C) 2019-2020 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function

from ..scraper import _ParserScraper


class StudioKhimera(_ParserScraper):
    imageSearch = '//figure[@class="gallery-item"]//img/@data-src'
    prevSearch = '//a[@rel="prev"]'

    def __init__(self, name, sub, last=None, adult=False, fixNames=False):
        super(StudioKhimera, self).__init__('StudioKhimera/' + name)
        self.baseUrl = 'https://%s.studiokhimera.com/' % sub
        self.stripUrl = self.baseUrl + '%s/'
        self.url = self.baseUrl + 'category/comicChapter/?latest'
        self.multipleImagesPerStrip = True
        if last:
            self.last = True
            self.url = self.stripUrl % last
            self.endOfLife = True
        if adult:
            self.adult = True

    def starter(self):
        # Retrieve list of chapter links
        chapterPage = self.getPage(self.baseUrl + 'archive/')
        self.chapters = chapterPage.xpath('//main//a/@href')
        self.firstStripUrl = self.chapters[0]
        return self.chapters[-1]

    def getPrevUrl(self, url, data):
        # Select previous chapter from list
        index = [i for i, ch in enumerate(self.chapters) if ch == url][0]
        if index == 0:
            return None
        return self.chapters[index - 1]

    def namer(self, imageUrl, pageUrl):
        # Fix inconsistent filenames
        filename = imageUrl.rsplit('/', 1)[-1]
        if 'uberquest' in pageUrl:
            filename = filename.replace('Page', 'UberQuest')
            filename = filename.replace('UberQuest01.', 'UberQuest001.')
            filename = filename.replace('UberQuest98.', 'UberQuest098.')
            filename = filename.replace('UberQuest99.', 'UberQuest099.')
        return filename

    @classmethod
    def getmodules(cls):
        return (
            cls('Draconia', 'thedraconiachronicles', adult=True),
            cls('Eorah', 'eorah'),
            cls('Mousechievous', 'mousechievous'),
            cls('UberQuest', 'uberquest'),
        )
8e870ebb5d55207d45cc2b583291d6d5e0f2d4c1
set_1/challenge_2.py
set_1/challenge_2.py
def buffers_xor(buffer1, buffer2):
    """buffer1, buffer2: byte-like objects."""
    try:
        return bytes(b1 ^ b2 for b1, b2 in zip(buffer1, buffer2))
    except TypeError:
        print("Function 'buffers_xor' takes byte-like "
              "objects as arguments.")


if __name__ == '__main__':
    print(buffers_xor(bytes.fromhex('1c0111001f010100061a024b53535009181c'),
                      bytes.fromhex('686974207468652062756c6c277320657965')))
    print(bytes.fromhex('746865206b696420646f6e277420706c6179'))
Set 1 / Challenge 2 done.
Set 1 / Challenge 2 done.
Python
mit
vitkarpenko/cryptopals
Set 1 / Challenge 2 done.
def buffers_xor(buffer1, buffer2):
    """buffer1, buffer2: byte-like objects."""
    try:
        return bytes(b1 ^ b2 for b1, b2 in zip(buffer1, buffer2))
    except TypeError:
        print("Function 'buffers_xor' takes byte-like "
              "objects as arguments.")


if __name__ == '__main__':
    print(buffers_xor(bytes.fromhex('1c0111001f010100061a024b53535009181c'),
                      bytes.fromhex('686974207468652062756c6c277320657965')))
    print(bytes.fromhex('746865206b696420646f6e277420706c6179'))
<commit_before><commit_msg>Set 1 / Challenge 2 done.<commit_after>
def buffers_xor(buffer1, buffer2):
    """buffer1, buffer2: byte-like objects."""
    try:
        return bytes(b1 ^ b2 for b1, b2 in zip(buffer1, buffer2))
    except TypeError:
        print("Function 'buffers_xor' takes byte-like "
              "objects as arguments.")


if __name__ == '__main__':
    print(buffers_xor(bytes.fromhex('1c0111001f010100061a024b53535009181c'),
                      bytes.fromhex('686974207468652062756c6c277320657965')))
    print(bytes.fromhex('746865206b696420646f6e277420706c6179'))

Set 1 / Challenge 2 done.def buffers_xor(buffer1, buffer2):
    """buffer1, buffer2: byte-like objects."""
    try:
        return bytes(b1 ^ b2 for b1, b2 in zip(buffer1, buffer2))
    except TypeError:
        print("Function 'buffers_xor' takes byte-like "
              "objects as arguments.")


if __name__ == '__main__':
    print(buffers_xor(bytes.fromhex('1c0111001f010100061a024b53535009181c'),
                      bytes.fromhex('686974207468652062756c6c277320657965')))
    print(bytes.fromhex('746865206b696420646f6e277420706c6179'))

<commit_before><commit_msg>Set 1 / Challenge 2 done.<commit_after>def buffers_xor(buffer1, buffer2):
    """buffer1, buffer2: byte-like objects."""
    try:
        return bytes(b1 ^ b2 for b1, b2 in zip(buffer1, buffer2))
    except TypeError:
        print("Function 'buffers_xor' takes byte-like "
              "objects as arguments.")


if __name__ == '__main__':
    print(buffers_xor(bytes.fromhex('1c0111001f010100061a024b53535009181c'),
                      bytes.fromhex('686974207468652062756c6c277320657965')))
    print(bytes.fromhex('746865206b696420646f6e277420706c6179'))