Dataset schema (one commit per row):

column         type            min        max
commit         stringlengths   40         40
old_file       stringlengths   4          150
new_file       stringlengths   4          150
old_contents   stringlengths   0          3.26k
new_contents   stringlengths   1          4.43k
subject        stringlengths   15         501
message        stringlengths   15         4.06k
lang           stringclasses   4 values
license        stringclasses   13 values
repos          stringlengths   5          91.5k
diff           stringlengths   0          4.35k
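Each row below pairs a commit SHA with the touched file, its contents before and after the commit, the commit subject and message, the language, the license, the repositories the file appears in, and the unified diff. As a minimal sketch of how such a dataset might be consumed (the repository id "user/commit-dataset" is a placeholder, not the dataset's real name), the Hugging Face datasets library could load and inspect it:

from datasets import load_dataset

# Placeholder dataset id -- substitute the actual repository name.
ds = load_dataset("user/commit-dataset", split="train")

row = ds[0]
print(row["commit"])    # 40-character commit SHA
print(row["new_file"])  # path of the file after the commit
print(row["subject"])   # first line of the commit message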
b0c54c2d88db58a56e05f5da7d0d069a16c2b852
fabfile.py
fabfile.py
#
# Edit `config` line to fit your environment.

# To install fabric and cuisine,
#
# # update setuptools
# $ sudo pip install -U setuptools
# $ sudo pip install setuptools
#
# $ sudo pip install fabric
# $ sudo pip install cuisine
#
# You may need to specify ARCHFLAGS on a MacOSX environment.
# (https://langui.sh/2014/03/10/wunused-command-line-argument-hard-error-in-future-is-a-harsh-mistress/)
#
# $ sudo ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future pip install fabric
#
#
from fabric.api import (env, sudo, put, get, cd, local)
from fabric.utils import puts
from fabric.colors import green
from fabric.decorators import task
from cuisine import (run, dir_ensure, dir_exists)

# ----- config --------------------------------------------
env.hosts = ['localhost', 'k', 'linuxbox']
env.use_ssh_config = True

# possible types are 'k_cross', 'linux64' or 'darwin64'
host_type = {
    'k' : 'k_cross'
    , 'xeon.francine' : 'linux64'
    , 'localhost' : 'darwin64'
}

remote_build_dir = {
    'k' : '/path/to/dir'
    , 'linuxbox' : '/tmp'
    , 'localhost' : '/tmp'
}
# ---------------------------------------------------------

@task
def setup():

    if not dir_exists(remote_build_dir[env.host_string]):
        dir_ensure(remote_build_dir[env.host_string], recursive=True)

    dir_ensure(remote_build_dir[env.host_string] + '/build')

    setup_surface()

# Dependency: (None)
@task
def setup_surface():
    puts(green('Configuring SURFACE'))

    local('git archive --format=tar.gz --prefix=SURFACE/ HEAD -o SURFACE.tar.gz')
    put('SURFACE.tar.gz', remote_build_dir[env.host_string] + '/SURFACE.tar.gz')

    with cd(remote_build_dir[env.host_string]):
        run('rm -rf SURFACE')
        run('tar -zxvf SURFACE.tar.gz')

    setup_script = ""
    if host_type[env.host_string] == 'k_cross':
        setup_script = './scripts/cmake_k_cross.sh'
    elif host_type[env.host_string] == 'linux64':
        setup_script = './scripts/cmake_linux_x64.sh'
    elif host_type[env.host_string] == 'darwin64':
        setup_script = './scripts/cmake_macosx.sh'
    else:
        print(host_type[env.host_string])
        raise ValueError('unknown host type: %s' % host_type[env.host_string])

    with cd(remote_build_dir[env.host_string] + '/SURFACE'):
        run(setup_script)
        run('make -C build')
Add build script using python fabric.
Add build script using python fabric.
Python
bsd-2-clause
avr-aics-riken/SURFACE,avr-aics-riken/SURFACE,avr-aics-riken/SURFACE,avr-aics-riken/SURFACE
--- +++ @@ -0,0 +1,78 @@ +# +# Edit `config` line to fit in your environemnt. + +# To install fabric and cuisne, +# +# # update setuptools +# $ sudo pip install -U setuptools +# $ sudo pip install setuptools +# +# $ sudo pip install fabric +# $ sudo pip install cuisine +# +# You may need to speicfy ARCHFLAGFS on MacOSX environemnt. +# (https://langui.sh/2014/03/10/wunused-command-line-argument-hard-error-in-future-is-a-harsh-mistress/) +# +# $ sudo ARCHFLAGS=-Wno-error=unused-command-line-argument-hard-error-in-future pip install fabric +# +# +from fabric.api import (env, sudo, put, get, cd, local) +from fabric.utils import puts +from fabric.colors import green +from fabric.decorators import task +from cuisine import (run, dir_ensure, dir_exists) + +# ----- config -------------------------------------------- +env.hosts = ['localhost', 'k', 'linuxbox'] +env.use_ssh_config = True + +# possible types are 'k_cross', 'linux64' or 'darwin64' +host_type = { + 'k' : 'k_cross' + , 'xeon.francine' : 'linux64' + , 'localhost' : 'darwin64' +} + +remote_build_dir = { + 'k' : '/path/to/dir' + , 'linuxbox' : '/tmp' + , 'localhost' : '/tmp' +} +# --------------------------------------------------------- + +@task +def setup(): + + if not dir_exists(remote_build_dir[env.host_string]): + dir_ensure(remote_build_dir[env.host_string], recursive=True) + + dir_ensure(remote_build_dir[env.host_string] + '/build') + + setup_surface() + +# Dependency: (None) +@task +def setup_surface(): + puts(green('Configuring SURFACE')) + + local('git archive --format=tar.gz --prefix=SURFACE/ HEAD -o SURFACE.tar.gz') + put('SURFACE.tar.gz', remote_build_dir[env.host_string] + '/SURFACE.tar.gz') + + with cd(remote_build_dir[env.host_string]): + run('rm -rf SURFACE') + run('tar -zxvf SURFACE.tar.gz') + + setup_script = "" + if host_type[env.host_string] == 'k_cross': + setup_script = './scripts/cmake_k_cross.sh' + elif host_type[env.host_string] == 'linux64': + setup_script = './scripts/cmake_linux_x64.sh' + elif host_type[env.host_string] == 'darwin64': + setup_script = './scripts/cmake_macosx.sh' + else: + print(host_type[env.host_string]) + raise # todo + + with cd(remote_build_dir[env.host_string] + '/SURFACE'): + run(setup_script) + run('make -C build')
ddb514e470502160385731d6e01d8b6831a36079
test/skills/intent_service.py
test/skills/intent_service.py
import unittest
from mycroft.skills.intent_service import IntentService, ContextManager


class MockEmitter(object):
    def __init__(self):
        self.reset()

    def emit(self, message):
        self.types.append(message.type)
        self.results.append(message.data)

    def get_types(self):
        return self.types

    def get_results(self):
        return self.results

    def reset(self):
        self.types = []
        self.results = []


class ContextManagerTest(unittest.TestCase):
    emitter = MockEmitter()

    def setUp(self):
        self.context_manager = ContextManager(3)

    def test_add_context(self):
        entity = {'confidence': 1.0}
        context = 'TestContext'
        word = 'TestWord'
        print "Adding " + context
        entity['data'] = [(word, context)]
        entity['match'] = word
        entity['key'] = word

        self.assertEqual(len(self.context_manager.frame_stack), 0)
        self.context_manager.inject_context(entity)
        self.assertEqual(len(self.context_manager.frame_stack), 1)

    def test_remove_context(self):
        entity = {'confidence': 1.0}
        context = 'TestContext'
        word = 'TestWord'
        print "Adding " + context
        entity['data'] = [(word, context)]
        entity['match'] = word
        entity['key'] = word

        self.context_manager.inject_context(entity)
        self.assertEqual(len(self.context_manager.frame_stack), 1)
        self.context_manager.remove_context('TestContext')
        self.assertEqual(len(self.context_manager.frame_stack), 0)


if __name__ == '__main__':
    unittest.main()
Add basic tests for context manager
Add basic tests for context manager
Python
apache-2.0
linuxipho/mycroft-core,MycroftAI/mycroft-core,forslund/mycroft-core,aatchison/mycroft-core,forslund/mycroft-core,linuxipho/mycroft-core,Dark5ide/mycroft-core,Dark5ide/mycroft-core,MycroftAI/mycroft-core,aatchison/mycroft-core
--- +++ @@ -0,0 +1,59 @@ +import unittest +from mycroft.skills.intent_service import IntentService, ContextManager + + +class MockEmitter(object): + def __init__(self): + self.reset() + + def emit(self, message): + self.types.append(message.type) + self.results.append(message.data) + + def get_types(self): + return self.types + + def get_results(self): + return self.results + + def reset(self): + self.types = [] + self.results = [] + + +class ContextManagerTest(unittest.TestCase): + emitter = MockEmitter() + + def setUp(self): + self.context_manager = ContextManager(3) + + def test_add_context(self): + entity = {'confidence': 1.0} + context = 'TestContext' + word = 'TestWord' + print "Adding " + context + entity['data'] = [(word, context)] + entity['match'] = word + entity['key'] = word + + self.assertEqual(len(self.context_manager.frame_stack), 0) + self.context_manager.inject_context(entity) + self.assertEqual(len(self.context_manager.frame_stack), 1) + + def test_remove_context(self): + entity = {'confidence': 1.0} + context = 'TestContext' + word = 'TestWord' + print "Adding " + context + entity['data'] = [(word, context)] + entity['match'] = word + entity['key'] = word + + self.context_manager.inject_context(entity) + self.assertEqual(len(self.context_manager.frame_stack), 1) + self.context_manager.remove_context('TestContext') + self.assertEqual(len(self.context_manager.frame_stack), 0) + + +if __name__ == '__main__': + unittest.main()
170a4ecbca4624fba1207b297cd41e17e7b1a8c7
fedmsg.d/fasclient-example-config.py
fedmsg.d/fasclient-example-config.py
config = {
    'fasclient.consumer.enabled': True,
    'fasclient.consumer.delay': 10,  # 10 seconds
    'fasclient.consumer.serial': 3,  # 3 hosts at a time
}
Add the example fedmsg consumer configuration file
Add the example fedmsg consumer configuration file
Python
lgpl-2.1
fedora-infra/fedmsg-fasclient
--- +++ @@ -0,0 +1,5 @@ +config = { + 'fasclient.consumer.enabled': True, + 'fasclient.consumer.delay': 10, # 10 seconds + 'fasclient.consumer.serial': 3, # 3 hosts at a time +}
65dea8930509eee7b35af8876b15edda032aa368
example/tests/test_views.py
example/tests/test_views.py
from django.core.urlresolvers import reverse
from django.test import TestCase

import json

from myshop.models.polymorphic.product import Product
from myshop.models.manufacturer import Manufacturer


class ProductSelectViewTest(TestCase):

    def setUp(self):
        manufacturer = Manufacturer.objects.create(name="testmanufacturer")
        Product.objects.create(product_name="testproduct1", order=1, manufacturer=manufacturer)

    def test_finds_product_case_insensitive(self):
        response = self.client.get(reverse('shop:select-product') + "?term=Prod")
        data = json.loads(response.content)
        self.assertEqual(data['count'], 1)
        self.assertEqual(data['results'][0]['text'], "testproduct1")

    def test_bogus_query_finds_nothing(self):
        response = self.client.get(reverse('shop:select-product') + "?term=whatever")
        data = json.loads(response.content)
        self.assertEqual(data['count'], 0)
Add a test for ProductSelectView
Add a test for ProductSelectView
Python
bsd-3-clause
awesto/django-shop,nimbis/django-shop,awesto/django-shop,jrief/django-shop,jrief/django-shop,jrief/django-shop,khchine5/django-shop,nimbis/django-shop,divio/django-shop,khchine5/django-shop,khchine5/django-shop,nimbis/django-shop,divio/django-shop,divio/django-shop,jrief/django-shop,nimbis/django-shop,awesto/django-shop,khchine5/django-shop
--- +++ @@ -0,0 +1,25 @@ +from django.core.urlresolvers import reverse +from django.test import TestCase + +import json + +from myshop.models.polymorphic.product import Product +from myshop.models.manufacturer import Manufacturer + + +class ProductSelectViewTest(TestCase): + + def setUp(self): + manufacturer = Manufacturer.objects.create(name="testmanufacturer") + Product.objects.create(product_name="testproduct1", order=1, manufacturer=manufacturer) + + def test_finds_product_case_insensitive(self): + response = self.client.get(reverse('shop:select-product') + "?term=Prod") + data = json.loads(response.content) + self.assertEqual(data['count'], 1) + self.assertEqual(data['results'][0]['text'], "testproduct1") + + def test_bogus_query_finds_nothing(self): + response = self.client.get(reverse('shop:select-product') + "?term=whatever") + data = json.loads(response.content) + self.assertEqual(data['count'], 0)
4f46ab95f012c67d6bf6188987c618e3150cb63a
tests/statusbar_test.py
tests/statusbar_test.py
#!/usr/bin/env python
# encoding: utf-8
"""Statusbar tests for vimiv's test suite."""

from unittest import main
from vimiv_testcase import VimivTestCase


class StatusbarTest(VimivTestCase):
    """Statusbar Tests."""

    @classmethod
    def setUpClass(cls):
        cls.init_test(cls)
        cls.statusbar = cls.vimiv["statusbar"]
        # Remove the initial library error
        cls.statusbar.error_false()

    def test_toggle_statusbar(self):
        """Toggle the statusbar."""
        self.assertTrue(self.statusbar.bar.is_visible())
        self.assertFalse(self.statusbar.hidden)
        # Hide
        self.statusbar.toggle()
        self.assertFalse(self.statusbar.bar.is_visible())
        self.assertTrue(self.statusbar.hidden)
        # Show again
        self.statusbar.toggle()
        self.assertTrue(self.statusbar.bar.is_visible())
        self.assertFalse(self.statusbar.hidden)

    def test_err_message(self):
        """Show an error message."""
        self.statusbar.err_message("Test error")
        self.assertEqual(self.statusbar.left_label.get_text(), "Test error")
        # Timer is running
        self.assertGreater(self.statusbar.timer_id, 0)
        # Remove error message by hand
        self.statusbar.error_false()
        self.assertNotEqual(self.statusbar.left_label.get_text(), "Test error")

    def test_hidden_err_message(self):
        """Show an error message with an initially hidden statusbar."""
        # Hide
        self.statusbar.toggle()
        self.assertFalse(self.statusbar.bar.is_visible())
        # Send an error message
        self.statusbar.err_message("Test error")
        self.assertEqual(self.statusbar.left_label.get_text(), "Test error")
        self.assertTrue(self.statusbar.bar.is_visible())
        # Remove error message
        self.statusbar.error_false()
        self.assertNotEqual(self.statusbar.left_label.get_text(), "Test error")
        self.assertFalse(self.statusbar.bar.is_visible())
        # Show again
        self.statusbar.toggle()
        self.assertTrue(self.statusbar.bar.is_visible())


if __name__ == '__main__':
    main()
Add test for the statusbar
Add test for the statusbar
Python
mit
karlch/vimiv,karlch/vimiv,karlch/vimiv
--- +++ @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# encoding: utf-8 +"""Statusbar tests for vimiv's test suite.""" + +from unittest import main +from vimiv_testcase import VimivTestCase + + +class StatusbarTest(VimivTestCase): + """Statusbar Tests.""" + + @classmethod + def setUpClass(cls): + cls.init_test(cls) + cls.statusbar = cls.vimiv["statusbar"] + # Remove the initial library error + cls.statusbar.error_false() + + def test_toggle_statusbar(self): + """Toggle the statusbar.""" + self.assertTrue(self.statusbar.bar.is_visible()) + self.assertFalse(self.statusbar.hidden) + # Hide + self.statusbar.toggle() + self.assertFalse(self.statusbar.bar.is_visible()) + self.assertTrue(self.statusbar.hidden) + # Show again + self.statusbar.toggle() + self.assertTrue(self.statusbar.bar.is_visible()) + self.assertFalse(self.statusbar.hidden) + + def test_err_message(self): + """Show an error message.""" + self.statusbar.err_message("Test error") + self.assertEqual(self.statusbar.left_label.get_text(), "Test error") + # Timer is running + self.assertGreater(self.statusbar.timer_id, 0) + # Remove error message by hand + self.statusbar.error_false() + self.assertNotEqual(self.statusbar.left_label.get_text(), "Test error") + + def test_hidden_err_message(self): + """Show an error message with an initially hidden statusbar.""" + # Hide + self.statusbar.toggle() + self.assertFalse(self.statusbar.bar.is_visible()) + # Send an error message + self.statusbar.err_message("Test error") + self.assertEqual(self.statusbar.left_label.get_text(), "Test error") + self.assertTrue(self.statusbar.bar.is_visible()) + # Remove error message + self.statusbar.error_false() + self.assertNotEqual(self.statusbar.left_label.get_text(), "Test error") + self.assertFalse(self.statusbar.bar.is_visible()) + # Show again + self.statusbar.toggle() + self.assertTrue(self.statusbar.bar.is_visible()) + + +if __name__ == '__main__': + main()
14cedb385e8345b11d9c9dfe5903f416e5d56780
src/models/separate_train_y.py
src/models/separate_train_y.py
# Built-in modules
from os import path, pardir
import sys
import logging

# not used in this stub but often useful for finding various files
PROJECT_ROOT_DIRPATH = path.join(path.dirname(__file__), pardir, pardir)
sys.path.append(PROJECT_ROOT_DIRPATH)

# Third-party modules
import click
from dotenv import find_dotenv, load_dotenv
# Hand-made modules
from src.models.split import DatasetSplitHandler


TRAIN_FILEPATH_PREFIX = path.join(
    PROJECT_ROOT_DIRPATH, "data/interim/dataset.train_X_y"
)
TRAIN_FILEPATH_SUFFIX = "yonekurayama.blp"
LOCATIONS = (
    "ukishima",
    "ougishima",
    "yonekurayama"
)


@click.command()
@click.option("--location", "-l", type=str, default=None)
@click.option("--n_splits", "-n", type=int, default=5)
def main(location, n_splits):
    logger = logging.getLogger(__name__)
    logger.info('#0: separating cross-validation index')

    #
    # split train X and y
    #
    splitter = DatasetSplitHandler()

    if location is None:
        location_list = LOCATIONS
    else:
        location_list = [location, ]

    for place in location_list:
        train_filepath_prefix = path.join(
            PROJECT_ROOT_DIRPATH, "data/processed/dataset.train_X_y"
        )
        splitter.separate_and_save_train_y(train_filepath_prefix, place)

        logger.info('#1: get cross-validation test index @ {l}'.format(l=place))

    logger.info('#1: end separating the cross-validation index')


if __name__ == '__main__':
    log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    logging.basicConfig(level=logging.INFO, format=log_fmt)

    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())

    main()
Add code for train y separation
Add code for train y separation
Python
mit
gciteam6/xgboost,gciteam6/xgboost
--- +++ @@ -0,0 +1,64 @@ +# Built-in modules +from os import path, pardir +import sys +import logging + +# not used in this stub but often useful for finding various files +PROJECT_ROOT_DIRPATH = path.join(path.dirname(__file__), pardir, pardir) +sys.path.append(PROJECT_ROOT_DIRPATH) + +# Third-party modules +import click +from dotenv import find_dotenv, load_dotenv +# Hand-made modules +from src.models.split import DatasetSplitHandler + + +TRAIN_FILEPATH_PREFIX = path.join( + PROJECT_ROOT_DIRPATH, "data/interim/dataset.train_X_y" +) +TRAIN_FILEPATH_SUFFIX = "yonekurayama.blp" +LOCATIONS = ( + "ukishima", + "ougishima", + "yonekurayama" +) + + +@click.command() +@click.option("--location", "-l", type=str, default=None) +@click.option("--n_splits", "-n", type=int, default=5) +def main(location, n_splits): + logger = logging.getLogger(__name__) + logger.info('#0: separating cross-validation index') + + # + # split train X and y + # + splitter = DatasetSplitHandler() + + if location is None: + location_list = LOCATIONS + else: + location_list = [location, ] + + for place in location_list: + train_filepath_prefix = path.join( + PROJECT_ROOT_DIRPATH, "data/processed/dataset.train_X_y" + ) + splitter.separate_and_save_train_y(train_filepath_prefix, place) + + logger.info('#1: get cross-validation test index @ {l}'.format(l=place)) + + logger.info('#1: end separating the cross-validation index') + + +if __name__ == '__main__': + log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' + logging.basicConfig(level=logging.INFO, format=log_fmt) + + # find .env automagically by walking up directories until it's found, then + # load up the .env entries as environment variables + load_dotenv(find_dotenv()) + + main()
b489eded170200dcc90d04cf7382c5592edefd46
zerver/tests/test_migrations_0145.py
zerver/tests/test_migrations_0145.py
from zerver.lib.test_classes import MigrationsTestCase
from zerver.lib.test_helpers import use_db_models, make_client
from django.utils.timezone import now as timezone_now
from django.db.migrations.state import StateApps
from django.db.models.base import ModelBase

from zerver.models import get_stream

class EmojiName2IdTestCase(MigrationsTestCase):

    migrate_from = '0144_remove_realm_create_generic_bot_by_admins_only'
    migrate_to = '0145_reactions_realm_emoji_name_to_id'

    @use_db_models
    def setUpBeforeMigration(self, apps: StateApps) -> None:
        Reaction = apps.get_model('zerver', 'Reaction')
        RealmEmoji = apps.get_model('zerver', 'RealmEmoji')
        Message = apps.get_model('zerver', 'Message')
        Recipient = apps.get_model('zerver', 'Recipient')

        sender = self.example_user('iago')
        realm = sender.realm
        sending_client = make_client(name="test suite")
        stream_name = 'Denmark'
        stream = get_stream(stream_name, realm)
        subject = 'foo'

        def send_fake_message(message_content: str, stream: ModelBase) -> ModelBase:
            recipient = Recipient.objects.get(type_id=stream.id, type=2)
            return Message.objects.create(sender = sender,
                                          recipient = recipient,
                                          subject = subject,
                                          content = message_content,
                                          pub_date = timezone_now(),
                                          sending_client = sending_client)
        message = send_fake_message('Test 1', stream)

        # Create reactions for all the realm emoji's on the message we faked.
        for realm_emoji in RealmEmoji.objects.all():
            reaction = Reaction(user_profile=sender, message=message,
                                emoji_name=realm_emoji.name, emoji_code=realm_emoji.name,
                                reaction_type='realm_emoji')
            reaction.save()
        realm_emoji_reactions_count = Reaction.objects.filter(reaction_type='realm_emoji').count()
        self.assertEqual(realm_emoji_reactions_count, 1)

    def test_tags_migrated(self) -> None:
        Reaction = self.apps.get_model('zerver', 'Reaction')
        RealmEmoji = self.apps.get_model('zerver', 'RealmEmoji')

        realm_emoji_reactions = Reaction.objects.filter(reaction_type='realm_emoji')
        realm_emoji_reactions_count = realm_emoji_reactions.count()
        self.assertEqual(realm_emoji_reactions_count, 1)
        for reaction in realm_emoji_reactions:
            realm_emoji = RealmEmoji.objects.get(
                realm_id=reaction.user_profile.realm_id,
                name=reaction.emoji_name)
            self.assertEqual(reaction.emoji_code, str(realm_emoji.id))
Add test for 0145 migration.
migration_test: Add test for 0145 migration.
Python
apache-2.0
kou/zulip,hackerkid/zulip,jackrzhang/zulip,hackerkid/zulip,tommyip/zulip,punchagan/zulip,eeshangarg/zulip,punchagan/zulip,kou/zulip,eeshangarg/zulip,punchagan/zulip,tommyip/zulip,tommyip/zulip,shubhamdhama/zulip,kou/zulip,synicalsyntax/zulip,brainwane/zulip,tommyip/zulip,eeshangarg/zulip,zulip/zulip,zulip/zulip,zulip/zulip,andersk/zulip,tommyip/zulip,zulip/zulip,showell/zulip,rishig/zulip,brainwane/zulip,eeshangarg/zulip,rishig/zulip,shubhamdhama/zulip,showell/zulip,timabbott/zulip,dhcrzf/zulip,showell/zulip,timabbott/zulip,dhcrzf/zulip,andersk/zulip,kou/zulip,kou/zulip,brainwane/zulip,shubhamdhama/zulip,brainwane/zulip,synicalsyntax/zulip,synicalsyntax/zulip,andersk/zulip,jackrzhang/zulip,hackerkid/zulip,hackerkid/zulip,zulip/zulip,jackrzhang/zulip,brainwane/zulip,synicalsyntax/zulip,eeshangarg/zulip,brainwane/zulip,dhcrzf/zulip,rishig/zulip,rishig/zulip,synicalsyntax/zulip,showell/zulip,rishig/zulip,timabbott/zulip,punchagan/zulip,punchagan/zulip,andersk/zulip,showell/zulip,dhcrzf/zulip,kou/zulip,rht/zulip,hackerkid/zulip,shubhamdhama/zulip,rht/zulip,synicalsyntax/zulip,timabbott/zulip,jackrzhang/zulip,tommyip/zulip,jackrzhang/zulip,rht/zulip,showell/zulip,rishig/zulip,eeshangarg/zulip,rht/zulip,dhcrzf/zulip,andersk/zulip,shubhamdhama/zulip,jackrzhang/zulip,tommyip/zulip,dhcrzf/zulip,punchagan/zulip,andersk/zulip,kou/zulip,dhcrzf/zulip,hackerkid/zulip,timabbott/zulip,jackrzhang/zulip,zulip/zulip,rht/zulip,andersk/zulip,timabbott/zulip,hackerkid/zulip,shubhamdhama/zulip,punchagan/zulip,timabbott/zulip,zulip/zulip,rishig/zulip,showell/zulip,rht/zulip,synicalsyntax/zulip,rht/zulip,brainwane/zulip,eeshangarg/zulip,shubhamdhama/zulip
--- +++ @@ -0,0 +1,58 @@ +from zerver.lib.test_classes import MigrationsTestCase +from zerver.lib.test_helpers import use_db_models, make_client +from django.utils.timezone import now as timezone_now +from django.db.migrations.state import StateApps +from django.db.models.base import ModelBase + +from zerver.models import get_stream + +class EmojiName2IdTestCase(MigrationsTestCase): + + migrate_from = '0144_remove_realm_create_generic_bot_by_admins_only' + migrate_to = '0145_reactions_realm_emoji_name_to_id' + + @use_db_models + def setUpBeforeMigration(self, apps: StateApps) -> None: + Reaction = apps.get_model('zerver', 'Reaction') + RealmEmoji = apps.get_model('zerver', 'RealmEmoji') + Message = apps.get_model('zerver', 'Message') + Recipient = apps.get_model('zerver', 'Recipient') + + sender = self.example_user('iago') + realm = sender.realm + sending_client = make_client(name="test suite") + stream_name = 'Denmark' + stream = get_stream(stream_name, realm) + subject = 'foo' + + def send_fake_message(message_content: str, stream: ModelBase) -> ModelBase: + recipient = Recipient.objects.get(type_id=stream.id, type=2) + return Message.objects.create(sender = sender, + recipient = recipient, + subject = subject, + content = message_content, + pub_date = timezone_now(), + sending_client = sending_client) + message = send_fake_message('Test 1', stream) + + # Create reactions for all the realm emoji's on the message we faked. + for realm_emoji in RealmEmoji.objects.all(): + reaction = Reaction(user_profile=sender, message=message, + emoji_name=realm_emoji.name, emoji_code=realm_emoji.name, + reaction_type='realm_emoji') + reaction.save() + realm_emoji_reactions_count = Reaction.objects.filter(reaction_type='realm_emoji').count() + self.assertEqual(realm_emoji_reactions_count, 1) + + def test_tags_migrated(self) -> None: + Reaction = self.apps.get_model('zerver', 'Reaction') + RealmEmoji = self.apps.get_model('zerver', 'RealmEmoji') + + realm_emoji_reactions = Reaction.objects.filter(reaction_type='realm_emoji') + realm_emoji_reactions_count = realm_emoji_reactions.count() + self.assertEqual(realm_emoji_reactions_count, 1) + for reaction in realm_emoji_reactions: + realm_emoji = RealmEmoji.objects.get( + realm_id=reaction.user_profile.realm_id, + name=reaction.emoji_name) + self.assertEqual(reaction.emoji_code, str(realm_emoji.id))
9c7e73bb778ca8cdc60353dbfec484b965d11e4c
examples/download_full.py
examples/download_full.py
from __future__ import print_function

from openload import OpenLoad

def solve_captcha(captcha_url):
    """Return solved captcha string"""
    pass

username = 'FTP Username/API Login'
key = 'FTP Password/API Key'
file_id = 'Id of the file will be downloaded'

openload = OpenLoad(username, key)

# Get a download ticket and captcha url.
preparation_resp = openload.prepare_download(file_id)
ticket = preparation_resp.get('ticket')

# Sometimes no captcha is sent in openload.co API response.
captcha_url = preparation_resp.get('captcha_url')

if captcha_url:
    # Solve captcha.
    captcha_response = solve_captcha(captcha_url)
else:
    captcha_response = ''

download_resp = openload.get_download_link(file_id, ticket, captcha_response)
direct_download_url = download_resp.get('url')

# Process download url.
print(direct_download_url)
Add full example to download file
Add full example to download file
Python
mit
mohan3d/PyOpenload
--- +++ @@ -0,0 +1,32 @@ +from __future__ import print_function + +from openload import OpenLoad + +def solve_captcha(captcha_url): + """Return solved captcha string""" + pass + +username = 'FTP Username/API Login' +key = 'FTP Password/API Key' +file_id = 'Id of the file will be downloaded' + +openload = OpenLoad(username, key) + +# Get a download ticket and captcha url. +preparation_resp = openload.prepare_download(file_id) +ticket = preparation_resp.get('ticket') + +# Sometimes no captcha is sent in openload.co API response. +captcha_url = preparation_resp.get('captcha_url') + +if captcha_url: + # Solve captcha. + captcha_response = solve_captcha(captcha_url) +else: + captcha_response = '' + +download_resp = openload.get_download_link(file_id, ticket, captcha_response) +direct_download_url = download_resp.get('url') + +# Process download url. +print(direct_download_url)
dabd96a85f15c7f9c198fa49982250d5cbad8b6b
newtype-annotated-experiments.py
newtype-annotated-experiments.py
# IPython log file
import numpy as np


import typing as t
ImageData = t.Annotated[np.ndarray, 'image']
x : ImageData = np.random.random((512, 512))

print(__annotations__)

def gaussian(image: ImageData, sigma: int = 1) -> ImageData:
    return image

print(gaussian.__annotations__)
print(gaussian.__annotations__['image'] is __annotations__['x'])

ImageNewData = t.NewType('ImageNewData', np.ndarray)
ImageNewData
y : ImageNewData = np.random.random((512, 512))
print(__annotations__['y'] is ImageNewData)

LabelsData = t.Annotated[np.ndarray, 'labels']

def slic(image: ImageData) -> LabelsData:
    return (image * 256).astype(int)

class Segmenter(t.Protocol):
    def __call__(image: ImageData) -> LabelsData:
        ...

def map_segments(f: Segmenter, images: t.List[ImageData]) -> t.List[LabelsData]:
    ...

class Segmenter(t.Protocol):
    def __call__(image: ImageData, *args, **kwargs) -> LabelsData:
        ...

def slic(image: ImageData, n_segments: int = 200) -> LabelsData:
    return (image * n_segments).astype(int)
Add brief experiments with creating types with newtype and annotated
Add brief experiments with creating types with newtype and annotated
Python
bsd-3-clause
jni/useful-histories
--- +++ @@ -0,0 +1,40 @@ +# IPython log file +import numpy as np + + +import typing as t +ImageData = t.Annotated[np.ndarray, 'image'] +x : ImageData = np.random.random((512, 512)) + +print(__annotations__) + +def gaussian(image: ImageData, sigma: int = 1) -> ImageData: + return image + +print(gaussian.__annotations__) +print(gaussian.__annotations__['image'] is __annotations__['x']) + +ImageNewData = t.NewType('ImageNewData', np.ndarray) +ImageNewData +y : ImageNewData = np.random.random((512, 512)) +print(__annotations__['y'] is ImageNewData) + +LabelsData = t.Annotated[np.ndarray, 'labels'] + +def slic(image: ImageData) -> LabelsData: + return (image * 256).astype(int) + +class Segmenter(t.Protocol): + def __call__(image: ImageData) -> LabelsData: + ... + +def map_segments(f: Segmenter, images: List[ImageData]) -> List[LabelsData]: + ... + +class Segmenter(t.Protocol): + def __call__(image: ImageData, *args, **kwargs) -> LabelsData: + ... + +def slic(image: ImageData, n_segments: int = 200) -> LabelsData: + return (image * n_segments).astype(int) +
5f69110a4a343a8ab6d3cc6b6efc6ca145897d94
ibmcnx/doc/Documentation.py
ibmcnx/doc/Documentation.py
######
# Create a file (html or markdown) with the output of
#  - JVMHeap
#  - LogFiles
#  - Ports
#  - Variables
#
# Author:        Christoph Stoettner
# Mail:          christoph.stoettner@stoeps.de
# Documentation: http://scripting101.stoeps.de
#
# Version:       2.0
# Date:          2014-06-08
#
# License:       Apache 2.0
#

# TODO: Create a menu for file selection

import ibmcnx.filehandle
import sys

sys.stdout = open("/tmp/documentation.txt", "w")

print '# JVM Settings of all AppServers:'
execfile( 'ibmcnx/doc/JVMSettings.py' )

print '# Used Ports:'
execfile( 'ibmcnx/doc/Ports.py' )

print '# LogFile Settings:'
execfile( 'ibmcnx/doc/LogFiles.py' )

print '# WebSphere Variables'
execfile( 'ibmcnx/doc/Variables.py' )
Create script to save documentation to a file
4: Create script to save documentation to a file

Task-Url: http://github.com/stoeps13/ibmcnx2/issues/issue/4
Python
apache-2.0
stoeps13/ibmcnx2,stoeps13/ibmcnx2
--- +++ @@ -0,0 +1,35 @@ +###### +# Create a file (html or markdown) with the output of +# - JVMHeap +# - LogFiles +# - Ports +# - Variables +# +# Author: Christoph Stoettner +# Mail: christoph.stoettner@stoeps.de +# Documentation: http://scripting101.stoeps.de +# +# Version: 2.0 +# Date: 2014-06-08 +# +# License: Apache 2.0 +# + +# TODO: Create a menu for file selection + +import ibmcnx.filehandle +import sys + +sys.stdout = open("/tmp/documentation.txt", "w") + +print '# JVM Settings of all AppServers:' +execfile( 'ibmcnx/doc/JVMSettings.py' ) + +print '# Used Ports:' +execfile( 'ibmcnx/doc/Ports.py' ) + +print '# LogFile Settgins:' +execfile( 'ibmcnx/doc/LogFiles.py' ) + +print '# WebSphere Variables' +execfile( 'ibmcnx/doc/Variables.py' )
550cda891d53dce79466687a694f7be2eb6e4d9d
upnpy/utils.py
upnpy/utils.py
# -*- coding: utf-8 -*-
"""
utils.py
~~~~~~~~

Defines utility functions used by UPnPy.
"""


def camelcase_to_underscore(text):
    """
    Convert a camelCasedString to one separated_by_underscores. Treats
    neighbouring capitals as acronyms and doesn't separate them, e.g. URL does
    not become u_r_l. That would be stupid.

    :param text: The string to convert.
    """
    outstr = []

    for i, char in enumerate(text):
        if char.islower():
            outstr.append(char)
        elif i > 0 and text[i - 1].islower():
            # A capital that follows a lowercase letter starts a new word.
            outstr.append('_')
            outstr.append(char.lower())
        else:
            # Leading capitals and runs of capitals (acronyms) stay together.
            outstr.append(char.lower())

    return ''.join(outstr)
Add camelCase to snake_case function.
Add camelCase to snake_case function.
Python
mit
Lukasa/upnpy,WenhaoYu/upnpy
--- +++ @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +""" +utils.py +~~~~~~~~ + +Defines utility functions used by UPnPy. +""" + + +def camelcase_to_underscore(text): + """ + Convert a camelCasedString to one separated_by_underscores. Treats + neighbouring capitals as acronyms and doesn't separated them, e.g. URL does + not become u_r_l. That would be stupid. + + :param text: The string to convert. + """ + outstr = [] + + for char in text: + if char.is_lower(): + outstr.append(char) + elif outstr[-1].is_lower(): + outstr.append('_') + outstr.append(char.lower()) + else: + outstr.append(char.lower()) + + return ''.join(outstr)
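As a quick sanity check of the corrected camelcase_to_underscore above (assuming the module is importable as upnpy.utils, per the file path in this row), the documented acronym behaviour can be exercised like this:

from upnpy.utils import camelcase_to_underscore

print(camelcase_to_underscore('camelCasedString'))  # camel_cased_string
print(camelcase_to_underscore('requestURL'))        # request_url -- the URL acronym stays together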
4f6a577df1c40fcc5d26107f71b3fa7eb3ca85e1
find_classes.py
find_classes.py
"""find_classes.py Find the emergent classes from the exposure matrix averaged over all MSAs in the US """ import csv import marble as mb # # Import exposure data # ## List of MSA msa = {} with open('data/names/msa.csv', 'r') as source: reader = csv.reader(source, delimiter='\t') reader.next() for rows in reader: msa[rows[0]] = rows[1] ## Import exposure values exposure_val = {} with open('extr/exposure/categories/us/msa_average/values.csv', 'r') as source: reader = csv.reader(source, delimiter='\t') categories = reader.next()[1:] for rows in reader: exposure_val[int(rows[0])] = {int(cat): float(val) for cat, val in zip(categories, rows[1:])} ## Import exposure variance exposure_var = {} with open('extr/exposure/categories/us/msa_average/variance.csv', 'r') as source: reader = csv.reader(source, delimiter='\t') categories = reader.next()[1:] for rows in reader: exposure_var[int(rows[0])] = {int(cat): float(var) for cat, var in zip(categories, rows[1:])} ## Households income households_all = {} for i, city in enumerate(msa): ## Import household income distribution households = {} with open('data/income/msa/%s/income.csv'%city, 'r') as source: reader = csv.reader(source, delimiter='\t') reader.next() for rows in reader: num_cat = len(rows[1:]) households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])} households_all[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])} # # Concantenate exposure values and variance # categories = [int(k) for k in exposure_val.iterkeys()] exp = {c0: {c1: (exposure_val[c0][c1], exposure_var[c0][c1]) for c1 in categories} for c0 in categories} print exposure_val # # Extract linkage matrix # link = mb.cluster_categories(households_all, exp) print link
Add script to extract the linkage matrix from the average US exposure matrix
Add script to extract the linkage matrix from the average US exposure matrix
Python
bsd-3-clause
rlouf/patterns-of-segregation
--- +++ @@ -0,0 +1,69 @@ +"""find_classes.py + +Find the emergent classes from the exposure matrix averaged over all MSAs in the +US +""" +import csv +import marble as mb + + +# +# Import exposure data +# + +## List of MSA +msa = {} +with open('data/names/msa.csv', 'r') as source: + reader = csv.reader(source, delimiter='\t') + reader.next() + for rows in reader: + msa[rows[0]] = rows[1] + +## Import exposure values +exposure_val = {} +with open('extr/exposure/categories/us/msa_average/values.csv', 'r') as source: + reader = csv.reader(source, delimiter='\t') + categories = reader.next()[1:] + for rows in reader: + exposure_val[int(rows[0])] = {int(cat): float(val) for cat, val + in zip(categories, rows[1:])} + +## Import exposure variance +exposure_var = {} +with open('extr/exposure/categories/us/msa_average/variance.csv', 'r') as source: + reader = csv.reader(source, delimiter='\t') + categories = reader.next()[1:] + for rows in reader: + exposure_var[int(rows[0])] = {int(cat): float(var) for cat, var + in zip(categories, rows[1:])} + +## Households income +households_all = {} +for i, city in enumerate(msa): + ## Import household income distribution + households = {} + with open('data/income/msa/%s/income.csv'%city, 'r') as source: + reader = csv.reader(source, delimiter='\t') + reader.next() + for rows in reader: + num_cat = len(rows[1:]) + households[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])} + households_all[rows[0]] = {c:int(h) for c,h in enumerate(rows[1:])} + + + +# +# Concantenate exposure values and variance +# +categories = [int(k) for k in exposure_val.iterkeys()] +exp = {c0: {c1: (exposure_val[c0][c1], + exposure_var[c0][c1]) + for c1 in categories} + for c0 in categories} + +print exposure_val +# +# Extract linkage matrix +# +link = mb.cluster_categories(households_all, exp) +print link
75290add3f338abb6542c2b1981fdde7c1117626
indra/statements/delta.py
indra/statements/delta.py
class Delta(object):
    """The parent class of all delta types."""
    pass


class QualitativeDelta(Delta):
    """Qualitative delta defining an Event.

    Parameters
    ----------
    polarity : 1, -1 or None
        Polarity of an Event.
    adjectives : list[str]
        Adjectives describing an Event.
    """
    def __init__(self, polarity=None, adjectives=None):
        self.polarity = polarity
        self.adjectives = adjectives if adjectives else []

    def set_polarity(self, pol):
        self.polarity = pol

    def add_adjectives(self, adjectives):
        for adj in adjectives:
            self.adjectives.append(adj)

    def is_opposite(self, other):
        return ((self.polarity == 1 and other.polarity == -1) or
                (self.polarity == -1 and other.polarity == 1))
Define Delta and QualitativeDelta classes
Define Delta and QualitativeDelta classes
Python
bsd-2-clause
johnbachman/belpy,bgyori/indra,bgyori/indra,johnbachman/belpy,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,sorgerlab/belpy,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,sorgerlab/indra
--- +++ @@ -0,0 +1,29 @@ +class Delta(object): + """The parent class of all delta types.""" + pass + + +class QualitativeDelta(Delta): + """Qualitative delta defining an Event. + + Parameters + ---------- + polarity : 1, -1 or None + Polarity of an Event. + adjectives : list[str] + Adjectives describing an Event. + """ + def __init__(self, polarity=None, adjectives=None): + self.polarity = polarity + self.adjectives = adjectives if adjectives else [] + + def set_polarity(self, pol): + self.polarity = pol + + def add_adjectives(self, adjectives): + for adj in adjectives: + self.adjectives.append(adj) + + def is_opposite(self, other): + return ((self.polarity == 1 and other.polarity == -1) or + (self.polarity == -1 and other.polarity == 1))
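A brief usage sketch of the classes above (assuming the module is importable as indra.statements.delta, matching the file path in this row):

from indra.statements.delta import QualitativeDelta

increase = QualitativeDelta(polarity=1, adjectives=['strong'])
decrease = QualitativeDelta(polarity=-1)

print(increase.is_opposite(decrease))  # True: polarities 1 and -1 oppose
decrease.add_adjectives(['slight'])
print(decrease.adjectives)             # ['slight']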
9031a8def9b797cbd8280a29e62c436e168f4096
txircd/modules/rfc/cmd_nick.py
txircd/modules/rfc/cmd_nick.py
from twisted.plugin import IPlugin
from twisted.words.protocols import irc
from txircd.module_interface import Command, ICommand, IModuleData, ModuleData
from txircd.utils import ircLower, isValidNick
from zope.interface import implements
from datetime import datetime

class NickCommand(ModuleData):
    implements(IPlugin, IModuleData)

    name = "NickCommand"
    core = True

    def hookIRCd(self, ircd):
        self.ircd = ircd

    def userCommands(self):
        return [ ("NICK", 1, NickUserCommand(self.ircd)) ]

    def serverCommands(self):
        return [ ("NICK", 1, NickServerCommand(self.ircd)) ]

class NickUserCommand(Command):
    implements(ICommand)

    forRegisteredUsers = None

    def __init__(self, ircd):
        self.ircd = ircd

    def parseParams(self, user, params, prefix, tags):
        if not params or not params[0]:
            user.sendMessage(irc.ERR_NEEDMOREPARAMS, "NICK", ":Not enough parameters")
            return None
        if not isValidNick(params[0]):
            user.sendMessage(irc.ERR_ERRONEUSNICKNAME, params[0], ":Erroneous nickname")
            return None
        if params[0] in self.ircd.userNicks:
            otherUser = self.ircd.users[self.ircd.userNicks[params[0]]]
            if user != otherUser:
                user.sendMessage(irc.ERR_NICKNAMEINUSE, params[0], ":Nickname is already in use")
                return None
        return {
            "nick": params[0]
        }

    def execute(self, user, data):
        user.changeNick(data["nick"])
        if not user.isRegistered():
            user.register("NICK")
        return True

class NickServerCommand(Command):
    implements(ICommand)

    def __init__(self, ircd):
        self.ircd = ircd

    def parseParams(self, server, params, prefix, tags):
        if len(params) != 2:
            return None
        if prefix not in self.ircd.users:
            self.disconnect("Desync: User list")
            return None
        user = self.ircd.users[prefix]
        try:
            time = datetime.utcfromtimestamp(int(params[0]))
        except ValueError:
            return None
        if params[1] in self.ircd.userNicks:
            localUser = self.ircd.users[self.ircd.userNicks[params[1]]]
            if localUser != user:
                if localUser.localOnly:
                    if "localnickcollision" in self.ircd.actions:
                        for action in self.ircd.actions["localnickcollision"]:
                            if action[0](user):
                                break
                        else:
                            return None
                    return {
                        "user": user,
                        "time": time,
                        "nick": params[1]
                    }
                return None
            self.disconnect("Desync: User data (nicknames)")
            return None
        return {
            "user": user,
            "time": time,
            "nick": params[1]
        }

    def execute(self, server, data):
        user = data["user"]
        user.changeNick(data["nick"])
        user.nickSince = data["time"]
        return True

cmd_nick = NickCommand()
Implement NICK command for both users and servers
Implement NICK command for both users and servers
Python
bsd-3-clause
ElementalAlchemist/txircd,Heufneutje/txircd
--- +++ @@ -0,0 +1,100 @@ +from twisted.plugin import IPlugin +from twisted.words.protocols import irc +from txircd.module_interface import Command, ICommand, IModuleData, ModuleData +from txircd.utils import ircLower, isValidNick +from zope.interface import implements +from datetime import datetime + +class NickCommand(ModuleData): + implements(IPlugin, IModuleData) + + name = "NickCommand" + core = True + + def hookIRCd(self, ircd): + self.ircd = ircd + + def userCommands(self): + return [ ("NICK", 1, NickUserCommand(self.ircd)) ] + + def serverCommands(self): + return [ ("NICK", 1, NickServerCommand(self.ircd)) ] + +class NickUserCommand(Command): + implements(ICommand) + + forRegisteredUsers = None + + def __init__(self, ircd): + self.ircd = ircd + + def parseParams(self, user, params, prefix, tags): + if not params or not params[0]: + user.sendMessage(irc.ERR_NEEDMOREPARAMS, "NICK", ":Not enough parameters") + return None + if not isValidNick(params[0]): + user.sendMessage(irc.ERR_ERRONEUSNICKNAME, params[0], ":Erroneous nickname") + return None + if params[0] in self.ircd.userNicks: + otherUser = self.ircd.users[self.ircd.userNicks] + if user != otherUser: + user.sendMessage(irc.ERR_NICKNAMEINUSE, nick, ":Nickname is already in use") + return None + return { + "nick": params[0] + } + + def execute(self, user, data): + user.changeNick(data["nick"]) + if not user.isRegistered(): + user.register("NICK") + return True + +class NickServerCommand(Command): + implements(ICommand) + + def __init__(self, ircd): + self.ircd = ircd + + def parseParams(self, server, params, prefix, tags): + if len(params) != 2: + return None + if prefix not in self.ircd.users: + self.disconnect("Desync: User list") + return None + user = self.ircd.users[prefix] + try: + time = datetime.utcfromtimestamp(params[0]) + except ValueError: + return None + if params[1] in self.ircd.userNicks: + localUser = self.ircd.users[self.ircd.userNicks[params[1]]] + if localUser != user: + if localUser.localOnly: + if "localnickcollision" in self.ircd.actions: + for action in self.ircd.actions["localnickcollision"]: + if action[0](user): + break + else: + return None + return { + "user": user, + "time": time, + "nick": params[1] + } + return None + self.disconnect("Desync: User data (nicknames)") + return None + return { + "user": user, + "time": time, + "nick": params[1] + } + + def execute(self, server, data): + user = data["user"] + user.changeNick(data["nick"]) + user.nickSince = data["time"] + return True + +cmd_nick = NickCommand()
0f9b7486d7f396598f32148422588da66c23477e
backend/breach/migrations/0008_auto_20160314_2049.py
backend/breach/migrations/0008_auto_20160314_2049.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-14 20:49
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('breach', '0007_auto_20160309_1802'),
    ]

    operations = [
        migrations.AlterField(
            model_name='round',
            name='amount',
            field=models.IntegerField(default=1, help_text='Number of samples contained in each sampleset of this round.'),
        ),
        migrations.AlterField(
            model_name='sampleset',
            name='completed',
            field=models.DateTimeField(blank=True, default=None, help_text='When we stopped collecting samples for this sampleset, successfully or not', null=True),
        ),
        migrations.AlterField(
            model_name='sampleset',
            name='started',
            field=models.DateTimeField(blank=True, default=None, help_text='Date and time at which sample set collection was started', null=True),
        ),
    ]
Allow unstarted/incomplete samplesets in db
Allow unstarted/incomplete samplesets in db
Python
mit
dionyziz/rupture,dimkarakostas/rupture,dionyziz/rupture,esarafianou/rupture,dimriou/rupture,dimkarakostas/rupture,dimriou/rupture,dimriou/rupture,dimkarakostas/rupture,esarafianou/rupture,dimriou/rupture,dimkarakostas/rupture,dimkarakostas/rupture,dionyziz/rupture,esarafianou/rupture,dionyziz/rupture,dionyziz/rupture,esarafianou/rupture,dimriou/rupture
--- +++ @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9.2 on 2016-03-14 20:49 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('breach', '0007_auto_20160309_1802'), + ] + + operations = [ + migrations.AlterField( + model_name='round', + name='amount', + field=models.IntegerField(default=1, help_text='Number of samples contained in each sampleset of this round.'), + ), + migrations.AlterField( + model_name='sampleset', + name='completed', + field=models.DateTimeField(blank=True, default=None, help_text='When we stopped collecting samples for this sampleset, successfully or not', null=True), + ), + migrations.AlterField( + model_name='sampleset', + name='started', + field=models.DateTimeField(blank=True, default=None, help_text='Date and time at which sample set collection was started', null=True), + ), + ]
0b44d2f2f99426cd2385b881c721f64979fb3d92
src/collectors/users/test/testusers.py
src/collectors/users/test/testusers.py
#!/usr/bin/python
# coding=utf-8
################################################################################

from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch

from diamond.collector import Collector
from users import UsersCollector

import sys

################################################################################


class TestUsersCollector(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('UsersCollector', {
            'utmp': self.getFixturePath('utmp.centos6'),
        })

        self.collector = UsersCollector(config, None)

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        # Because of the compiled nature of pyutmp, we can't actually test
        # different operating system versions than the currently running
        # one
        if sys.platform.startswith('linux'):
            self.collector.collect()

            metrics = {
                'kormoc': 2,
                'root': 3,
                'total': 5,
            }

            self.setDocExample(self.collector.__class__.__name__, metrics)
            self.assertPublishedMany(publish_mock, metrics)

################################################################################
if __name__ == "__main__":
    unittest.main()
#!/usr/bin/python
# coding=utf-8
################################################################################

from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch

from diamond.collector import Collector
from users import UsersCollector

import sys

################################################################################


class TestUsersCollector(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('UsersCollector', {
            'utmp': self.getFixturePath('utmp.centos6'),
        })

        self.collector = UsersCollector(config, None)

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        metrics = {
            'kormoc': 2,
            'root': 3,
            'total': 5,
        }

        self.setDocExample(self.collector.__class__.__name__, metrics)

        # Because of the compiled nature of pyutmp, we can't actually test
        # different operating system versions than the currently running
        # one
        if sys.platform.startswith('linux'):
            self.collector.collect()

            self.assertPublishedMany(publish_mock, metrics)

################################################################################
if __name__ == "__main__":
    unittest.main()
Set the docs whether or not we run the test on this platform
Set the docs whether or not we run the test on this platform
Python
mit
jumping/Diamond,krbaker/Diamond,sebbrandt87/Diamond,Clever/Diamond,codepython/Diamond,CYBERBUGJR/Diamond,szibis/Diamond,gg7/diamond,zoidbergwill/Diamond,bmhatfield/Diamond,TAKEALOT/Diamond,TinLe/Diamond,Ormod/Diamond,tuenti/Diamond,dcsquared13/Diamond,szibis/Diamond,ceph/Diamond,jaingaurav/Diamond,tusharmakkar08/Diamond,Basis/Diamond,Netuitive/netuitive-diamond,jriguera/Diamond,tusharmakkar08/Diamond,hvnsweeting/Diamond,mfriedenhagen/Diamond,TinLe/Diamond,dcsquared13/Diamond,CYBERBUGJR/Diamond,jriguera/Diamond,MediaMath/Diamond,Clever/Diamond,skbkontur/Diamond,Ormod/Diamond,disqus/Diamond,mfriedenhagen/Diamond,Slach/Diamond,Slach/Diamond,signalfx/Diamond,jriguera/Diamond,mzupan/Diamond,hvnsweeting/Diamond,janisz/Diamond-1,Precis/Diamond,disqus/Diamond,Ssawa/Diamond,szibis/Diamond,skbkontur/Diamond,Nihn/Diamond-1,Basis/Diamond,jaingaurav/Diamond,tuenti/Diamond,Netuitive/Diamond,thardie/Diamond,cannium/Diamond,gg7/diamond,ceph/Diamond,rtoma/Diamond,hvnsweeting/Diamond,gg7/diamond,russss/Diamond,Ensighten/Diamond,russss/Diamond,tusharmakkar08/Diamond,krbaker/Diamond,Netuitive/Diamond,thardie/Diamond,metamx/Diamond,MichaelDoyle/Diamond,socialwareinc/Diamond,Precis/Diamond,Ssawa/Diamond,works-mobile/Diamond,mzupan/Diamond,metamx/Diamond,skbkontur/Diamond,szibis/Diamond,stuartbfox/Diamond,rtoma/Diamond,python-diamond/Diamond,jumping/Diamond,timchenxiaoyu/Diamond,codepython/Diamond,TAKEALOT/Diamond,stuartbfox/Diamond,rtoma/Diamond,mfriedenhagen/Diamond,hamelg/Diamond,hamelg/Diamond,CYBERBUGJR/Diamond,TAKEALOT/Diamond,stuartbfox/Diamond,datafiniti/Diamond,MediaMath/Diamond,Nihn/Diamond-1,Ormod/Diamond,MichaelDoyle/Diamond,h00dy/Diamond,signalfx/Diamond,works-mobile/Diamond,datafiniti/Diamond,jaingaurav/Diamond,joel-airspring/Diamond,zoidbergwill/Diamond,Netuitive/Diamond,hamelg/Diamond,mzupan/Diamond,janisz/Diamond-1,actmd/Diamond,eMerzh/Diamond-1,timchenxiaoyu/Diamond,codepython/Diamond,works-mobile/Diamond,MediaMath/Diamond,Slach/Diamond,tellapart/Diamond,dcsquared13/Diamond,timchenxiaoyu/Diamond,Nihn/Diamond-1,codepython/Diamond,TAKEALOT/Diamond,mzupan/Diamond,tuenti/Diamond,Ssawa/Diamond,sebbrandt87/Diamond,h00dy/Diamond,hamelg/Diamond,janisz/Diamond-1,Nihn/Diamond-1,skbkontur/Diamond,joel-airspring/Diamond,jumping/Diamond,saucelabs/Diamond,krbaker/Diamond,Precis/Diamond,TinLe/Diamond,tusharmakkar08/Diamond,Ensighten/Diamond,datafiniti/Diamond,Slach/Diamond,acquia/Diamond,bmhatfield/Diamond,codepython/Diamond,Netuitive/netuitive-diamond,python-diamond/Diamond,MichaelDoyle/Diamond,krbaker/Diamond,russss/Diamond,tellapart/Diamond,Diamond,janisz/Diamond-1,acquia/Diamond,ceph/Diamond,h00dy/Diamond,tellapart/Diamond,acquia/Diamond,python-diamond/Diamond,eMerzh/Diamond-1,acquia/Diamond,russss/Diamond,gg7/diamond,works-mobile/Diamond,Netuitive/netuitive-diamond,bmhatfield/Diamond,CYBERBUGJR/Diamond,jumping/Diamond,timchenxiaoyu/Diamond,joel-airspring/Diamond,MediaMath/Diamond,anandbhoraskar/Diamond,stuartbfox/Diamond,ramjothikumar/Diamond,Netuitive/Diamond,EzyInsights/Diamond,hvnsweeting/Diamond,h00dy/Diamond,zoidbergwill/Diamond,dcsquared13/Diamond,disqus/Diamond,Clever/Diamond,tellapart/Diamond,Ensighten/Diamond,Netuitive/netuitive-diamond,joel-airspring/Diamond,Basis/Diamond,jriguera/Diamond,ramjothikumar/Diamond,cannium/Diamond,signalfx/Diamond,datafiniti/Diamond,rtoma/Diamond,Ssawa/Diamond,saucelabs/Diamond,bmhatfield/Diamond,anandbhoraskar/Diamond,EzyInsights/Diamond,TinLe/Diamond,anandbhoraskar/Diamond,cannium/Diamond,ceph/Diamond,Ssawa/Diamond,Basis/Diamond,TAKEALOT/Diamond,jaingaurav/Diamond,sebbrandt87/Diamond,thardie/Diamond,cannium/Diamond,Ensighten/Diamond,mfriedenhagen/Diamond,Clever/Diamond,socialwareinc/Diamond,socialwareinc/Diamond,sebbrandt87/Diamond,anandbhoraskar/Diamond,signalfx/Diamond,tuenti/Diamond,Ormod/Diamond,ramjothikumar/Diamond,saucelabs/Diamond,thardie/Diamond,eMerzh/Diamond-1,actmd/Diamond,actmd/Diamond
--- +++ @@ -28,19 +28,20 @@ @patch.object(Collector, 'publish') def test_should_work_with_real_data(self, publish_mock): + metrics = { + 'kormoc': 2, + 'root': 3, + 'total': 5, + } + + self.setDocExample(self.collector.__class__.__name__, metrics) + # Because of the compiled nature of pyutmp, we can't actually test # different operating system versions then the currently running # one if sys.platform.startswith('linux'): self.collector.collect() - metrics = { - 'kormoc': 2, - 'root': 3, - 'total': 5, - } - - self.setDocExample(self.collector.__class__.__name__, metrics) self.assertPublishedMany(publish_mock, metrics) ################################################################################
6c1d1c0662a0ae05dcfbb55484164a302bf5e0d3
tests/test_cl_json.py
tests/test_cl_json.py
from kqml import cl_json, KQMLList


def test_parse():
    json_dict = {'a': 1, 'b': 2,
                 'c': ['foo', {'bar': None, 'done': False}],
                 'this is json': True}
    res = cl_json.parse_json(json_dict)
    assert isinstance(res, KQMLList)
    assert len(res) == 2*len(json_dict.keys())
Add a test of the parser.
Add a test of the parser.
Python
bsd-2-clause
bgyori/pykqml
--- +++ @@ -0,0 +1,10 @@ +from kqml import cl_json, KQMLList + + +def test_parse(): + json_dict = {'a': 1, 'b': 2, + 'c': ['foo', {'bar': None, 'done': False}], + 'this is json': True} + res = cl_json.parse_json(json_dict) + assert isinstance(res, KQMLList) + assert len(res) == 2*len(json_dict.keys())
74837658fc50dc26278e3a2a56ddb0645c5fde2c
lexgen/utils.py
lexgen/utils.py
import math


def percentile(values, percent, key=lambda x: x):
    """
    Find the percentile of a list of values.

    Params:
        values (list): Sorted list of values.
        percent (float): A value from 0.0 to 1.0.
        key (function): Optional key function to compute value from each value on list.

    Returns:
        The percentile of the values.
    """
    if not values:
        return None

    k = (len(values) - 1) * percent
    floor = math.floor(k)
    ceil = math.ceil(k)
    if floor == ceil:
        return key(values[int(k)])

    d0 = key(values[int(floor)]) * (ceil - k)
    d1 = key(values[int(ceil)]) * (k - floor)

    return d0 + d1


def filter_dict_by_iqr(dictionary):
    """
    Returns a new dictionary filtering values outside of the interquartile range.

    Params:
        dictionary (dict): Dictionary to be filtered.

    Returns:
        A new dictionary without items outside of the interquartile range.
    """
    filtered_dict = {}
    values = sorted(set(dictionary.values()))

    first_quartile = percentile(values, 0.25)
    third_quartile = percentile(values, 0.75)

    for key in dictionary:
        if first_quartile <= dictionary[key] <= third_quartile:
            filtered_dict[key] = dictionary[key]

    return filtered_dict
Add two functions to calculate percentiles and filter a dict using IQR
Add two functions to calculate percentiles and filter a dict using IQR

The idea is to get a dictionary with a tweet count for each user and filter out the users whose number of tweets is not inside the interquartile range.
Python
mit
davidmogar/lexgen,davidmogar/lexgen
--- +++ @@ -0,0 +1,51 @@ +import math + + +def percentile(values, percent, key=lambda x: x): + """ + Find the percentile of a list of values. + + Params: + values (list): Sorted list of values. + percent (float): A value from 0.0 to 1.0. + key (function): Optional key function to compute value from each value on list. + + Returns: + The percentile of the values. + """ + if not values: + return None + + k = (len(values) - 1) * percent + floor = math.floor(k) + ceil = math.ceil(k) + if floor == ceil: + return key(values[int(k)]) + + d0 = key(values[int(floor)]) * (ceil - k) + d1 = key(values[int(ceil)]) * (k - floor) + + return d0 + d1 + + +def filter_dict_by_iqr(dictionary): + """ + Returns a new dictionary filtering values outside of the interquartile range. + + Params: + dictionary (dict): Dictionary to be filtered. + + Returns: + A new dictionary without items outside of the interquartile range. + """ + filtered_dict = {} + values = sorted(set(dictionary.values())) + + first_quartile = percentile(values, 0.25) + second_quartile = percentile(values, 0.75) + + for key in dictionary: + if first_quartile <= dictionary[key] <= second_quartile: + filtered_dict[key] = dictionary[key] + + return filtered_dict
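A short usage sketch of the two helpers above, in the spirit of the commit message (assuming the module is importable as lexgen.utils, per the file path in this row):

from lexgen.utils import percentile, filter_dict_by_iqr

tweet_counts = {'alice': 1, 'bob': 2, 'carol': 3, 'dave': 4, 'eve': 5}
values = sorted(set(tweet_counts.values()))

print(percentile(values, 0.25))          # 2 (first quartile)
print(percentile(values, 0.75))          # 4 (third quartile)
print(filter_dict_by_iqr(tweet_counts))  # {'bob': 2, 'carol': 3, 'dave': 4}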
8d19727e44d961d6bba263990cca954893782613
client/file_logging.py
client/file_logging.py
import logging
import logging.handlers
import os

import king_phisher.client.application as application
import king_phisher.client.plugins as plugins
import king_phisher.client.gui_utilities as gui_utilities

# logger name value
LOGGER_NAME = ''

# log file size, in MB
LOG_FILE_SIZE = 10

class Plugin(plugins.ClientPlugin):
    authors = ['Zach Janice']
    title = 'Logger'
    description = """
    Keep logs of campaign feedback and results. The directory
    of the logged file(s) is $HOME/.config/king-phisher.
    """
    homepage = 'https://github.com/securestate/king-phisher-plugins'

    # this is the primary plugin entry point which is executed when the plugin is enabled
    def initialize(self):
        # ensure the directory for the logs exists
        log_dir = application.USER_DATA_PATH
        if not os.path.exists(log_dir):
            os.mkdir(log_dir)

        # convert the specified log file size (MB) to bytes for use by the logger
        file_size = LOG_FILE_SIZE * 1024 * 1024

        # grab the logger in use by the client (root logger)
        logger = logging.getLogger(LOGGER_NAME)

        # set up the handler and formatter for the logger, and attach the components
        handler = logging.handlers.RotatingFileHandler(os.path.join(log_dir, 'client_log.log'), maxBytes=file_size, backupCount=2)
        formatter = logging.Formatter('%(asctime)s %(name)-50s %(levelname)-8s %(message)s')
        handler.setFormatter(formatter)
        logger.addHandler(handler)

        # keep reference of handler as an attribute
        self.handler = handler

        return True

    # this is a cleanup method to allow the plugin to close any open resources
    def finalize(self):
        # remove the logging handler from the logger and close it
        logger = logging.getLogger(LOGGER_NAME)
        logger.removeHandler(self.handler)
        self.handler.flush()
        self.handler.close()
Add a client plugin to log to files
Add a client plugin to log to files

Using logging.VALUE instead of fixed value for levels
Updated logger name value with root logger name
Added basis for RotatingFileHandler with logger
Added description of plugin
Updated comment, attached components. Ready for initial testing?
UNTESTED: Added file directory and assumed debug mode
UNTESTED: Cleaned up rotating header, directory specification
UNTESTED: Handler closing, more detailed formatting
Renamed logging plugin
Debugging changes; removal of options; works for initial tests
Initial addition attempt of dir, file size, file count options
Cleaned up sanitization, reporting of bad input. Tested
Revisions made based on comments
More revisions based on comments
Python
bsd-3-clause
securestate/king-phisher-plugins,zeroSteiner/king-phisher-plugins,zeroSteiner/king-phisher-plugins,securestate/king-phisher-plugins,wolfthefallen/king-phisher-plugins,wolfthefallen/king-phisher-plugins
--- +++ @@ -0,0 +1,54 @@ +import logging +import os + +import king_phisher.client.application as application +import king_phisher.client.plugins as plugins +import king_phisher.client.gui_utilities as gui_utilities + +# logger name value +LOGGER_NAME = '' + +# log file size, in MB +LOG_FILE_SIZE = 10 + +class Plugin(plugins.ClientPlugin): + authors = ['Zach Janice'] + title = 'Logger' + description = """ + Keep logs of campaign feedback and results. The directory + of the logged file(s) is $HOME/.config/king-phisher. + """ + homepage = 'https://github.com/securestate/king-phisher-plugins' + + # this is the primary plugin entry point which is executed when the plugin is enabled + def initialize(self): + # ensure the directory for the logs exists + log_dir = application.USER_DATA_PATH + if not os.path.exists(log_dir): + os.mkdir(log_dir) + + # convert the specified log file size (MB) to bytes for use by the logger + file_size = LOG_FILE_SIZE * 1024 * 1024 + + # grab the logger in use by the client (root logger) + logger = logging.getLogger(LOGGER_NAME) + + # set up the handler and formatter for the logger, and attach the components + handler = logging.handlers.RotatingFileHandler(os.path.join(log_dir, 'client_log.log'), maxBytes=file_size, backupCount=2) + formatter = logging.Formatter('%(asctime)s %(name)-50s %(levelname)-8s %(message)s') + handler.setFormatter(formatter) + logger.addHandler(handler) + + # keep reference of handler as an attribute + self.handler = handler + + return True + + # this is a cleanup method to allow the plugin to close any open resources + def finalize(self): + # remove the logging handler from the logger and close it + logger = logging.getLogger(LOGGER_NAME) + logger.removeHandler(self.handler) + self.handler.flush() + self.handler.close() +
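One detail worth flagging in the record above: it calls logging.handlers.RotatingFileHandler but only does import logging. The handlers submodule is not guaranteed to be loaded by that import (it presumably works here because another king-phisher module imports it first); a standalone script needs the explicit import — a minimal sketch:

import logging
import logging.handlers  # RotatingFileHandler lives in this submodule

handler = logging.handlers.RotatingFileHandler(
    'client_log.log', maxBytes=10 * 1024 * 1024, backupCount=2)
handler.setFormatter(logging.Formatter(
    '%(asctime)s %(name)-50s %(levelname)-8s %(message)s'))
logging.getLogger('').addHandler(handler)  # '' is the root logger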
3bdf1e98b3379fde17107fcdb1e32d9273a826b4
Lib/test/test_zipfile.py
Lib/test/test_zipfile.py
import zipfile, os

srcname = "junk9630.tmp"
zipname = "junk9708.tmp"

try:
    fp = open(srcname, "w")             # Make a source file with some lines
    for i in range(0, 1000):
        fp.write("Test of zipfile line %d.\n" % i)
    fp.close()

    zip = zipfile.ZipFile(zipname, "w") # Create the ZIP archive
    zip.write(srcname, srcname)
    zip.write(srcname, "another.name")
    zip.close()

    zip = zipfile.ZipFile(zipname, "r") # Read the ZIP archive
    zip.read("another.name")
    zip.read(srcname)
    zip.close()
finally:
    if os.path.isfile(srcname):         # Remove temporary files
        os.unlink(srcname)
    if os.path.isfile(zipname):
        os.unlink(zipname)
Test for zipfile.py, by Jim Ahlstrom.
Test for zipfile.py, by Jim Ahlstrom.
Python
mit
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
--- +++ @@ -0,0 +1,26 @@ +import zipfile, os + +srcname = "junk9630.tmp" +zipname = "junk9708.tmp" + +try: + fp = open(srcname, "w") # Make a source file with some lines + for i in range(0, 1000): + fp.write("Test of zipfile line %d.\n" % i) + fp.close() + + zip = zipfile.ZipFile(zipname, "w") # Create the ZIP archive + zip.write(srcname, srcname) + zip.write(srcname, "another.name") + zip.close() + + zip = zipfile.ZipFile(zipname, "r") # Read the ZIP archive + zip.read("another.name") + zip.read(srcname) + zip.close() +finally: + if os.path.isfile(srcname): # Remove temporary files + os.unlink(srcname) + if os.path.isfile(zipname): + os.unlink(zipname) +
4bd4e7f459eee610d5cf19f845299ca942ff4b64
python/datetime_timezone.py
python/datetime_timezone.py
#!/usr/bin/env python
# coding: utf-8

import datetime


# UTC #################################

# Naive
print(datetime.datetime.utcnow())

# Aware
print(datetime.datetime.now().astimezone(datetime.timezone.utc))

# Local ###############################

# Naive
print(datetime.datetime.now())

# Aware
print(datetime.datetime.now().astimezone(tz=None))

# Local timezone
print(datetime.datetime.now().astimezone(tz=None).tzinfo)
Add a snippet (python datetime timezones).
Add a snippet (python datetime timezones).
Python
mit
jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets
--- +++ @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# coding: utf-8 + +import datetime + + +# UTC ################################# + +# Naive +print(datetime.datetime.utcnow()) + +# Aware +print(datetime.datetime.now().astimezone(datetime.timezone.utc)) + +# Local ############################### + +# Naive +print(datetime.datetime.now()) + +# Aware +print(datetime.datetime.now().astimezone(tz=None)) + +# Local timezone +print(datetime.datetime.now().astimezone(tz=None).tzinfo)
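A note for readers on newer Pythons: utcnow() returns a naive datetime (as the snippet itself labels it) and was deprecated in Python 3.12. A sketch of the aware equivalent that skips the local-zone round trip:

import datetime

# Aware UTC timestamp in one step
print(datetime.datetime.now(datetime.timezone.utc))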
888beffa2ab3362c23d85b73b388f67f534e29a0
skeleton/plugins/sprites.disabled.py
skeleton/plugins/sprites.disabled.py
import os
import pipes
import shutil
import subprocess

"""
This plugin uses glue to sprite images:
http://glue.readthedocs.org/en/latest/quickstart.html

Install:

(Only if you want to sprite jpg too)
brew install libjpeg

(Only if you want to optimize pngs with optipng)
brew install optipng

sudo easy_install pip
sudo pip uninstall pil
sudo pip install pil
sudo pip install glue
"""

IMG_PATH = 'static/img/sprites'
CSS_PATH = 'static/css/sprites'

KEY = '_PREV_CHECKSUM'

def checksum(path):
    command = 'md5 `find %s -type f`' % pipes.quote(IMG_PATH)
    return subprocess.check_output(command, shell=True)

def preBuild(site):

    currChecksum = checksum(IMG_PATH)
    prevChecksum = getattr(site, KEY, None)

    # Don't run if none of the images has changed
    if currChecksum == prevChecksum:
        return

    shutil.rmtree(CSS_PATH) if os.path.isdir(CSS_PATH)
    os.mkdir(CSS_PATH)

    os.system('glue --cachebuster --crop --optipng "%s" "%s" --project' % (IMG_PATH, CSS_PATH))

    setattr(site, KEY, currChecksum)
Add spriting plugin based on glue
Add spriting plugin based on glue
Python
bsd-3-clause
danielmorosan/Cactus,dreadatour/Cactus,PegasusWang/Cactus,page-io/Cactus,andyzsf/Cactus-,fjxhkj/Cactus,juvham/Cactus,Knownly/Cactus,Knownly/Cactus,fjxhkj/Cactus,Knownly/Cactus,koobs/Cactus,koenbok/Cactus,chaudum/Cactus,eudicots/Cactus,dreadatour/Cactus,chaudum/Cactus,eudicots/Cactus,ibarria0/Cactus,andyzsf/Cactus-,koenbok/Cactus,juvham/Cactus,ibarria0/Cactus,danielmorosan/Cactus,Bluetide/Cactus,danielmorosan/Cactus,Bluetide/Cactus,gone/Cactus,PegasusWang/Cactus,koobs/Cactus,gone/Cactus,koenbok/Cactus,chaudum/Cactus,PegasusWang/Cactus,eudicots/Cactus,page-io/Cactus,ibarria0/Cactus,andyzsf/Cactus-,page-io/Cactus,juvham/Cactus,koobs/Cactus,fjxhkj/Cactus,dreadatour/Cactus,Bluetide/Cactus,gone/Cactus
--- +++ @@ -0,0 +1,47 @@ +import os +import pipes +import shutil +import subprocess + +""" +This plugin uses glue to sprite images: +http://glue.readthedocs.org/en/latest/quickstart.html + +Install: + +(Only if you want to sprite jpg too) +brew install libjpeg + +(Only if you want to optimize pngs with optipng) +brew install optipng + +sudo easy_install pip +sudo pip uninstall pil +sudo pip install pil +sudo pip install glue +""" + +IMG_PATH = 'static/img/sprites' +CSS_PATH = 'static/css/sprites' + +KEY = '_PREV_CHECKSUM' + +def checksum(path): + command = 'md5 `find %s -type f`' % pipes.quote(IMG_PATH) + return subprocess.check_output(command, shell=True) + +def preBuild(site): + + currChecksum = checksum(IMG_PATH) + prevChecksum = getattr(site, KEY, None) + + # Don't run if none of the images has changed + if currChecksum == prevChecksum: + return + + shutil.rmtree(CSS_PATH) if os.path.isdir(CSS_PATH) + os.mkdir(CSS_PATH) + + os.system('glue --cachebuster --crop --optipng "%s" "%s" --project' % (IMG_PATH, CSS_PATH)) + + setattr(site, KEY, currChecksum)
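As committed, the record above contains a syntax error: shutil.rmtree(CSS_PATH) if os.path.isdir(CSS_PATH) is neither a valid conditional expression (that form requires an else) nor a valid statement. A minimal corrected guard, kept separate here so the record stays verbatim:

if os.path.isdir(CSS_PATH):
    shutil.rmtree(CSS_PATH)
os.mkdir(CSS_PATH)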
a31d112ab188755a6d843599c1472334abcefd3b
src/zeit/workflow/tests/test_timebased.py
src/zeit/workflow/tests/test_timebased.py
import datetime
import mock
import pytz
import transaction
import zeit.content.article.cds
import zeit.content.article.testing


class TimeBasedWorkflowTest(zeit.cms.testing.FunctionalTestCase):

    layer = zeit.cms.testing.ZCML_LAYER

    def test_add_job_calls_async_celery_task_with_delay_for_future_execution(
            self):
        workflow = zeit.workflow.timebased.TimeBasedWorkflow(
            zeit.cms.interfaces.ICMSContent('http://xml.zeit.de/testcontent'))
        with zeit.cms.testing.site(self.getRootFolder()):
            with mock.patch(
                    'celery.Task.apply_async') as apply_async:
                workflow.add_job(
                    zeit.workflow.publish.PUBLISH_TASK,
                    datetime.datetime.now(pytz.UTC) + datetime.timedelta(1))

        transaction.commit()

        self.assertIn('countdown', apply_async.call_args[1])
Add test to ensure timebased jobs are called with delay.
ZON-3409: Add test to ensure timebased jobs are called with delay.
Python
bsd-3-clause
ZeitOnline/zeit.cms,ZeitOnline/zeit.cms,ZeitOnline/zeit.cms,ZeitOnline/zeit.cms
--- +++ @@ -0,0 +1,26 @@ +import datetime +import mock +import pytz +import transaction +import zeit.content.article.cds +import zeit.content.article.testing + + +class TimeBasedWorkflowTest(zeit.cms.testing.FunctionalTestCase): + + layer = zeit.cms.testing.ZCML_LAYER + + def test_add_job_calls_async_celery_task_with_delay_for_future_execution( + self): + workflow = zeit.workflow.timebased.TimeBasedWorkflow( + zeit.cms.interfaces.ICMSContent('http://xml.zeit.de/testcontent')) + with zeit.cms.testing.site(self.getRootFolder()): + with mock.patch( + 'celery.Task.apply_async') as apply_async: + workflow.add_job( + zeit.workflow.publish.PUBLISH_TASK, + datetime.datetime.now(pytz.UTC) + datetime.timedelta(1)) + + transaction.commit() + + self.assertIn('countdown', apply_async.call_args[1])
1ddec2ec4cae3d200f56a58f2de48334ab3d4af2
CodeFights/correctLineup.py
CodeFights/correctLineup.py
#!/usr/local/bin/python
# Code Fights Correct Lineup Problem


def correctLineup(athletes):
    return [a for t in zip(athletes[1::2], athletes[::2]) for a in t]


def main():
    tests = [
        [[1, 2, 3, 4, 5, 6], [2, 1, 4, 3, 6, 5]],
        [[13, 42], [42, 13]],
        [[2, 3, 1, 100, 99, 45, 22, 28], [3, 2, 100, 1, 45, 99, 28, 22]],
        [[85, 32, 45, 67, 32, 12, 45, 67], [32, 85, 67, 45, 12, 32, 67, 45]],
        [[60, 2, 24, 40], [2, 60, 40, 24]]
    ]

    for t in tests:
        res = correctLineup(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: correctLineup({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: correctLineup({}) returned {},"
                   "answer: {}").format(t[0], res, ans))


if __name__ == '__main__':
    main()
Solve Code Fights correct lineup problem
Solve Code Fights correct lineup problem
Python
mit
HKuz/Test_Code
--- +++ @@ -0,0 +1,30 @@ +#!/usr/local/bin/python +# Code Fights Correct Lineup Problem + + +def correctLineup(athletes): + return [a for t in zip(athletes[1::2], athletes[::2]) for a in t] + + +def main(): + tests = [ + [[1, 2, 3, 4, 5, 6], [2, 1, 4, 3, 6, 5]], + [[13, 42], [42, 13]], + [[2, 3, 1, 100, 99, 45, 22, 28], [3, 2, 100, 1, 45, 99, 28, 22]], + [[85, 32, 45, 67, 32, 12, 45, 67], [32, 85, 67, 45, 12, 32, 67, 45]], + [[60, 2, 24, 40], [2, 60, 40, 24]] + ] + + for t in tests: + res = correctLineup(t[0]) + ans = t[1] + if ans == res: + print("PASSED: correctLineup({}) returned {}" + .format(t[0], res)) + else: + print(("FAILED: correctLineup({}) returned {}," + "answer: {}").format(t[0], res, ans)) + + +if __name__ == '__main__': + main()
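The one-liner swaps neighbours by zipping the odd-indexed athletes with the even-indexed ones and flattening the pairs; a short trace on the first test case:

athletes = [1, 2, 3, 4, 5, 6]
pairs = list(zip(athletes[1::2], athletes[::2]))  # [(2, 1), (4, 3), (6, 5)]
print([a for t in pairs for a in t])              # [2, 1, 4, 3, 6, 5]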
cd0e32e21c315e888e351c5266c38195294450a3
drivers.py
drivers.py
import readers
import filtering
import writers


def merge_multiple_fractions(fns):
    """Performs the work to merge parallelized percolator fractions.
    Target/decoy split, filtering unique peptides, running qvality on resulting
    score distributions for psms and peptides and setting values."""
    pass


def split_target_decoy(fn, targetfn='target.xml', decoyfn='decoy.xml'):
    """ Calls splitter to split percolator output into target/decoy elements.
    Writes two new xml files with features. Currently only psms and
    peptides. Proteins not here, since one cannot do protein inference
    before having merged and remapped multifraction data anyway.
    """
    namespace = readers.get_namespace(fn)
    static_xml = readers.get_percolator_static_xml(fn, namespace)
    split_elements = filtering.split_target_decoy(fn, namespace)
    writers.write_percolator_xml(static_xml, split_elements['target'], targetfn)
    writers.write_percolator_xml(static_xml, split_elements['decoy'], decoyfn)


def merge_filter_unique_peptides(fns, score):
    """Make sure fractions are from same
    percolator run."""
    psm_generators = []
    namespace = readers.get_namespace(fns[0])
    for fn in fns:
        psm_generators.append(readers.get_psms(fn, namespace))
    filtering.filter_unique_peptides(fns, score, namespace)
Put writing code in own module
Put writing code in own module
Python
mit
glormph/msstitch
--- +++ @@ -0,0 +1,34 @@ +import readers +import filtering +import writers + + +def merge_multiple_fractions(fns): + """Performs the work to merge parallelized percolator fractions. + Target/decoy split, filtering unique peptides, running qvality on resulting + score distributions for psms and peptides and setting values.""" + pass + + +def split_target_decoy(fn, targetfn='target.xml', decoyfn='decoy.xml'): + """ Calls splitter to split percolator output into target/decoy elements. + Writes two new xml files with features. Currently only psms and + peptides. Proteins not here, since one cannot do protein inference + before having merged and remapped multifraction data anyway. + """ + namespace = readers.get_namespace(fn) + static_xml = readers.get_percolator_static_xml(fn, namespace) + split_elements = filtering.split_target_decoy(fn, namespace) + writers.write_percolator_xml(static_xml, split_elements['target'], targetfn) + writers.write_percolator_xml(static_xml, split_elements['decoy'], decoyfn) + + +def merge_filter_unique_peptides(fns, score): + """Make sure fractions are from same + percolator run.""" + psm_generators = [] + namespace = readers.get_namespace(fns[0]) + for fn in fns: + psm_generators.append(readers.get_psms(fn, namespace)) + filtering.filter_unique_peptides(fns, score, namespace) +
3c15b0ab1a7b3b8dd3df124bd687c024e8ee28a5
taiga/projects/migrations/0044_merge.py
taiga/projects/migrations/0044_merge.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-05-30 16:36
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('projects', '0043_auto_20160530_1004'),
        ('projects', '0042_auto_20160525_0911'),
    ]

    operations = [
    ]
Create a merge migration to fix the problem between master and stable branches
Create a merge migration to fix the problem between master and stable branches
Python
agpl-3.0
dayatz/taiga-back,taigaio/taiga-back,taigaio/taiga-back,dayatz/taiga-back,taigaio/taiga-back,xdevelsistemas/taiga-back-community,dayatz/taiga-back,xdevelsistemas/taiga-back-community,xdevelsistemas/taiga-back-community
--- +++ @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9.2 on 2016-05-30 16:36 +from __future__ import unicode_literals + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0043_auto_20160530_1004'), + ('projects', '0042_auto_20160525_0911'), + ] + + operations = [ + ]
8d000ebe16657f5cbe7fdf06ddd91322f141fb11
accounting/apps/books/models.py
accounting/apps/books/models.py
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser


class User(AbstractUser):
    pass


class Organization(models.Model):
    display_name = models.CharField(max_length=150,
                                    help_text="Name that you communicate")
    legal_name = models.CharField(max_length=150,
                                  help_text="Official name to appear on your reports, sales "
                                            "invoices and bills")
    members = models.ManyToManyField(settings.AUTH_USER_MODEL)
from django.conf import settings
from django.db import models
from django.contrib.auth.models import AbstractUser


class User(AbstractUser):
    pass


class Organization(models.Model):
    display_name = models.CharField(max_length=150,
                                    help_text="Name that you communicate")
    legal_name = models.CharField(max_length=150,
                                  help_text="Official name to appear on your reports, sales "
                                            "invoices and bills")
    members = models.ManyToManyField(settings.AUTH_USER_MODEL,
                                     blank=True, null=True)
Allow no members for creating an organization
Allow no members for creating an organization
Python
mit
kenjhim/django-accounting,dulaccc/django-accounting,kenjhim/django-accounting,dulaccc/django-accounting,dulaccc/django-accounting,kenjhim/django-accounting,dulaccc/django-accounting,kenjhim/django-accounting
--- +++ @@ -14,4 +14,5 @@ help_text="Official name to appear on your reports, sales " "invoices and bills") - members = models.ManyToManyField(settings.AUTH_USER_MODEL) + members = models.ManyToManyField(settings.AUTH_USER_MODEL, + blank=True, null=True)
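Worth noting about the change above: on a ManyToManyField, null=True has no effect, since the relation lives in a join table and there is no column to be NULL; newer Django versions warn about exactly this (check fields.W340). blank=True alone is what lets forms accept an organization with no members — a sketch of the equivalent field:

members = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True)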
100f4dc9f81728db3ae3a1c73ace92e52d46a4d4
django_afip/migrations/0014_no_partially_validated_receiptvalidations.py
django_afip/migrations/0014_no_partially_validated_receiptvalidations.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-04 17:17
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('afip', '0013_taxpayer_is_sandboxed'),
    ]

    operations = [
        migrations.AlterField(
            model_name='receiptvalidation',
            name='result',
            field=models.CharField(choices=[('A', 'approved'), ('R', 'rejected')], max_length=1, verbose_name='result'),
        ),
    ]
Add missing validation (see 58227d2)
Add missing validation (see 58227d2)
Python
isc
hobarrera/django-afip,hobarrera/django-afip
--- +++ @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9.2 on 2016-06-04 17:17 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('afip', '0013_taxpayer_is_sandboxed'), + ] + + operations = [ + migrations.AlterField( + model_name='receiptvalidation', + name='result', + field=models.CharField(choices=[('A', 'approved'), ('R', 'rejected')], max_length=1, verbose_name='result'), + ), + ]
68dfe9c86ec1d2042b3f1eef21738300a3c6caf2
arxiv_vanity/papers/management/commands/mark_failed_renders_as_expired.py
arxiv_vanity/papers/management/commands/mark_failed_renders_as_expired.py
from django.core.management.base import BaseCommand, CommandError
from ...models import Render


class Command(BaseCommand):
    help = 'Marks all renders as expired so they will be rerendered'

    def handle(self, *args, **options):
        qs = Render.objects.defer("container_inspect", "container_logs").failed().not_expired()
        qs.force_expire()
        print(f"Done")
Add command to mark failed renders as expired
Add command to mark failed renders as expired
Python
apache-2.0
arxiv-vanity/arxiv-vanity,arxiv-vanity/arxiv-vanity,arxiv-vanity/arxiv-vanity,arxiv-vanity/arxiv-vanity
--- +++ @@ -0,0 +1,11 @@ +from django.core.management.base import BaseCommand, CommandError +from ...models import Render + + +class Command(BaseCommand): + help = 'Marks all renders as expired so they will be rerendered' + + def handle(self, *args, **options): + qs = Render.objects.defer("container_inspect", "container_logs").failed().not_expired() + qs.force_expire() + print(f"Done")
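A small style note on the record above, offered as a suggestion rather than a fix: Django's documented convention for management commands is to write through self.stdout (which cooperates with testing and verbosity handling) rather than print, and the f-string has no placeholders. A sketch of the handle method rewritten that way:

def handle(self, *args, **options):
    qs = Render.objects.defer("container_inspect", "container_logs").failed().not_expired()
    qs.force_expire()
    self.stdout.write("Done")  # plain string; the f-prefix added nothing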
0ca93d94d224b5cdf926de584ee9512bc084dc4f
examples/visualization/show_2d_complex.py
examples/visualization/show_2d_complex.py
# Copyright 2014-2016 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.

"""Examples on using the vector.show() syntax

NOTES
-----
The behaviour of blocking shows etc in matplotlib is experimental and can cause
issues with these examples.
"""

import odl

spc = odl.uniform_discr([0, 0], [1, 1], [100, 100], field=odl.ComplexNumbers())
vec = odl.util.shepp_logan(spc, modified=True) * (1 + 0.5j)

# Can also force "instant" plotting
vec.show(show=True)
Add 2d visualization example with complex data
ENH: Add 2d visualization example with complex data
Python
mpl-2.0
kohr-h/odl,odlgroup/odl,aringh/odl,odlgroup/odl,aringh/odl,kohr-h/odl
--- +++ @@ -0,0 +1,32 @@ +# Copyright 2014-2016 The ODL development group +# +# This file is part of ODL. +# +# ODL is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# ODL is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with ODL. If not, see <http://www.gnu.org/licenses/>. + +"""Examples on using the vector.show() syntax + +NOTES +----- +The behaviour of blocking shows etc in matplotlib is experimental and can cause +issues with these examples. +""" + +import odl + +spc = odl.uniform_discr([0, 0], [1, 1], [100, 100], field=odl.ComplexNumbers()) +vec = odl.util.shepp_logan(spc, modified=True) * (1 + 0.5j) + +# Can also force "instant" plotting +vec.show(show=True)
bd23cb0214ce0a3eb14b069599f4bded8bd2b26a
analysis/compress-jacobians.py
analysis/compress-jacobians.py
#!/usr/bin/env python

import climate
import joblib
import lmj.pca

import database


def jac(trial):
    trial.load()
    cols = [c for c in trial.df.columns if c.startswith('jac-fwd')]
    return trial.df[cols].values


def main(root, pattern='*'):
    trials = database.Experiment(root).trials_matching(pattern)
    proc = joblib.delayed(jac)
    jacobians = []
    for jacs in joblib.Parallel(-2)(proc(t) for t in trials):
        jacobians.extend(jacs)
    print(len(jacobians))


if __name__ == '__main__':
    climate.call(main)
Add script for pca-ing jacobians.
Add script for pca-ing jacobians.
Python
mit
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
--- +++ @@ -0,0 +1,26 @@ +#!/usr/bin/env python + +import climate +import joblib +import lmj.pca + +import database + + +def jac(trial): + trial.load() + cols = [c for c in trial.df.columns if c.startswith('jac-fwd')] + return trial.df[cols].values + + +def main(root, pattern='*'): + trials = database.Experiment(root).trials_matching(pattern) + proc = joblib.delayed(jac) + jacobians = [] + for jacs in joblib.Parallel(-2)(proc(t) for t in trials): + jacobians.extend(jacs) + print(len(jacobians)) + + +if __name__ == '__main__': + climate.call(main)
d619307b77851e014cabc3e864e4c11dfea7764d
integration-test/977-min-zoom-from-ne-join.py
integration-test/977-min-zoom-from-ne-join.py
# -*- coding: utf-8 -*-
from . import FixtureTest


class MinZoomFromNETest(FixtureTest):

    def setUp(self):
        import dsl

        super(MinZoomFromNETest, self).setUp()

        self.lon, self.lat = (-3.2765753, 54.7023545)

        self.generate_fixtures(
            # https://www.openstreetmap.org/node/838090640
            dsl.point(838090640, (self.lon, self.lat), {
                'name': u'United Kingdom',
                'place': u'country',
                'population': u'61792000',
                'source': u'openstreetmap.org',
                'wikidata': u'Q145',
                'wikipedia': u'de:United Kingdom',  # LOL, de:
                # NOTE: these aren't in the data from OSM, but are joined at
                # database query time from the Natural Earth data.
                '__ne_min_zoom': 1.7,
                '__ne_max_zoom': 6.7,
            }),
        )

    def test_uk_should_show_up_zooms_1_to_6(self):
        from tilequeue.tile import deg2num
        # should show up in zooms within the range 1-6

        for zoom in xrange(1, 6):
            x, y = deg2num(self.lat, self.lon, zoom)
            self.assert_has_feature(
                zoom, x, y, 'places', {
                    'id': 838090640,
                    'min_zoom': 1.7,
                    'max_zoom': 6.7,
                })

    def test_uk_should_not_show_up_zoom_0(self):
        # shouldn't be in the zoom 0 tile because min_zoom >= 1
        self.assert_no_matching_feature(
            0, 0, 0, 'places', {'id': 838090640})

    def test_uk_should_not_show_up_zoom_7(self):
        # shouldn't be in the zoom 0 tile because max_zoom < 7
        from tilequeue.tile import deg2num

        zoom = 7
        x, y = deg2num(self.lat, self.lon, zoom)
        self.assert_no_matching_feature(
            zoom, x, y, 'places', {'id': 838090640})
Add test for NE data min/max zoom join to places.
Add test for NE data min/max zoom join to places.
Python
mit
mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource
--- +++ @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +from . import FixtureTest + + +class MinZoomFromNETest(FixtureTest): + + def setUp(self): + import dsl + + super(MinZoomFromNETest, self).setUp() + + self.lon, self.lat = (-3.2765753, 54.7023545) + + self.generate_fixtures( + # https://www.openstreetmap.org/node/838090640 + dsl.point(838090640, (self.lon, self.lat), { + 'name': u'United Kingdom', + 'place': u'country', + 'population': u'61792000', + 'source': u'openstreetmap.org', + 'wikidata': u'Q145', + 'wikipedia': u'de:United Kingdom', # LOL, de: + # NOTE: these aren't in the data from OSM, but are joined at + # database query time from the Natural Earth data. + '__ne_min_zoom': 1.7, + '__ne_max_zoom': 6.7, + }), + ) + + def test_uk_should_show_up_zooms_1_to_6(self): + from tilequeue.tile import deg2num + # should show up in zooms within the range 1-6 + + for zoom in xrange(1, 6): + x, y = deg2num(self.lat, self.lon, zoom) + self.assert_has_feature( + zoom, x, y, 'places', { + 'id': 838090640, + 'min_zoom': 1.7, + 'max_zoom': 6.7, + }) + + def test_uk_should_not_show_up_zoom_0(self): + # shouldn't be in the zoom 0 tile because min_zoom >= 1 + self.assert_no_matching_feature( + 0, 0, 0, 'places', {'id': 838090640}) + + def test_uk_should_not_show_up_zoom_7(self): + # shouldn't be in the zoom 0 tile because max_zoom < 7 + from tilequeue.tile import deg2num + + zoom = 7 + x, y = deg2num(self.lat, self.lon, zoom) + self.assert_no_matching_feature( + zoom, x, y, 'places', {'id': 838090640})
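One likely off-by-one in the record above: the comment promises zooms 1-6, but xrange(1, 6) iterates 1 through 5 only, so zoom 6 is never asserted even though max_zoom is 6.7. If zoom 6 is meant to be covered, the loop would read (using the record's own helpers):

for zoom in xrange(1, 7):  # xrange's upper bound is exclusive
    x, y = deg2num(self.lat, self.lon, zoom)
    self.assert_has_feature(
        zoom, x, y, 'places',
        {'id': 838090640, 'min_zoom': 1.7, 'max_zoom': 6.7})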
7c8c6fb26dbf22e9fa09b1121683957123d9b903
14B-088/HI/imaging/HI_dirty_cube.py
14B-088/HI/imaging/HI_dirty_cube.py
'''
Create a dirty HI cube for comparison and use in feathering.

*Note*: Ran with casa-prerelease-5.0.0-187.el7 to take advantage of tclean's
read-only mode, which speeds things up considerably.
'''

import os

from tasks import tclean, impbcor

# CASA init should have the VLA_Lband repo appended to the path
from paths import data_path

# The full MS isn't in the same path as the usual data products.
full_path = os.path.join(data_path, "../VLA/14B-088/Lines/HI/")

output_path = os.path.join(full_path, "dirty_cube")

if not os.path.exists(output_path):
    os.mkdir(output_path)


# Image ALL channels in the continuum subtracted MS (~2000).
# Keep the same spatial settings as is used for the cleaned cubes.

tclean(vis=os.path.join(full_path, '14B-088_HI.ms.contsub'),
       datacolumn='data',
       imagename=os.path.join(output_path, 'M33_14B-088_HI.dirty'),
       field='M33*',
       imsize=[2560, 2560],
       cell='3arcsec',
       specmode='cube',
       start=1,
       width=1,
       nchan=2001,
       startmodel=None,
       gridder='mosaic',
       weighting='natural',
       niter=0,
       threshold='3.2mJy/beam',
       phasecenter='J2000 01h33m50.904 +30d39m35.79',
       restfreq='1420.40575177MHz',
       outframe='LSRK',
       pblimit=0.1,
       usemask='pb',
       mask=None,
       deconvolver='hogbom',
       pbcor=False,
       chanchunks=-1
       )

# Apply pb correction
impbcor(imagename=os.path.join(output_path, 'M33_14B-088_HI.dirty.image'),
        pbimage=os.path.join(output_path, 'M33_14B-088_HI.dirty.pb'),
        outfile=os.path.join(output_path, 'M33_14B-088_HI.dirty.image,pbcor'))
Create a dirty cubes of the whole HI 14B-088 data
Create a dirty cubes of the whole HI 14B-088 data
Python
mit
e-koch/VLA_Lband,e-koch/VLA_Lband
--- +++ @@ -0,0 +1,57 @@ + +''' +Create a dirty HI cube for comparison and use in feathering. + +*Note*: Ran with casa-prerelease-5.0.0-187.el7 to take advantage of tclean's +read-only mode, which speeds things up considerably. +''' + +import os + +from tasks import tclean, impbcor + +# CASA init should have the VLA_Lband repo appended to the path +from paths import data_path + +# The full MS isn't in the same path as the usual data products. +full_path = os.path.join(data_path, "../VLA/14B-088/Lines/HI/") + +output_path = os.path.join(full_path, "dirty_cube") + +if not os.path.exists(output_path): + os.mkdir(output_path) + + +# Image ALL channels in the continuum subtracted MS (~2000). +# Keep the same spatial settings as is used for the cleaned cubes. + +tclean(vis=os.path.join(full_path, '14B-088_HI.ms.contsub'), + datacolumn='data', + imagename=os.path.join(output_path, 'M33_14B-088_HI.dirty'), + field='M33*', + imsize=[2560, 2560], + cell='3arcsec', + specmode='cube', + start=1, + width=1, + nchan=2001, + startmodel=None, + gridder='mosaic', + weighting='natural', + niter=0, + threshold='3.2mJy/beam', + phasecenter='J2000 01h33m50.904 +30d39m35.79', + restfreq='1420.40575177MHz', + outframe='LSRK', + pblimit=0.1, + usemask='pb', + mask=None, + deconvolver='hogbom', + pbcor=False, + chanchunks=-1 + ) + +# Apply pb correction +impbcor(imagename=os.path.join(output_path, 'M33_14B-088_HI.dirty.image'), + pbimage=os.path.join(output_path, 'M33_14B-088_HI.dirty.pb'), + outfile=os.path.join(output_path, 'M33_14B-088_HI.dirty.image,pbcor'))
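The outfile passed to impbcor ends in '.image,pbcor' with a comma; the usual CASA convention for the pb-corrected product is a dot-separated suffix, so this is presumably a typo for '.image.pbcor':

impbcor(imagename=os.path.join(output_path, 'M33_14B-088_HI.dirty.image'),
        pbimage=os.path.join(output_path, 'M33_14B-088_HI.dirty.pb'),
        outfile=os.path.join(output_path, 'M33_14B-088_HI.dirty.image.pbcor'))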
6ce83f65f12fe02c4f9417c610322f21ef6c02c6
apps/plea/tests/test_timeout.py
apps/plea/tests/test_timeout.py
from django.test import TestCase
from django.test.client import Client
from django.conf import settings
from importlib import import_module

from ..views import PleaOnlineForms

class TestTimeout(TestCase):

    def setUp(self):
        self.client = Client()
        # http://code.djangoproject.com/ticket/10899
        settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file'
        engine = import_module(settings.SESSION_ENGINE)
        store = engine.SessionStore()
        store.save()
        self.session = store
        self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key

    def test_no_urn_no_refresh_headers(self):
        response = self.client.get('/plea/case/')

        self.assertEqual(response.has_header('Refresh'), False)

    def test_when_urn_has_refresh_headers(self):
        session = self.session
        session["case"] = {"urn": "51/AA/00000/00"}
        session.save()

        response = self.client.get('/plea/case/')

        wait = str(getattr(settings, "SESSION_COOKIE_AGE", 3600));

        self.assertEqual(response.has_header('Refresh'), True)
        self.assertTrue("Refresh: " + wait + "; url=/session-timeout/" in response.serialize_headers())
Add unit tests for session timeout http headers
Add unit tests for session timeout http headers

These tests check for the absence or presence of the session timeout
redirect headers.

[MAPDEV326]
Python
mit
ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas,ministryofjustice/manchester_traffic_offences_pleas
--- +++ @@ -0,0 +1,35 @@ +from django.test import TestCase +from django.test.client import Client +from django.conf import settings +from importlib import import_module + +from ..views import PleaOnlineForms + +class TestTimeout(TestCase): + + def setUp(self): + self.client = Client() + # http://code.djangoproject.com/ticket/10899 + settings.SESSION_ENGINE = 'django.contrib.sessions.backends.file' + engine = import_module(settings.SESSION_ENGINE) + store = engine.SessionStore() + store.save() + self.session = store + self.client.cookies[settings.SESSION_COOKIE_NAME] = store.session_key + + def test_no_urn_no_refresh_headers(self): + response = self.client.get('/plea/case/') + + self.assertEqual(response.has_header('Refresh'), False) + + def test_when_urn_has_refresh_headers(self): + session = self.session + session["case"] = {"urn": "51/AA/00000/00"} + session.save() + + response = self.client.get('/plea/case/') + + wait = str(getattr(settings, "SESSION_COOKIE_AGE", 3600)); + + self.assertEqual(response.has_header('Refresh'), True) + self.assertTrue("Refresh: " + wait + "; url=/session-timeout/" in response.serialize_headers())
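Two nits in the record above, flagged as observations only: PleaOnlineForms is imported but never used, and the trailing semicolon is a harmless no-op in Python:

wait = str(getattr(settings, "SESSION_COOKIE_AGE", 3600))  # no semicolon needed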
d3ab0989bdeaf1aefb287b5bf6ae511c0441d370
printurls.py
printurls.py
# printurls.py - returns command for downloading CalISO "renewables watch" data.

import datetime

URL_FORMAT = "http://content.caiso.com/green/renewrpt/%Y%m%d_DailyRenewablesWatch.txt"
START="2014/05/20"
END="2014/05/30"
DATEFORMAT="%Y/%m/%d"

def daterange(s, e):
    for i in range((e - s).days):
        yield s + datetime.timedelta(i)

cmd = "wget --directory-prefix=cache"

for d in daterange(datetime.datetime.strptime(START, DATEFORMAT),
                   datetime.datetime.strptime(END, DATEFORMAT)):
    cmd += " "
    cmd += d.strftime(URL_FORMAT)

print cmd
Add a script for downloading all CalISO 30min generation data.
Add a script for downloading all CalISO 30min generation data.
Python
mit
gonzojive/cal-iso-daily-renewables
--- +++ @@ -0,0 +1,21 @@ +# printurls.py - returns command for downloading CalISO "renewables watch" data. + +import datetime + +URL_FORMAT = "http://content.caiso.com/green/renewrpt/%Y%m%d_DailyRenewablesWatch.txt" +START="2014/05/20" +END="2014/05/30" +DATEFORMAT="%Y/%m/%d" + +def daterange(s, e): + for i in range((e - s).days): + yield s + datetime.timedelta(i) + +cmd = "wget --directory-prefix=cache" + +for d in daterange(datetime.datetime.strptime(START, DATEFORMAT), + datetime.datetime.strptime(END, DATEFORMAT)): + cmd += " " + cmd += d.strftime(URL_FORMAT) + +print cmd
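Note that daterange is half-open: range((e - s).days) yields 0..9 for these dates, so 2014/05/30 itself is never fetched. If the end date should be inclusive, a sketch:

def daterange(s, e):
    # +1 makes END inclusive; drop it if the half-open behaviour is intended
    for i in range((e - s).days + 1):
        yield s + datetime.timedelta(i)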
eebb736bf83c572b797931c571e7416223436461
homeassistant/components/light/insteon.py
homeassistant/components/light/insteon.py
""" homeassistant.components.light.insteon ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for Insteon Hub lights. """ from homeassistant.components.insteon import (INSTEON, InsteonToggleDevice) def setup_platform(hass, config, add_devices, discovery_info=None): """ Sets up the Insteon Hub light platform. """ devs = [] for device in INSTEON.devices: if device.DeviceCategory == "Switched Lighting Control": devs.append(InsteonToggleDevice(device)) add_devices(devs)
""" homeassistant.components.light.insteon ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for Insteon Hub lights. """ from homeassistant.components.insteon import (INSTEON, InsteonToggleDevice) def setup_platform(hass, config, add_devices, discovery_info=None): """ Sets up the Insteon Hub light platform. """ devs = [] for device in INSTEON.devices: if device.DeviceCategory == "Switched Lighting Control": devs.append(InsteonToggleDevice(device)) if device.DeviceCategory == "Dimmable Lighting Control": devs.append(InsteonToggleDevice(device)) add_devices(devs)
Add ability to control dimmable sources
Add ability to control dimmable sources
Python
mit
emilhetty/home-assistant,Duoxilian/home-assistant,rohitranjan1991/home-assistant,toddeye/home-assistant,ct-23/home-assistant,florianholzapfel/home-assistant,kennedyshead/home-assistant,keerts/home-assistant,lukas-hetzenecker/home-assistant,jabesq/home-assistant,JshWright/home-assistant,open-homeautomation/home-assistant,molobrakos/home-assistant,dmeulen/home-assistant,qedi-r/home-assistant,coteyr/home-assistant,molobrakos/home-assistant,leppa/home-assistant,miniconfig/home-assistant,alexmogavero/home-assistant,Duoxilian/home-assistant,DavidLP/home-assistant,instantchow/home-assistant,mKeRix/home-assistant,Smart-Torvy/torvy-home-assistant,hmronline/home-assistant,LinuxChristian/home-assistant,Julian/home-assistant,varunr047/homefile,balloob/home-assistant,kyvinh/home-assistant,rohitranjan1991/home-assistant,aequitas/home-assistant,dmeulen/home-assistant,postlund/home-assistant,luxus/home-assistant,jawilson/home-assistant,auduny/home-assistant,Zac-HD/home-assistant,LinuxChristian/home-assistant,tchellomello/home-assistant,Duoxilian/home-assistant,Julian/home-assistant,mikaelboman/home-assistant,shaftoe/home-assistant,luxus/home-assistant,tboyce1/home-assistant,happyleavesaoc/home-assistant,w1ll1am23/home-assistant,robjohnson189/home-assistant,JshWright/home-assistant,nkgilley/home-assistant,stefan-jonasson/home-assistant,pschmitt/home-assistant,balloob/home-assistant,hmronline/home-assistant,molobrakos/home-assistant,Zac-HD/home-assistant,betrisey/home-assistant,robjohnson189/home-assistant,sffjunkie/home-assistant,mezz64/home-assistant,Smart-Torvy/torvy-home-assistant,varunr047/homefile,jnewland/home-assistant,keerts/home-assistant,tinloaf/home-assistant,sander76/home-assistant,robbiet480/home-assistant,tinloaf/home-assistant,deisi/home-assistant,leoc/home-assistant,ma314smith/home-assistant,mikaelboman/home-assistant,morphis/home-assistant,DavidLP/home-assistant,varunr047/homefile,robjohnson189/home-assistant,ma314smith/home-assistant,open-homeautomation/home-assistant,oandrew/home-assistant,stefan-jonasson/home-assistant,keerts/home-assistant,titilambert/home-assistant,ma314smith/home-assistant,ct-23/home-assistant,tboyce1/home-assistant,toddeye/home-assistant,philipbl/home-assistant,emilhetty/home-assistant,shaftoe/home-assistant,nnic/home-assistant,florianholzapfel/home-assistant,Danielhiversen/home-assistant,xifle/home-assistant,auduny/home-assistant,fbradyirl/home-assistant,philipbl/home-assistant,coteyr/home-assistant,LinuxChristian/home-assistant,jamespcole/home-assistant,mKeRix/home-assistant,alexmogavero/home-assistant,jaharkes/home-assistant,turbokongen/home-assistant,shaftoe/home-assistant,jnewland/home-assistant,betrisey/home-assistant,MungoRae/home-assistant,MungoRae/home-assistant,nugget/home-assistant,oandrew/home-assistant,PetePriority/home-assistant,devdelay/home-assistant,aoakeson/home-assistant,eagleamon/home-assistant,titilambert/home-assistant,nnic/home-assistant,rohitranjan1991/home-assistant,jaharkes/home-assistant,hmronline/home-assistant,tinloaf/home-assistant,devdelay/home-assistant,MungoRae/home-assistant,srcLurker/home-assistant,aronsky/home-assistant,kyvinh/home-assistant,tchellomello/home-assistant,tboyce021/home-assistant,florianholzapfel/home-assistant,Zac-HD/home-assistant,jamespcole/home-assistant,alexmogavero/home-assistant,nugget/home-assistant,eagleamon/home-assistant,alexmogavero/home-assistant,happyleavesaoc/home-assistant,Theb-1/home-assistant,HydrelioxGitHub/home-assistant,luxus/home-assistant,Zac-HD/home-assistant,eagleamon/home-assistant,leoc/home-
assistant,mikaelboman/home-assistant,deisi/home-assistant,postlund/home-assistant,mKeRix/home-assistant,ct-23/home-assistant,jawilson/home-assistant,leppa/home-assistant,auduny/home-assistant,happyleavesaoc/home-assistant,soldag/home-assistant,bdfoster/blumate,stefan-jonasson/home-assistant,leoc/home-assistant,instantchow/home-assistant,sffjunkie/home-assistant,sffjunkie/home-assistant,qedi-r/home-assistant,morphis/home-assistant,varunr047/homefile,jaharkes/home-assistant,betrisey/home-assistant,mKeRix/home-assistant,deisi/home-assistant,morphis/home-assistant,persandstrom/home-assistant,oandrew/home-assistant,MartinHjelmare/home-assistant,adrienbrault/home-assistant,Zyell/home-assistant,emilhetty/home-assistant,turbokongen/home-assistant,jabesq/home-assistant,emilhetty/home-assistant,srcLurker/home-assistant,Julian/home-assistant,HydrelioxGitHub/home-assistant,leoc/home-assistant,MartinHjelmare/home-assistant,Cinntax/home-assistant,hexxter/home-assistant,dmeulen/home-assistant,robjohnson189/home-assistant,aequitas/home-assistant,DavidLP/home-assistant,joopert/home-assistant,kennedyshead/home-assistant,philipbl/home-assistant,justyns/home-assistant,instantchow/home-assistant,w1ll1am23/home-assistant,FreekingDean/home-assistant,hexxter/home-assistant,Danielhiversen/home-assistant,betrisey/home-assistant,nnic/home-assistant,Cinntax/home-assistant,varunr047/homefile,mikaelboman/home-assistant,hmronline/home-assistant,Teagan42/home-assistant,mezz64/home-assistant,jabesq/home-assistant,keerts/home-assistant,sdague/home-assistant,morphis/home-assistant,xifle/home-assistant,devdelay/home-assistant,sander76/home-assistant,LinuxChristian/home-assistant,joopert/home-assistant,hmronline/home-assistant,kyvinh/home-assistant,sdague/home-assistant,eagleamon/home-assistant,Smart-Torvy/torvy-home-assistant,tboyce021/home-assistant,ewandor/home-assistant,bdfoster/blumate,ewandor/home-assistant,PetePriority/home-assistant,bdfoster/blumate,open-homeautomation/home-assistant,dmeulen/home-assistant,shaftoe/home-assistant,MungoRae/home-assistant,aequitas/home-assistant,ct-23/home-assistant,xifle/home-assistant,JshWright/home-assistant,LinuxChristian/home-assistant,kyvinh/home-assistant,partofthething/home-assistant,bdfoster/blumate,ewandor/home-assistant,Zyell/home-assistant,emilhetty/home-assistant,FreekingDean/home-assistant,miniconfig/home-assistant,coteyr/home-assistant,Teagan42/home-assistant,mikaelboman/home-assistant,Julian/home-assistant,philipbl/home-assistant,PetePriority/home-assistant,Theb-1/home-assistant,jnewland/home-assistant,ma314smith/home-assistant,GenericStudent/home-assistant,deisi/home-assistant,Smart-Torvy/torvy-home-assistant,nugget/home-assistant,soldag/home-assistant,home-assistant/home-assistant,MungoRae/home-assistant,deisi/home-assistant,robbiet480/home-assistant,sffjunkie/home-assistant,GenericStudent/home-assistant,aoakeson/home-assistant,justyns/home-assistant,xifle/home-assistant,oandrew/home-assistant,Zyell/home-assistant,justyns/home-assistant,sffjunkie/home-assistant,pschmitt/home-assistant,fbradyirl/home-assistant,miniconfig/home-assistant,hexxter/home-assistant,srcLurker/home-assistant,open-homeautomation/home-assistant,srcLurker/home-assistant,JshWright/home-assistant,aronsky/home-assistant,home-assistant/home-assistant,nkgilley/home-assistant,jamespcole/home-assistant,bdfoster/blumate,miniconfig/home-assistant,fbradyirl/home-assistant,Duoxilian/home-assistant,devdelay/home-assistant,jaharkes/home-assistant,Theb-1/home-assistant,stefan-jonasson/home-assistant,florianholzapf
el/home-assistant,balloob/home-assistant,ct-23/home-assistant,tboyce1/home-assistant,tboyce1/home-assistant,happyleavesaoc/home-assistant,hexxter/home-assistant,lukas-hetzenecker/home-assistant,adrienbrault/home-assistant,HydrelioxGitHub/home-assistant,partofthething/home-assistant,persandstrom/home-assistant,MartinHjelmare/home-assistant,aoakeson/home-assistant,persandstrom/home-assistant
--- +++ @@ -13,4 +13,6 @@ for device in INSTEON.devices: if device.DeviceCategory == "Switched Lighting Control": devs.append(InsteonToggleDevice(device)) + if device.DeviceCategory == "Dimmable Lighting Control": + devs.append(InsteonToggleDevice(device)) add_devices(devs)
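Despite the subject line, both branches in the new version wrap the device in the same InsteonToggleDevice class, so dimmable devices gain on/off control here rather than actual brightness control. The two ifs can also fold into one membership test with identical behaviour:

for device in INSTEON.devices:
    if device.DeviceCategory in ("Switched Lighting Control",
                                 "Dimmable Lighting Control"):
        devs.append(InsteonToggleDevice(device))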
cbc4269b78e3ce2edb116323353f92c9b2a4d15b
test/unit/ggrc/models/base_mixins.py
test/unit/ggrc/models/base_mixins.py
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com

"""Base class for testing mixins on models"""

import unittest


class BaseMixins(unittest.TestCase):
    """Tests inclusion of correct mixins and their attributes"""

    def setUp(self):
        self.model = None
        self.included_mixins = []
        self.attributes_introduced = []

    def test_includes_correct_mixins(self):
        for mixin in self.included_mixins:
            self.assertTrue(
                issubclass(self.model, mixin),
                'Expected {} to inherit from {} but it does not'.format(
                    self.model.__name__, mixin)
            )

    def test_correct_attrs_introduced(self):
        for attr_name, expected_type in self.attributes_introduced:
            actual_type = type(getattr(self.model, attr_name))
            self.assertEqual(
                expected_type,
                actual_type,
                'Expected attr "{}" to be of type {} but is actually {}'
                .format(attr_name, expected_type, actual_type)
            )
Add base class for testing mixins and attributes
Add base class for testing mixins and attributes
Python
apache-2.0
NejcZupec/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,josthkko/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core
--- +++ @@ -0,0 +1,35 @@ +# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file> +# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> +# Created By: anze@reciprocitylabs.com +# Maintained By: anze@reciprocitylabs.com + +"""Base class for testing mixins on models""" + +import unittest + + +class BaseMixins(unittest.TestCase): + """Tests inclusion of correct mixins and their attributes""" + + def setUp(self): + self.model = None + self.included_mixins = [] + self.attributes_introduced = [] + + def test_includes_correct_mixins(self): + for mixin in self.included_mixins: + self.assertTrue( + issubclass(self.model, mixin), + 'Expected {} to inherit from {} but it does not'.format( + self.model.__name__, mixin) + ) + + def test_correct_attrs_introduced(self): + for attr_name, expected_type in self.attributes_introduced: + actual_type = type(getattr(self.model, attr_name)) + self.assertEqual( + expected_type, + actual_type, + 'Expected attr "{}" to be of type {} but is actually {}' + .format(attr_name, expected_type, actual_type) + )
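Because BaseMixins subclasses unittest.TestCase directly, test runners will collect and run the base class itself; with the empty defaults the loops never execute, so its tests pass vacuously rather than fail. One common guard, sketched on the first test:

def test_includes_correct_mixins(self):
    if self.model is None:  # the abstract base itself has nothing to check
        self.skipTest('base class has no model configured')
    for mixin in self.included_mixins:
        self.assertTrue(issubclass(self.model, mixin))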
ad37a8cf39e79c0988bd76e11615873993c394b6
randomXKCDPassword.py
randomXKCDPassword.py
#!/usr/bin/env python
"""
    See https://xkcd.com/936/ for why you might want to generate these
    passphrases.

    Does require /usr/share/dict/words to be a file with words on
    multiple lines. On Debian derived OS like Ubuntu install wbritish-insane
    package and `sudo select-default-wordlist` to set it as the default.
"""

import random
import re
import sys



def randomWords(num=4, dictionaryfile="/usr/share/dict/words"):
    r = random.SystemRandom()  # i.e. preferably not pseudo-random
    f = open(dictionaryfile, "r")
    chosen = []
    wordlist = []
    prog = re.compile("^[a-z]{5,9}$")  # reasonable length, no proper nouns
    if(f):
        for word in f:
            if(prog.match(word)):
                wordlist.append(word)
    # Not sure how python calculates length, im assuming 32bits of mem vs
    # counting it evry time is a good trade.
    wordlistlen = len(wordlist)
    for i in range(num):
        word = wordlist[r.randint(0,wordlistlen)]
        chosen.append(word.strip())
    return chosen


if __name__ == "__main__":
    num = 4
    if (len(sys.argv) > 1 and str.isdigit(sys.argv[1])):
        num = int(sys.argv[1])

    print ".".join(randomWords(num))
Add script to generate XKCD password.
Add script to generate XKCD password.
Python
mit
ddryden/legendary-octo-engine,ddryden/legendary-octo-engine
--- +++ @@ -0,0 +1,41 @@ +#!/usr/bin/env python +""" + See https://xkcd.com/936/ for why you might want to generate these + passphrases. + + Does require /usr/share/dict/words to be a file with words on + multiple lines. On Debian derived OS like Ubuntu install wbritish-insane + package and `sudo select-default-wordlist` to set it as the default. +""" + +import random +import re +import sys + + + +def randomWords(num=4, dictionaryfile="/usr/share/dict/words"): + r = random.SystemRandom() # i.e. preferably not pseudo-random + f = open(dictionaryfile, "r") + chosen = [] + wordlist = [] + prog = re.compile("^[a-z]{5,9}$") # reasonable length, no proper nouns + if(f): + for word in f: + if(prog.match(word)): + wordlist.append(word) + # Not sure how python calculates length, im assuming 32bits of mem vs + # counting it evry time is a good trade. + wordlistlen = len(wordlist) + for i in range(num): + word = wordlist[r.randint(0,wordlistlen)] + chosen.append(word.strip()) + return chosen + + +if __name__ == "__main__": + num = 4 + if (len(sys.argv) > 1 and str.isdigit(sys.argv[1])): + num = int(sys.argv[1]) + + print ".".join(randomWords(num))
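random.randint is inclusive at both ends, so wordlist[r.randint(0, wordlistlen)] in the record above can index one past the end of the list and raise IndexError (roughly once per len(wordlist) draws). A sketch of the fix:

word = wordlist[r.randint(0, wordlistlen - 1)]
# or, more idiomatically (SystemRandom supports choice too):
word = r.choice(wordlist)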
fd3f0ab94beb19181636f190c79d3cd17ee03b36
test/interface/conflict-util.py
test/interface/conflict-util.py
#!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
import sys, os, time
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import driver, http_admin, scenario_common
from vcoptparse import *

op = OptParser()
scenario_common.prepare_option_parser_mode_flags(op)
opts = op.parse(sys.argv)

with driver.Metacluster() as metacluster:
    cluster1 = driver.Cluster(metacluster)
    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
    print "Spinning up two processes..."
    files1 = driver.Files(metacluster, log_path = "create-output-1",
                          executable_path = executable_path, command_prefix = command_prefix)
    proc1 = driver.Process(cluster1, files1,
                           executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
    files2 = driver.Files(metacluster, log_path = "create-output-2",
                          executable_path = executable_path, command_prefix = command_prefix)
    proc2 = driver.Process(cluster1, files2,
                           executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
    proc1.wait_until_started_up()
    proc2.wait_until_started_up()
    cluster1.check()

    access1 = http_admin.ClusterAccess([("localhost", proc1.http_port)])
    access2 = http_admin.ClusterAccess([("localhost", proc2.http_port)])

    access2.update_cluster_data(10)
    assert len(access1.get_directory()) == len(access2.get_directory()) == 2


    print "Hit enter to split the cluster"
    raw_input()

    print "Splitting cluster..."
    cluster2 = driver.Cluster(metacluster)
    metacluster.move_processes(cluster1, cluster2, [proc2])
    time.sleep(20)


    print "Hit enter to rejoin the cluster"
    raw_input()
    print "Joining cluster..."
    metacluster.move_processes(cluster2, cluster1, [proc2])
    cluster1.check()
    cluster2.check()
    issues = access1.get_issues()
    #assert issues[0]["type"] == "VCLOCK_CONFLICT"
    #assert len(access1.get_directory()) == len(access2.get_directory()) == 2

    time.sleep(1000000)
print "Done."
Create conflict in a more user-friendly way
Create conflict in a more user-friendly way
Python
apache-2.0
marshall007/rethinkdb,grandquista/rethinkdb,pap/rethinkdb,nviennot/rethinkdb,spblightadv/rethinkdb,rrampage/rethinkdb,jmptrader/rethinkdb,bpradipt/rethinkdb,grandquista/rethinkdb,sebadiaz/rethinkdb,niieani/rethinkdb,elkingtonmcb/rethinkdb,lenstr/rethinkdb,yaolinz/rethinkdb,scripni/rethinkdb,lenstr/rethinkdb,gavioto/rethinkdb,mbroadst/rethinkdb,gavioto/rethinkdb,AntouanK/rethinkdb,lenstr/rethinkdb,jfriedly/rethinkdb,ayumilong/rethinkdb,jmptrader/rethinkdb,ayumilong/rethinkdb,yaolinz/rethinkdb,grandquista/rethinkdb,gavioto/rethinkdb,niieani/rethinkdb,ayumilong/rethinkdb,AtnNn/rethinkdb,KSanthanam/rethinkdb,jfriedly/rethinkdb,jfriedly/rethinkdb,bchavez/rethinkdb,wkennington/rethinkdb,yaolinz/rethinkdb,urandu/rethinkdb,eliangidoni/rethinkdb,mbroadst/rethinkdb,jfriedly/rethinkdb,sontek/rethinkdb,wojons/rethinkdb,4talesa/rethinkdb,mcanthony/rethinkdb,yakovenkodenis/rethinkdb,jfriedly/rethinkdb,JackieXie168/rethinkdb,Qinusty/rethinkdb,yakovenkodenis/rethinkdb,Wilbeibi/rethinkdb,sontek/rethinkdb,spblightadv/rethinkdb,rrampage/rethinkdb,eliangidoni/rethinkdb,scripni/rethinkdb,marshall007/rethinkdb,victorbriz/rethinkdb,pap/rethinkdb,matthaywardwebdesign/rethinkdb,victorbriz/rethinkdb,pap/rethinkdb,jmptrader/rethinkdb,Qinusty/rethinkdb,scripni/rethinkdb,tempbottle/rethinkdb,mbroadst/rethinkdb,eliangidoni/rethinkdb,matthaywardwebdesign/rethinkdb,wojons/rethinkdb,Wilbeibi/rethinkdb,ayumilong/rethinkdb,bchavez/rethinkdb,elkingtonmcb/rethinkdb,AntouanK/rethinkdb,mcanthony/rethinkdb,grandquista/rethinkdb,rrampage/rethinkdb,gdi2290/rethinkdb,eliangidoni/rethinkdb,losywee/rethinkdb,elkingtonmcb/rethinkdb,alash3al/rethinkdb,wojons/rethinkdb,sontek/rethinkdb,yaolinz/rethinkdb,niieani/rethinkdb,victorbriz/rethinkdb,wujf/rethinkdb,matthaywardwebdesign/rethinkdb,yakovenkodenis/rethinkdb,4talesa/rethinkdb,nviennot/rethinkdb,jesseditson/rethinkdb,elkingtonmcb/rethinkdb,dparnell/rethinkdb,AtnNn/rethinkdb,yaolinz/rethinkdb,catroot/rethinkdb,JackieXie168/rethinkdb,mquandalle/rethinkdb,mquandalle/rethinkdb,sebadiaz/rethinkdb,JackieXie168/rethinkdb,tempbottle/rethinkdb,matthaywardwebdesign/rethinkdb,sbusso/rethinkdb,ajose01/rethinkdb,spblightadv/rethinkdb,victorbriz/rethinkdb,jfriedly/rethinkdb,Qinusty/rethinkdb,lenstr/rethinkdb,jesseditson/rethinkdb,mcanthony/rethinkdb,eliangidoni/rethinkdb,KSanthanam/rethinkdb,greyhwndz/rethinkdb,RubenKelevra/rethinkdb,gdi2290/rethinkdb,scripni/rethinkdb,yakovenkodenis/rethinkdb,nviennot/rethinkdb,RubenKelevra/rethinkdb,AtnNn/rethinkdb,sbusso/rethinkdb,AtnNn/rethinkdb,JackieXie168/rethinkdb,alash3al/rethinkdb,catroot/rethinkdb,ayumilong/rethinkdb,mquandalle/rethinkdb,niieani/rethinkdb,marshall007/rethinkdb,KSanthanam/rethinkdb,sbusso/rethinkdb,captainpete/rethinkdb,AntouanK/rethinkdb,jmptrader/rethinkdb,scripni/rethinkdb,greyhwndz/rethinkdb,Qinusty/rethinkdb,pap/rethinkdb,greyhwndz/rethinkdb,matthaywardwebdesign/rethinkdb,sontek/rethinkdb,matthaywardwebdesign/rethinkdb,losywee/rethinkdb,losywee/rethinkdb,catroot/rethinkdb,sbusso/rethinkdb,wkennington/rethinkdb,ajose01/rethinkdb,catroot/rethinkdb,ayumilong/rethinkdb,robertjpayne/rethinkdb,captainpete/rethinkdb,RubenKelevra/rethinkdb,ayumilong/rethinkdb,yakovenkodenis/rethinkdb,nviennot/rethinkdb,JackieXie168/rethinkdb,RubenKelevra/rethinkdb,dparnell/rethinkdb,ayumilong/rethinkdb,mquandalle/rethinkdb,AtnNn/rethinkdb,wojons/rethinkdb,marshall007/rethinkdb,urandu/rethinkdb,gdi2290/rethinkdb,mbroadst/rethinkdb,wojons/rethinkdb,bpradipt/rethinkdb,catroot/rethinkdb,rrampage/rethinkdb,mcanthony/rethinkdb,lenstr/rethinkdb,pap/rethinkdb,wojons/
rethinkdb,ajose01/rethinkdb,yakovenkodenis/rethinkdb,bpradipt/rethinkdb,captainpete/rethinkdb,AtnNn/rethinkdb,sbusso/rethinkdb,grandquista/rethinkdb,Qinusty/rethinkdb,tempbottle/rethinkdb,wkennington/rethinkdb,AntouanK/rethinkdb,bpradipt/rethinkdb,ajose01/rethinkdb,greyhwndz/rethinkdb,tempbottle/rethinkdb,spblightadv/rethinkdb,wojons/rethinkdb,RubenKelevra/rethinkdb,ajose01/rethinkdb,tempbottle/rethinkdb,sbusso/rethinkdb,elkingtonmcb/rethinkdb,bpradipt/rethinkdb,robertjpayne/rethinkdb,wujf/rethinkdb,jesseditson/rethinkdb,Qinusty/rethinkdb,AtnNn/rethinkdb,sontek/rethinkdb,mbroadst/rethinkdb,urandu/rethinkdb,elkingtonmcb/rethinkdb,robertjpayne/rethinkdb,gavioto/rethinkdb,sontek/rethinkdb,catroot/rethinkdb,niieani/rethinkdb,KSanthanam/rethinkdb,mbroadst/rethinkdb,dparnell/rethinkdb,Wilbeibi/rethinkdb,catroot/rethinkdb,mquandalle/rethinkdb,gdi2290/rethinkdb,AntouanK/rethinkdb,Qinusty/rethinkdb,urandu/rethinkdb,elkingtonmcb/rethinkdb,tempbottle/rethinkdb,lenstr/rethinkdb,sbusso/rethinkdb,4talesa/rethinkdb,grandquista/rethinkdb,niieani/rethinkdb,captainpete/rethinkdb,jmptrader/rethinkdb,JackieXie168/rethinkdb,sebadiaz/rethinkdb,robertjpayne/rethinkdb,marshall007/rethinkdb,alash3al/rethinkdb,mquandalle/rethinkdb,victorbriz/rethinkdb,gdi2290/rethinkdb,scripni/rethinkdb,jmptrader/rethinkdb,KSanthanam/rethinkdb,spblightadv/rethinkdb,captainpete/rethinkdb,robertjpayne/rethinkdb,scripni/rethinkdb,robertjpayne/rethinkdb,dparnell/rethinkdb,tempbottle/rethinkdb,scripni/rethinkdb,sebadiaz/rethinkdb,spblightadv/rethinkdb,alash3al/rethinkdb,sebadiaz/rethinkdb,RubenKelevra/rethinkdb,captainpete/rethinkdb,losywee/rethinkdb,spblightadv/rethinkdb,RubenKelevra/rethinkdb,tempbottle/rethinkdb,gavioto/rethinkdb,4talesa/rethinkdb,sebadiaz/rethinkdb,Wilbeibi/rethinkdb,yakovenkodenis/rethinkdb,victorbriz/rethinkdb,eliangidoni/rethinkdb,bchavez/rethinkdb,rrampage/rethinkdb,wujf/rethinkdb,matthaywardwebdesign/rethinkdb,4talesa/rethinkdb,victorbriz/rethinkdb,gavioto/rethinkdb,alash3al/rethinkdb,wujf/rethinkdb,sebadiaz/rethinkdb,wojons/rethinkdb,marshall007/rethinkdb,gdi2290/rethinkdb,spblightadv/rethinkdb,mquandalle/rethinkdb,dparnell/rethinkdb,dparnell/rethinkdb,catroot/rethinkdb,mbroadst/rethinkdb,bchavez/rethinkdb,Wilbeibi/rethinkdb,JackieXie168/rethinkdb,jfriedly/rethinkdb,jesseditson/rethinkdb,wkennington/rethinkdb,greyhwndz/rethinkdb,4talesa/rethinkdb,KSanthanam/rethinkdb,bchavez/rethinkdb,JackieXie168/rethinkdb,wkennington/rethinkdb,bchavez/rethinkdb,elkingtonmcb/rethinkdb,grandquista/rethinkdb,eliangidoni/rethinkdb,mbroadst/rethinkdb,rrampage/rethinkdb,dparnell/rethinkdb,lenstr/rethinkdb,gavioto/rethinkdb,gdi2290/rethinkdb,mcanthony/rethinkdb,pap/rethinkdb,AntouanK/rethinkdb,yaolinz/rethinkdb,urandu/rethinkdb,dparnell/rethinkdb,RubenKelevra/rethinkdb,marshall007/rethinkdb,eliangidoni/rethinkdb,alash3al/rethinkdb,pap/rethinkdb,ajose01/rethinkdb,nviennot/rethinkdb,rrampage/rethinkdb,wkennington/rethinkdb,yakovenkodenis/rethinkdb,sontek/rethinkdb,pap/rethinkdb,AntouanK/rethinkdb,Qinusty/rethinkdb,nviennot/rethinkdb,losywee/rethinkdb,captainpete/rethinkdb,jesseditson/rethinkdb,greyhwndz/rethinkdb,yaolinz/rethinkdb,wkennington/rethinkdb,wujf/rethinkdb,captainpete/rethinkdb,bpradipt/rethinkdb,dparnell/rethinkdb,urandu/rethinkdb,alash3al/rethinkdb,rrampage/rethinkdb,mcanthony/rethinkdb,AtnNn/rethinkdb,mcanthony/rethinkdb,lenstr/rethinkdb,ajose01/rethinkdb,4talesa/rethinkdb,nviennot/rethinkdb,wkennington/rethinkdb,greyhwndz/rethinkdb,niieani/rethinkdb,robertjpayne/rethinkdb,marshall007/rethinkdb,alash3al/rethinkdb,Antouan
K/rethinkdb,Qinusty/rethinkdb,matthaywardwebdesign/rethinkdb,Wilbeibi/rethinkdb,4talesa/rethinkdb,Wilbeibi/rethinkdb,Wilbeibi/rethinkdb,bpradipt/rethinkdb,losywee/rethinkdb,jesseditson/rethinkdb,urandu/rethinkdb,JackieXie168/rethinkdb,sontek/rethinkdb,gavioto/rethinkdb,bpradipt/rethinkdb,KSanthanam/rethinkdb,nviennot/rethinkdb,yaolinz/rethinkdb,mcanthony/rethinkdb,mbroadst/rethinkdb,bchavez/rethinkdb,grandquista/rethinkdb,KSanthanam/rethinkdb,jfriedly/rethinkdb,bpradipt/rethinkdb,sbusso/rethinkdb,jesseditson/rethinkdb,ajose01/rethinkdb,bchavez/rethinkdb,losywee/rethinkdb,losywee/rethinkdb,jesseditson/rethinkdb,robertjpayne/rethinkdb,greyhwndz/rethinkdb,sebadiaz/rethinkdb,niieani/rethinkdb,robertjpayne/rethinkdb,victorbriz/rethinkdb,bchavez/rethinkdb,grandquista/rethinkdb,jmptrader/rethinkdb,wujf/rethinkdb,mquandalle/rethinkdb,urandu/rethinkdb,eliangidoni/rethinkdb,jmptrader/rethinkdb,wujf/rethinkdb
--- +++ @@ -0,0 +1,56 @@
+#!/usr/bin/env python
+# Copyright 2010-2012 RethinkDB, all rights reserved.
+import sys, os, time
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
+import driver, http_admin, scenario_common
+from vcoptparse import *
+
+op = OptParser()
+scenario_common.prepare_option_parser_mode_flags(op)
+opts = op.parse(sys.argv)
+
+with driver.Metacluster() as metacluster:
+    cluster1 = driver.Cluster(metacluster)
+    executable_path, command_prefix, serve_options = scenario_common.parse_mode_flags(opts)
+    print "Spinning up two processes..."
+    files1 = driver.Files(metacluster, log_path = "create-output-1",
+                          executable_path = executable_path, command_prefix = command_prefix)
+    proc1 = driver.Process(cluster1, files1,
+                           executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
+    files2 = driver.Files(metacluster, log_path = "create-output-2",
+                          executable_path = executable_path, command_prefix = command_prefix)
+    proc2 = driver.Process(cluster1, files2,
+                           executable_path = executable_path, command_prefix = command_prefix, extra_options = serve_options)
+    proc1.wait_until_started_up()
+    proc2.wait_until_started_up()
+    cluster1.check()
+
+    access1 = http_admin.ClusterAccess([("localhost", proc1.http_port)])
+    access2 = http_admin.ClusterAccess([("localhost", proc2.http_port)])
+
+    access2.update_cluster_data(10)
+    assert len(access1.get_directory()) == len(access2.get_directory()) == 2
+
+
+    print "Hit enter to split the cluster"
+    raw_input()
+
+    print "Splitting cluster..."
+    cluster2 = driver.Cluster(metacluster)
+    metacluster.move_processes(cluster1, cluster2, [proc2])
+    time.sleep(20)
+
+
+    print "Hit enter to rejoin the cluster"
+    raw_input()
+    print "Joining cluster..."
+    metacluster.move_processes(cluster2, cluster1, [proc2])
+    cluster1.check()
+    cluster2.check()
+    issues = access1.get_issues()
+    #assert issues[0]["type"] == "VCLOCK_CONFLICT"
+    #assert len(access1.get_directory()) == len(access2.get_directory()) == 2
+
+    time.sleep(1000000)
+print "Done."
+
b555659518097db41a02d505ebfaf88e828b2f30
tests/functional/test_endpoints.py
tests/functional/test_endpoints.py
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from nose.tools import assert_equal
from botocore.session import get_session


# Several services have names that don't match for one reason or another.
SERVICE_RENAMES = {
    'application-autoscaling': 'autoscaling',
    'appstream': 'appstream2',
    'dynamodbstreams': 'streams.dynamodb',
    'cloudwatch': 'monitoring',
    'efs': 'elasticfilesystem',
    'elb': 'elasticloadbalancing',
    'elbv2': 'elasticloadbalancing',
    'emr': 'elasticmapreduce',
    'iot-data': 'data.iot',
    'meteringmarketplace': 'metering.marketplace',
    'opsworkscm': 'opsworks-cm',
    'ses': 'email',
    'stepfunctions': 'states'
}


def test_service_name_matches_endpoint_prefix():
    # Generates tests for each service to verify that the endpoint prefix
    # matches the service name unless there is an explicit exception.
    session = get_session()
    loader = session.get_component('data_loader')

    # Load the list of available services. The names here represent what
    # will become the client names.
    services = loader.list_available_services('service-2')

    for service in services:
        yield _assert_service_name_matches_endpoint_prefix, loader, service


def _assert_service_name_matches_endpoint_prefix(loader, service_name):
    # Load the service model and grab its endpoint prefix
    service_model = loader.load_service_model(service_name, 'service-2')
    endpoint_prefix = service_model['metadata']['endpointPrefix']

    # Handle known exceptions where we have renamed the service directory
    # for one reason or another.
    expected_endpoint_prefix = SERVICE_RENAMES.get(service_name, service_name)
    assert_equal(
        endpoint_prefix, expected_endpoint_prefix,
        "Service name `%s` does not match endpoint prefix `%s`." % (
            service_name, expected_endpoint_prefix))
Verify endpoint prefix matches service name
Verify endpoint prefix matches service name This protects us from cases where we mis-name the artifacts that we base our service names on. Generally these should be named based on the endpoint prefix, except in a handful of special cases.
Python
apache-2.0
boto/botocore,pplu/botocore
--- +++ @@ -0,0 +1,60 @@
+# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"). You
+# may not use this file except in compliance with the License. A copy of
+# the License is located at
+#
+#     http://aws.amazon.com/apache2.0/
+#
+# or in the "license" file accompanying this file. This file is
+# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
+# ANY KIND, either express or implied. See the License for the specific
+# language governing permissions and limitations under the License.
+from nose.tools import assert_equal
+from botocore.session import get_session
+
+
+# Several services have names that don't match for one reason or another.
+SERVICE_RENAMES = {
+    'application-autoscaling': 'autoscaling',
+    'appstream': 'appstream2',
+    'dynamodbstreams': 'streams.dynamodb',
+    'cloudwatch': 'monitoring',
+    'efs': 'elasticfilesystem',
+    'elb': 'elasticloadbalancing',
+    'elbv2': 'elasticloadbalancing',
+    'emr': 'elasticmapreduce',
+    'iot-data': 'data.iot',
+    'meteringmarketplace': 'metering.marketplace',
+    'opsworkscm': 'opsworks-cm',
+    'ses': 'email',
+    'stepfunctions': 'states'
+}
+
+
+def test_service_name_matches_endpoint_prefix():
+    # Generates tests for each service to verify that the endpoint prefix
+    # matches the service name unless there is an explicit exception.
+    session = get_session()
+    loader = session.get_component('data_loader')
+
+    # Load the list of available services. The names here represent what
+    # will become the client names.
+    services = loader.list_available_services('service-2')
+
+    for service in services:
+        yield _assert_service_name_matches_endpoint_prefix, loader, service
+
+
+def _assert_service_name_matches_endpoint_prefix(loader, service_name):
+    # Load the service model and grab its endpoint prefix
+    service_model = loader.load_service_model(service_name, 'service-2')
+    endpoint_prefix = service_model['metadata']['endpointPrefix']
+
+    # Handle known exceptions where we have renamed the service directory
+    # for one reason or another.
+    expected_endpoint_prefix = SERVICE_RENAMES.get(service_name, service_name)
+    assert_equal(
+        endpoint_prefix, expected_endpoint_prefix,
+        "Service name `%s` does not match endpoint prefix `%s`." % (
+            service_name, expected_endpoint_prefix))
f54c8f3b40bf44c4ba0f9fd1d1b6187991c327d5
tests/lints/check-external-size.py
tests/lints/check-external-size.py
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
This script checks that all the external archive included in the repository are
as small as they can be.
"""
from __future__ import print_function
import os
import sys
import glob

ROOT = os.path.join(os.path.dirname(__file__), "..", "..")
ERRORS = 0


# when adding new files here, make sure that they are as small as possible!
EXPECTED_SIZES = {
    "bzip2.tar.gz": 344,
    "fmt.tar.gz": 745,
    "gemmi.tar.gz": 476,
    "lzma.tar.gz": 256,
    "mmtf-cpp.tar.gz": 439,
    "molfiles.tar.gz": 477,
    "netcdf.tar.gz": 494,
    "pugixml.tar.gz": 549,
    "tng.tar.gz": 317,
    "xdrfile.tar.gz": 41,
    "zlib.tar.gz": 370,
}


def error(message):
    global ERRORS
    ERRORS += 1
    print(message)


if __name__ == "__main__":
    for path in glob.glob(os.path.join(ROOT, "external", "*.tar.gz")):
        size = os.path.getsize(path)
        size_kb = size // 1024
        name = os.path.basename(path)

        if name not in EXPECTED_SIZES:
            error("{} is not a known external file, please edit this file".format(name))

        expected = EXPECTED_SIZES[name]
        if size_kb > 1.1 * expected:
            error("{} size increased by more than 10%".format(name))

        if size_kb < 0.7 * expected:
            error("{} size decreased by more than 30%, edit this file".format(name))

    if ERRORS != 0:
        sys.exit(1)
Add a test checking the external archive size
Add a test checking the external archive size This should prevent size regressions
Python
bsd-3-clause
Luthaf/Chemharp,chemfiles/chemfiles,chemfiles/chemfiles,chemfiles/chemfiles,Luthaf/Chemharp,Luthaf/Chemharp,chemfiles/chemfiles
--- +++ @@ -0,0 +1,55 @@
+#!/usr/bin/env python
+# -*- coding: utf8 -*-
+"""
+This script checks that all the external archive included in the repository are
+as small as they can be.
+"""
+from __future__ import print_function
+import os
+import sys
+import glob
+
+ROOT = os.path.join(os.path.dirname(__file__), "..", "..")
+ERRORS = 0
+
+
+# when adding new files here, make sure that they are as small as possible!
+EXPECTED_SIZES = {
+    "bzip2.tar.gz": 344,
+    "fmt.tar.gz": 745,
+    "gemmi.tar.gz": 476,
+    "lzma.tar.gz": 256,
+    "mmtf-cpp.tar.gz": 439,
+    "molfiles.tar.gz": 477,
+    "netcdf.tar.gz": 494,
+    "pugixml.tar.gz": 549,
+    "tng.tar.gz": 317,
+    "xdrfile.tar.gz": 41,
+    "zlib.tar.gz": 370,
+}
+
+
+def error(message):
+    global ERRORS
+    ERRORS += 1
+    print(message)
+
+
+if __name__ == "__main__":
+    for path in glob.glob(os.path.join(ROOT, "external", "*.tar.gz")):
+        size = os.path.getsize(path)
+        size_kb = size // 1024
+        name = os.path.basename(path)
+
+        if name not in EXPECTED_SIZES:
+            error("{} is not a known external file, please edit this file".format(name))
+
+        expected = EXPECTED_SIZES[name]
+        if size_kb > 1.1 * expected:
+            error("{} size increased by more than 10%".format(name))
+
+        if size_kb < 0.7 * expected:
+            error("{} size decreased by more than 30%, edit this file".format(name))
+
+    if ERRORS != 0:
+        sys.exit(1)
9893af1b94fc460d017d7bdc9306bb00660bd408
circle_fit/circle_fit.py
circle_fit/circle_fit.py
import numpy as np

import matplotlib.pyplot as plt

def func(w, wr, nr, c):
    return c / (wr**2 - w**2 + 1j * nr * wr**2)

def circle_fit(data):
    # Take the real and imaginary parts
    x = data.real
    y = data.imag

    # Use the method from "Theoretical and Experimental Modal Analysis" p221
    # Set up the matrices
    xs = np.sum(x)
    ys = np.sum(y)
    xx = np.square(x).sum()
    yy = np.square(y).sum()
    xy = np.sum(x*y)
    L = data.size
    xxx = np.sum(x*np.square(x))
    yyy = np.sum(y*np.square(y))
    xyy = np.sum(x*np.square(y))
    yxx = np.sum(y*np.square(x))

    A = np.asarray([[xx, xy, -xs],
                    [xy, yy, -ys],
                    [-xs, -ys, L]])

    B = np.asarray([[-(xxx + xyy)],
                    [-(yyy + yxx)],
                    [xx + yy]])

    # Solve the equation
    v = np.linalg.solve(A, B)

    # Find the circle parameters
    x0 = v[0]/-2
    y0 = v[1]/-2
    R0 = np.sqrt(v[2] + x0**2 + y0**2)
    return x0, y0, R0

def circle_plot(x0, y0, R0):
    theta = np.linspace(-np.pi, np.pi, 180)
    x = x0[0] + R0[0]*np.cos(theta)
    y = y0[0] + R0[0]*np.sin(theta)
    plt.plot(x, y, '--')
    plt.axis('equal')


w = np.linspace(0, 25 ,1e5)
d = func(w, 5, 1, 1j)
plt.figure()
plt.plot(w, np.abs(d))

# Nyquist
plt.figure()
plt.plot(d.real, d.imag)

# Circle
x0, y0, R0 = circle_fit(d)
circle_plot(x0, y0, R0)
Write simple least-squares fit, from "Theoretical and Experimental Modal Analysis"
Write simple least-squares fit, from "Theoretical and Experimental Modal Analysis"
Python
bsd-3-clause
torebutlin/cued_datalogger
--- +++ @@ -0,0 +1,67 @@
+import numpy as np
+
+import matplotlib.pyplot as plt
+
+def func(w, wr, nr, c):
+    return c / (wr**2 - w**2 + 1j * nr * wr**2)
+
+def circle_fit(data):
+    # Take the real and imaginary parts
+    x = data.real
+    y = data.imag
+
+    # Use the method from "Theoretical and Experimental Modal Analysis" p221
+    # Set up the matrices
+    xs = np.sum(x)
+    ys = np.sum(y)
+    xx = np.square(x).sum()
+    yy = np.square(y).sum()
+    xy = np.sum(x*y)
+    L = data.size
+    xxx = np.sum(x*np.square(x))
+    yyy = np.sum(y*np.square(y))
+    xyy = np.sum(x*np.square(y))
+    yxx = np.sum(y*np.square(x))
+
+    A = np.asarray([[xx, xy, -xs],
+                    [xy, yy, -ys],
+                    [-xs, -ys, L]])
+
+    B = np.asarray([[-(xxx + xyy)],
+                    [-(yyy + yxx)],
+                    [xx + yy]])
+
+    # Solve the equation
+    v = np.linalg.solve(A, B)
+
+    # Find the circle parameters
+    x0 = v[0]/-2
+    y0 = v[1]/-2
+    R0 = np.sqrt(v[2] + x0**2 + y0**2)
+    return x0, y0, R0
+
+def circle_plot(x0, y0, R0):
+    theta = np.linspace(-np.pi, np.pi, 180)
+    x = x0[0] + R0[0]*np.cos(theta)
+    y = y0[0] + R0[0]*np.sin(theta)
+    plt.plot(x, y, '--')
+    plt.axis('equal')
+
+
+w = np.linspace(0, 25 ,1e5)
+d = func(w, 5, 1, 1j)
+plt.figure()
+plt.plot(w, np.abs(d))
+
+# Nyquist
+plt.figure()
+plt.plot(d.real, d.imag)
+
+# Circle
+x0, y0, R0 = circle_fit(d)
+circle_plot(x0, y0, R0)
+
+
+
+
+
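
Editor's note on the record above: the A and B matrices in circle_fit are the normal equations of a Kasa-style algebraic circle fit. A sketch of the algebra, reconstructed from the code's own sign conventions rather than quoted from the cited book: writing the circle as x^2 + y^2 + a x + b y - c = 0, the code minimises

    J(a, b, c) = \sum_{i=1}^{L} \left( x_i^2 + y_i^2 + a x_i + b y_i - c \right)^2

Setting \partial J/\partial a = \partial J/\partial b = \partial J/\partial c = 0 gives the linear system

    \begin{pmatrix} \sum x_i^2 & \sum x_i y_i & -\sum x_i \\ \sum x_i y_i & \sum y_i^2 & -\sum y_i \\ -\sum x_i & -\sum y_i & L \end{pmatrix} \begin{pmatrix} a \\ b \\ c \end{pmatrix} = \begin{pmatrix} -\sum (x_i^3 + x_i y_i^2) \\ -\sum (y_i^3 + y_i x_i^2) \\ \sum (x_i^2 + y_i^2) \end{pmatrix}

after which the centre is (x_0, y_0) = (-a/2, -b/2) and R_0 = \sqrt{c + x_0^2 + y_0^2}, which is exactly the recovery step x0 = v[0]/-2, y0 = v[1]/-2, R0 = np.sqrt(v[2] + x0**2 + y0**2) in the code.
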
9aef590c097f0544ff0e3f116a5d8547b5d4adc2
tools/filldb.py
tools/filldb.py
#!/usr/bin/env python

from cli import *

# directory of ukwords
w = [x.replace('\n', '') for x in open('../ukwords_small')]

essence = Variable('Essence', 1)
coal = Variable('Coal', 1)
iron = Variable('Iron', 1)
oak = Variable('Oak', 1)
yew = Variable('Yew', 1)
tuna = Variable('Tuna', 1)
salmon = Variable('Salmon', 1)

session.add(essence)
session.add(coal)
session.add(iron)
session.add(oak)
session.add(yew)
session.add(tuna)
session.add(salmon)

s1 = Script('Essence Miner')
s2 = Script('Iron Miner')
s3 = Script('Fisher')
s4 = Script('Woodcutter')
s5 = Script('Edgeville Yew Cutter')
s6 = Script('Lumbridge Coal / Iron Miner')

s1.variables.append(essence)
s2.variables.append(iron)
s3.variables.append(tuna)
s3.variables.append(salmon)
s4.variables.append(oak)
s4.variables.append(yew)
s5.variables.append(yew)
s6.variables.append(iron)
s6.variables.append(coal)

session.add(s1)
session.add(s2)
session.add(s3)
session.add(s4)
session.add(s5)
session.add(s6)

for i in range(100):
    u = User(w[i], w[i])
    session.add(u)

ul = session.query(User).all()

from random import randrange
s1.owner = ul[randrange(0,99)]
s2.owner = ul[randrange(0,99)]
s3.owner = ul[randrange(0,99)]
s4.owner = ul[randrange(0,99)]
s5.owner = ul[randrange(0,99)]
s6.owner = ul[randrange(0,99)]

session.commit()
Add FillDB; script to set up a test db.
Tools: Add FillDB; script to set up a test db.
Python
agpl-3.0
MerlijnWajer/SRL-Stats
--- +++ @@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+from cli import *
+
+# directory of ukwords
+w = [x.replace('\n', '') for x in open('../ukwords_small')]
+
+essence = Variable('Essence', 1)
+coal = Variable('Coal', 1)
+iron = Variable('Iron', 1)
+oak = Variable('Oak', 1)
+yew = Variable('Yew', 1)
+tuna = Variable('Tuna', 1)
+salmon = Variable('Salmon', 1)
+
+session.add(essence)
+session.add(coal)
+session.add(iron)
+session.add(oak)
+session.add(yew)
+session.add(tuna)
+session.add(salmon)
+
+s1 = Script('Essence Miner')
+s2 = Script('Iron Miner')
+s3 = Script('Fisher')
+s4 = Script('Woodcutter')
+s5 = Script('Edgeville Yew Cutter')
+s6 = Script('Lumbridge Coal / Iron Miner')
+
+s1.variables.append(essence)
+s2.variables.append(iron)
+s3.variables.append(tuna)
+s3.variables.append(salmon)
+s4.variables.append(oak)
+s4.variables.append(yew)
+s5.variables.append(yew)
+s6.variables.append(iron)
+s6.variables.append(coal)
+
+session.add(s1)
+session.add(s2)
+session.add(s3)
+session.add(s4)
+session.add(s5)
+session.add(s6)
+
+for i in range(100):
+    u = User(w[i], w[i])
+    session.add(u)
+
+ul = session.query(User).all()
+
+from random import randrange
+s1.owner = ul[randrange(0,99)]
+s2.owner = ul[randrange(0,99)]
+s3.owner = ul[randrange(0,99)]
+s4.owner = ul[randrange(0,99)]
+s5.owner = ul[randrange(0,99)]
+s6.owner = ul[randrange(0,99)]
+
+session.commit()
07d2cb651903545d38a7e12f656d0fde920e7102
find-in-balanced-by-username.py
find-in-balanced-by-username.py
#!./env/bin/python
"""This is a workaround for https://github.com/balanced/balanced-api/issues/141

Usage (tested on Mac OS):

    [gittip] $ open `heroku config | swaddle - ./find-in-balanced-by-username.py foobar 2> /dev/null`

The script will search for the user and print out the URI of their page in the
Balanced dashboard, and open will open it in your default web browser.

"""
import sys

import balanced
from gittip import wireup


wireup.billing()


email_address = sys.argv[1] + "@gittip.com"  # hack into an email address
api_uri = balanced.Account.query.filter(email_address=email_address).one().uri
dashboard_uri = "https://www.balancedpayments.com/" + api_uri[4:]
print dashboard_uri
Add a script to workaround lack of account search
Add a script to workaround lack of account search Discovered the need for this in the course of #312. See also: https://github.com/balanced/balanced-api/issues/141
Python
cc0-1.0
mccolgst/www.gittip.com,bountysource/www.gittip.com,studio666/gratipay.com,mccolgst/www.gittip.com,MikeFair/www.gittip.com,mccolgst/www.gittip.com,bountysource/www.gittip.com,MikeFair/www.gittip.com,eXcomm/gratipay.com,gratipay/gratipay.com,bountysource/www.gittip.com,gratipay/gratipay.com,eXcomm/gratipay.com,MikeFair/www.gittip.com,studio666/gratipay.com,eXcomm/gratipay.com,studio666/gratipay.com,gratipay/gratipay.com,mccolgst/www.gittip.com,gratipay/gratipay.com,studio666/gratipay.com,bountysource/www.gittip.com,eXcomm/gratipay.com
--- +++ @@ -0,0 +1,24 @@
+#!./env/bin/python
+"""This is a workaround for https://github.com/balanced/balanced-api/issues/141
+
+Usage (tested on Mac OS):
+
+    [gittip] $ open `heroku config | swaddle - ./find-in-balanced-by-username.py foobar 2> /dev/null`
+
+The script will search for the user and print out the URI of their page in the
+Balanced dashboard, and open will open it in your default web browser.
+
+"""
+import sys
+
+import balanced
+from gittip import wireup
+
+
+wireup.billing()
+
+
+email_address = sys.argv[1] + "@gittip.com"  # hack into an email address
+api_uri = balanced.Account.query.filter(email_address=email_address).one().uri
+dashboard_uri = "https://www.balancedpayments.com/" + api_uri[4:]
+print dashboard_uri
f824dc45b49ab6fdac14ddea81fcca470253fd1f
open511/scripts/mtl_kml_to_open511.py
open511/scripts/mtl_kml_to_open511.py
import hashlib
import sys
import tempfile

from django.contrib.gis.gdal import DataSource
from lxml import etree
import lxml.html

from open511.serialization import roadevent_to_xml_element, get_base_open511_element

JURISDICTION = 'converted.ville.montreal.qc.ca'

ids_seen = set()

class DummyRoadEvent(object):
    pass

def feature_to_open511_element(feature):
    """Transform an OGR Feature from the KML input into an XML Element for a RoadEvent."""
    rdev = DummyRoadEvent()

    rdev.geom = feature.geom

    # Using a hash of the geometry for an ID. For proper production use,
    # there'll probably have to be some code in the importer
    # that compares to existing entries in the DB to determine whether
    # this is new or modified...
    geom_hash = hashlib.md5(feature.geom.wkt).hexdigest()
    rdev.source_id = JURISDICTION + ':' + geom_hash
    while rdev.source_id in ids_seen:
        rdev.source_id += 'x'
    ids_seen.add(rdev.source_id)

    rdev.title = feature.get('Name').decode('utf8')

    blob = lxml.html.fragment_fromstring(feature.get('Description').decode('utf8'),
        create_parent='content')

    description_label = blob.xpath('//strong[text()="Description"]')
    if description_label:
        description_bits = []
        el = description_label[0].getnext()
        while el.tag == 'p':
            description_bits.append(_get_el_text(el))
            el = el.getnext()
        rdev.description = '\n\n'.join(description_bits)

    localisation = blob.cssselect('div#localisation p')
    if localisation:
        rdev.affected_roads = '\n\n'.join(_get_el_text(el) for el in localisation)

    return roadevent_to_xml_element(rdev)

def kml_file_to_open511_element(filename):
    """Transform a Montreal KML file, at filename, into an Element
    for the top-level <open511> element."""
    ds = DataSource(filename)
    base_element = get_base_open511_element()
    for layer in ds:
        for feature in layer:
            base_element.append(feature_to_open511_element(feature))
    return base_element

def _get_el_text(el):
    t = el.text if el.text else ''
    for subel in el:
        t += _get_el_text(subel)
        if subel.tail:
            t += subel.tail
    return t

if __name__ == '__main__':
    filename = sys.argv[1]
    el = kml_file_to_open511_element(filename)
    print etree.tostring(el, pretty_print=True)
Add preliminary script to convert Ville de Montreal KML
Add preliminary script to convert Ville de Montreal KML
Python
mit
Open511/open511-server,Open511/open511-server,Open511/open511-server
--- +++ @@ -0,0 +1,75 @@
+import hashlib
+import sys
+import tempfile
+
+from django.contrib.gis.gdal import DataSource
+from lxml import etree
+import lxml.html
+
+from open511.serialization import roadevent_to_xml_element, get_base_open511_element
+
+JURISDICTION = 'converted.ville.montreal.qc.ca'
+
+ids_seen = set()
+
+class DummyRoadEvent(object):
+    pass
+
+def feature_to_open511_element(feature):
+    """Transform an OGR Feature from the KML input into an XML Element for a RoadEvent."""
+    rdev = DummyRoadEvent()
+
+    rdev.geom = feature.geom
+
+    # Using a hash of the geometry for an ID. For proper production use,
+    # there'll probably have to be some code in the importer
+    # that compares to existing entries in the DB to determine whether
+    # this is new or modified...
+    geom_hash = hashlib.md5(feature.geom.wkt).hexdigest()
+    rdev.source_id = JURISDICTION + ':' + geom_hash
+    while rdev.source_id in ids_seen:
+        rdev.source_id += 'x'
+    ids_seen.add(rdev.source_id)
+
+    rdev.title = feature.get('Name').decode('utf8')
+
+    blob = lxml.html.fragment_fromstring(feature.get('Description').decode('utf8'),
+        create_parent='content')
+
+    description_label = blob.xpath('//strong[text()="Description"]')
+    if description_label:
+        description_bits = []
+        el = description_label[0].getnext()
+        while el.tag == 'p':
+            description_bits.append(_get_el_text(el))
+            el = el.getnext()
+        rdev.description = '\n\n'.join(description_bits)
+
+    localisation = blob.cssselect('div#localisation p')
+    if localisation:
+        rdev.affected_roads = '\n\n'.join(_get_el_text(el) for el in localisation)
+
+    return roadevent_to_xml_element(rdev)
+
+def kml_file_to_open511_element(filename):
+    """Transform a Montreal KML file, at filename, into an Element
+    for the top-level <open511> element."""
+    ds = DataSource(filename)
+    base_element = get_base_open511_element()
+    for layer in ds:
+        for feature in layer:
+            base_element.append(feature_to_open511_element(feature))
+    return base_element
+
+def _get_el_text(el):
+    t = el.text if el.text else ''
+    for subel in el:
+        t += _get_el_text(subel)
+        if subel.tail:
+            t += subel.tail
+    return t
+
+if __name__ == '__main__':
+    filename = sys.argv[1]
+    el = kml_file_to_open511_element(filename)
+    print etree.tostring(el, pretty_print=True)
d4b3b0d9560ecf059c73d7c2e4395bb955575a78
CodeFights/messageFromBinaryCode.py
CodeFights/messageFromBinaryCode.py
#!/usr/local/bin/python
# Code Fights Message from Binary Code Problem


def messageFromBinaryCode(code):
    sz = 8
    return ''.join([chr(int(code[i:i + sz], 2)) for i in
                    range(0, len(code), sz)])


def main():
    tests = [
        ["010010000110010101101100011011000110111100100001", "Hello!"],
        ["01001101011000010111100100100000011101000110100001100101001000000100"
         "01100110111101110010011000110110010100100000011000100110010100100000"
         "0111011101101001011101000110100000100000011110010110111101110101",
         "May the Force be with you"],
        ["01011001011011110111010100100000011010000110000101100100001000000110"
         "11010110010100100000011000010111010000100000011000000110100001100101"
         "01101100011011000110111100101110",
         "You had me at `hello."]
    ]

    for t in tests:
        res = messageFromBinaryCode(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: messageFromBinaryCode({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: messageFromBinaryCode({}) returned {}, answer: {}"
                  .format(t[0], res, ans))


if __name__ == '__main__':
    main()
Solve Code Fights message from binary code problem
Solve Code Fights message from binary code problem
Python
mit
HKuz/Test_Code
--- +++ @@ -0,0 +1,36 @@
+#!/usr/local/bin/python
+# Code Fights Message from Binary Code Problem
+
+
+def messageFromBinaryCode(code):
+    sz = 8
+    return ''.join([chr(int(code[i:i + sz], 2)) for i in
+                    range(0, len(code), sz)])
+
+
+def main():
+    tests = [
+        ["010010000110010101101100011011000110111100100001", "Hello!"],
+        ["01001101011000010111100100100000011101000110100001100101001000000100"
+         "01100110111101110010011000110110010100100000011000100110010100100000"
+         "0111011101101001011101000110100000100000011110010110111101110101",
+         "May the Force be with you"],
+        ["01011001011011110111010100100000011010000110000101100100001000000110"
+         "11010110010100100000011000010111010000100000011000000110100001100101"
+         "01101100011011000110111100101110",
+         "You had me at `hello."]
+    ]
+
+    for t in tests:
+        res = messageFromBinaryCode(t[0])
+        ans = t[1]
+        if ans == res:
+            print("PASSED: messageFromBinaryCode({}) returned {}"
+                  .format(t[0], res))
+        else:
+            print("FAILED: messageFromBinaryCode({}) returned {}, answer: {}"
+                  .format(t[0], res, ans))
+
+
+if __name__ == '__main__':
+    main()
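
Editor's note: a quick way to convince yourself of the decoding step above is that each 8-character slice is parsed as a base-2 integer and mapped through chr(). A minimal standalone sketch (standard library only):

# Each 8-bit chunk is parsed as base 2 and mapped through chr().
code = "010010000110010101101100011011000110111100100001"
chunks = [code[i:i + 8] for i in range(0, len(code), 8)]
print(chunks[0], int(chunks[0], 2), chr(int(chunks[0], 2)))  # 01001000 72 H
print(''.join(chr(int(c, 2)) for c in chunks))               # Hello!
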
b20a8a86675c931c033600669b3909cb3c4e010d
examples/__init__.py
examples/__init__.py
import os
import ujson

HERE = os.path.abspath(os.path.dirname(__file__))


def load_api(filename):
    '''
    Helper to load api specifications in the examples folder.

    Returns a nested dict appropriate for unpacking into Client or Service
    '''
    api_filename = os.path.join(HERE, filename)
    with open(api_filename) as api_file:
        api = ujson.loads(api_file.read())
    return api
Add examples folder for small service demos
Add examples folder for small service demos
Python
mit
numberoverzero/pyservice
--- +++ @@ -0,0 +1,16 @@
+import os
+import ujson
+
+HERE = os.path.abspath(os.path.dirname(__file__))
+
+
+def load_api(filename):
+    '''
+    Helper to load api specifications in the examples folder.
+
+    Returns a nested dict appropriate for unpacking into Client or Service
+    '''
+    api_filename = os.path.join(HERE, filename)
+    with open(api_filename) as api_file:
+        api = ujson.loads(api_file.read())
+    return api
0d38954e4c595920fa707333835d043959c71d71
sqlinit.py
sqlinit.py
import sys

sys.path.append('./sqlbase')

from sqlalchemy import create_engine
from sqlbase import Base, WeatherData

DB_Connection = 'postgresql://weather:weather@localhost:5432/weather'

engine = create_engine(DB_Connection)
Base.metadata.create_all(engine)
Add script to populate the database
Add script to populate the database
Python
apache-2.0
josecastroleon/GroveWeatherPi
--- +++ @@ -0,0 +1,11 @@
+import sys
+
+sys.path.append('./sqlbase')
+
+from sqlalchemy import create_engine
+from sqlbase import Base, WeatherData
+
+DB_Connection = 'postgresql://weather:weather@localhost:5432/weather'
+
+engine = create_engine(DB_Connection)
+Base.metadata.create_all(engine)
9e23f0a0546c80cb348de5faad351b0ceb0b4837
Arrays/different_func.py
Arrays/different_func.py
""" Apply different function over an array """ def square(num): return num ** 2 def cube(num): return num ** 3 def is_pair(num): return num % 2 functions = [square, cube, is_pair] array = range(0,20) for elemn in array: value = map(lambda x: x(elemn), functions) print (elemn, value)
Add an awesome way to apply different functions over an array.
Add an awesome way to apply different functions over an array.
Python
mit
xdanielsb/Marathon-book,xdanielsb/Marathon-book,xdanielsb/Marathon-book,xdanielsb/Marathon-book,xdanielsb/Marathon-book
--- +++ @@ -0,0 +1,20 @@
+"""
+    Apply different function over an array
+"""
+
+def square(num):
+    return num ** 2
+
+def cube(num):
+    return num ** 3
+
+def is_pair(num):
+    return num % 2
+
+functions = [square, cube, is_pair]
+
+array = range(0,20)
+
+for elemn in array:
+    value = map(lambda x: x(elemn), functions)
+    print (elemn, value)
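
Editor's note: the snippet above assumes Python 2, where map returns a list. Under Python 3, map is lazy, so the print would show a map object rather than the computed values. A hypothetical Python 3 adaptation of the same idea:

def square(num):
    return num ** 2

def cube(num):
    return num ** 3

def is_pair(num):
    return num % 2

functions = [square, cube, is_pair]

for elemn in range(20):
    # Force evaluation so the results are visible under Python 3.
    value = [f(elemn) for f in functions]
    print(elemn, value)
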
2e72dcb52c23690c6f1b41cfff1948f18506293b
exercises/chapter_03/exercise_03_04/exercise_04_04.py
exercises/chapter_03/exercise_03_04/exercise_04_04.py
# 3-4 Guest List

guest_list = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]

message = "Hi " + guest_list[0] + " you are invited to dinner at 7 on saturday."
print(message)

message = "Hi " + guest_list[1] + " you are invited to dinner at 7 on saturday."
print(message)

message = "Hi " + guest_list[2] + " you are invited to dinner at 7 on saturday."
print(message)

message = "Hi " + guest_list[3] + " you are invited to dinner at 7 on saturday."
print(message)
Add solution to exercise 4.4.
Add solution to exercise 4.4.
Python
mit
HenrikSamuelsson/python-crash-course
--- +++ @@ -0,0 +1,15 @@
+# 3-4 Guest List
+
+guest_list = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]
+
+message = "Hi " + guest_list[0] + " you are invited to dinner at 7 on saturday."
+print(message)
+
+message = "Hi " + guest_list[1] + " you are invited to dinner at 7 on saturday."
+print(message)
+
+message = "Hi " + guest_list[2] + " you are invited to dinner at 7 on saturday."
+print(message)
+
+message = "Hi " + guest_list[3] + " you are invited to dinner at 7 on saturday."
+print(message)
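
Editor's note: the four near-identical statements above are exactly the repetition a for loop removes once loops have been introduced; a loop-based sketch of the same exercise (one possible refactoring, not the book's own solution):

guest_list = ["Albert Einstein", "Isac Newton", "Marie Curie", "Galileo Galilei"]

for guest in guest_list:
    print("Hi " + guest + " you are invited to dinner at 7 on saturday.")
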
a5f591a71e460130055aafd16b248f7f61d0c541
snippets/python/nested.py
snippets/python/nested.py
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest

# To test this module:
# python -m unittest -v nested

def string_maxlen(txt,max_len=12):
    n = len(txt)
    if n <= max_len:
        return txt
    elif n > max_len-3:
        return txt[:(max_len-3)] + '...'

# python -m unittest -v nested.TestStringMaxLength
class TestStringMaxLength(unittest.TestCase):
    def test_short_strings(self):
        self.assertEqual(string_maxlen('abcdefghij'), 'abcdefghij')
        self.assertEqual(string_maxlen('abcdefghijk'), 'abcdefghijk')
        self.assertEqual(string_maxlen('abcdefghijkl'), 'abcdefghijkl')

    def test_long_strings(self):
        self.assertEqual(string_maxlen('abcdefghijklm'), 'abcdefghi...')
        self.assertEqual(string_maxlen('abcdefghijklmn'), 'abcdefghi...')

def print_structure(elmt,level=0,max_level=5):
    txt=''
    if level > max_level:
        return txt

    whitespace = " " * (level * 2)
    if isinstance(elmt, (dict)) is True:
        for k in elmt.keys():
            if type(elmt[k])==int:
                txt += whitespace + "+'{0}': {1}\n".format(k,elmt[k])
            elif type(elmt[k])==str:
                my_str=elmt[k][:40]
                txt += whitespace + "+'{0}': '{1}'\n".format(k,my_str)
            else:
                txt += whitespace + "+'{0}': {1}\n".format(k,type(elmt[k]))
                txt += print_structure(elmt[k],level+1,max_level=max_level)
    elif isinstance(elmt, (list)) is True:
        txt += whitespace + "+[list]\n"
        if len(elmt) > 0:
            txt += print_structure(elmt[0],level+1,max_level=max_level)
    else:
        pass
    return txt

# python -m unittest -v nested.TestPrintStructure
class TestPrintStructure(unittest.TestCase):
    def test_dict_int(self):
        d = {'integer': 123}
        self.assertEqual(print_structure(d),"+'integer': 123\n")

    def test_dict_string(self):
        d = {'string': 'abc'}
        self.assertEqual(print_structure(d),"+'string': 'abc'\n")

    def test_dict_list(self):
        d = {'list': [1,2,3]}
        self.assertEqual(print_structure(d),"+'list': <class 'list'>\n  +[list]\n")

if __name__ == "__main__":
    unittest.main()
Add subroutine to print a dictionary tree
Add subroutine to print a dictionary tree
Python
apache-2.0
nathanielng/code-templates,nathanielng/code-templates,nathanielng/code-templates
--- +++ @@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import unittest
+
+# To test this module:
+# python -m unittest -v nested
+
+def string_maxlen(txt,max_len=12):
+    n = len(txt)
+    if n <= max_len:
+        return txt
+    elif n > max_len-3:
+        return txt[:(max_len-3)] + '...'
+
+# python -m unittest -v nested.TestStringMaxLength
+class TestStringMaxLength(unittest.TestCase):
+    def test_short_strings(self):
+        self.assertEqual(string_maxlen('abcdefghij'), 'abcdefghij')
+        self.assertEqual(string_maxlen('abcdefghijk'), 'abcdefghijk')
+        self.assertEqual(string_maxlen('abcdefghijkl'), 'abcdefghijkl')
+
+    def test_long_strings(self):
+        self.assertEqual(string_maxlen('abcdefghijklm'), 'abcdefghi...')
+        self.assertEqual(string_maxlen('abcdefghijklmn'), 'abcdefghi...')
+
+def print_structure(elmt,level=0,max_level=5):
+    txt=''
+    if level > max_level:
+        return txt
+
+    whitespace = " " * (level * 2)
+    if isinstance(elmt, (dict)) is True:
+        for k in elmt.keys():
+            if type(elmt[k])==int:
+                txt += whitespace + "+'{0}': {1}\n".format(k,elmt[k])
+            elif type(elmt[k])==str:
+                my_str=elmt[k][:40]
+                txt += whitespace + "+'{0}': '{1}'\n".format(k,my_str)
+            else:
+                txt += whitespace + "+'{0}': {1}\n".format(k,type(elmt[k]))
+                txt += print_structure(elmt[k],level+1,max_level=max_level)
+    elif isinstance(elmt, (list)) is True:
+        txt += whitespace + "+[list]\n"
+        if len(elmt) > 0:
+            txt += print_structure(elmt[0],level+1,max_level=max_level)
+    else:
+        pass
+    return txt
+
+# python -m unittest -v nested.TestPrintStructure
+class TestPrintStructure(unittest.TestCase):
+    def test_dict_int(self):
+        d = {'integer': 123}
+        self.assertEqual(print_structure(d),"+'integer': 123\n")
+
+    def test_dict_string(self):
+        d = {'string': 'abc'}
+        self.assertEqual(print_structure(d),"+'string': 'abc'\n")
+
+    def test_dict_list(self):
+        d = {'list': [1,2,3]}
+        self.assertEqual(print_structure(d),"+'list': <class 'list'>\n  +[list]\n")
+
+if __name__ == "__main__":
+    unittest.main()
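
Editor's note: to see what print_structure produces on a nested value, here is a small usage sketch with a made-up dictionary (key order may vary on older Python versions):

doc = {'title': 'example', 'count': 3, 'items': [{'id': 1}]}
print(print_structure(doc))
# Expected output, per the function's formatting rules:
# +'title': 'example'
# +'count': 3
# +'items': <class 'list'>
#   +[list]
#     +'id': 1
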
cc764d3101324bc10e64664e7aac7af6a9fee85b
Core/communication_controller.py
Core/communication_controller.py
import socket
import httplib

class CommunicationControl():
    def sendTCPMessage(self, ipAddress, port, message):
        BUFFER_SIZE = 1024
        port = int(port)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect((ipAddress, port))
        s.send(message)
        data = s.recv(BUFFER_SIZE)
        s.close()
        return data
    def sendUDPMessage(self, ipAddress, port, message):
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
        sock.sendto(message, (ipAddress, port))
        return True
    def sendHTTPGetRequest(self, ipAddress, port, urlLocation):
        conn = httplib.HTTPConnection(str(ipAddress), int(port))
        conn.request("GET", urlLocation)
        data = conn.getresponse()
        return data
Move communication controller to the core
Move communication controller to the core
Python
mit
Tomcuzz/OctaHomeAutomation,Tomcuzz/OctaHomeAutomation,Tomcuzz/OctaHomeAutomation,Tomcuzz/OctaHomeAutomation
--- +++ @@ -0,0 +1,22 @@
+import socket
+import httplib
+
+class CommunicationControl():
+    def sendTCPMessage(self, ipAddress, port, message):
+        BUFFER_SIZE = 1024
+        port = int(port)
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect((ipAddress, port))
+        s.send(message)
+        data = s.recv(BUFFER_SIZE)
+        s.close()
+        return data
+    def sendUDPMessage(self, ipAddress, port, message):
+        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
+        sock.sendto(message, (ipAddress, port))
+        return True
+    def sendHTTPGetRequest(self, ipAddress, port, urlLocation):
+        conn = httplib.HTTPConnection(str(ipAddress), int(port))
+        conn.request("GET", urlLocation)
+        data = conn.getresponse()
+        return data
5109ced931692a0c09efcd2dfc5131537b32e4cc
catalogue_api/get_api_versions.py
catalogue_api/get_api_versions.py
#!/usr/bin/env python
# -*- encoding: utf-8
"""
Prints information about which version of the API is currently running,
so you can create a new set of pins.
"""

import os

import boto3
import hcl


API_DIR = os.path.dirname(os.path.realpath(__file__))
API_TF = os.path.join(API_DIR, 'terraform')


def bold(message):
    # This uses ANSI escape codes to print a message in a bright color
    # to make it stand out more in a console.
    return f'\033[91m{message}\033[0m'


def get_ecs_api_info(name):
    """
    Given the name of an API (remus or romulus), return the container
    versions which are currently running in ECS.
    """
    assert name in ('remus', 'romulus')

    ecs = boto3.client('ecs')
    resp = ecs.describe_services(
        cluster='api_cluster',
        services=[f'api_{name}_v1']
    )
    assert len(resp['services']) == 1, resp
    task_definition = resp['services'][0]['taskDefinition']

    resp = ecs.describe_task_definition(
        taskDefinition=task_definition
    )
    assert len(resp['taskDefinition']['containerDefinitions']) == 2, resp
    containers = resp['taskDefinition']['containerDefinitions']
    images = [c['image'] for c in containers]

    # The names of the images are in the form:
    #
    #   {ecr_repo}/uk.ac.wellcome/{api|nginx_api}:{tag}
    #
    image_names = [name.split('/')[-1] for name in images]

    return dict(name.split(':', 2) for name in image_names)


if __name__ == '__main__':
    with open(os.path.join(API_TF, 'variables.tf')) as var_tf:
        variables = hcl.load(var_tf)['variable']

    prod_api = variables['production_api']['default']
    prod_api_info = get_ecs_api_info(prod_api)

    staging_api = 'remus' if prod_api == 'romulus' else 'romulus'
    staging_api_info = get_ecs_api_info(staging_api)

    print(f'The prod API is {bold(prod_api)}')
    print(f'- api   = {bold(prod_api_info["api"])}')
    print(f'- nginx = {bold(prod_api_info["nginx_api"])}')
    print('')

    print(f'The staging API is {bold(staging_api)}')
    print(f'- api   = {bold(staging_api_info["api"])}')
    print(f'- nginx = {bold(staging_api_info["nginx_api"])}')
Add a script for getting the current versions of the API
Add a script for getting the current versions of the API
Python
mit
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
--- +++ @@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8
+"""
+Prints information about which version of the API is currently running,
+so you can create a new set of pins.
+"""
+
+import os
+
+import boto3
+import hcl
+
+
+API_DIR = os.path.dirname(os.path.realpath(__file__))
+API_TF = os.path.join(API_DIR, 'terraform')
+
+
+def bold(message):
+    # This uses ANSI escape codes to print a message in a bright color
+    # to make it stand out more in a console.
+    return f'\033[91m{message}\033[0m'
+
+
+def get_ecs_api_info(name):
+    """
+    Given the name of an API (remus or romulus), return the container
+    versions which are currently running in ECS.
+    """
+    assert name in ('remus', 'romulus')
+
+    ecs = boto3.client('ecs')
+    resp = ecs.describe_services(
+        cluster='api_cluster',
+        services=[f'api_{name}_v1']
+    )
+    assert len(resp['services']) == 1, resp
+    task_definition = resp['services'][0]['taskDefinition']
+
+    resp = ecs.describe_task_definition(
+        taskDefinition=task_definition
+    )
+    assert len(resp['taskDefinition']['containerDefinitions']) == 2, resp
+    containers = resp['taskDefinition']['containerDefinitions']
+    images = [c['image'] for c in containers]
+
+    # The names of the images are in the form:
+    #
+    #   {ecr_repo}/uk.ac.wellcome/{api|nginx_api}:{tag}
+    #
+    image_names = [name.split('/')[-1] for name in images]
+
+    return dict(name.split(':', 2) for name in image_names)
+
+
+if __name__ == '__main__':
+    with open(os.path.join(API_TF, 'variables.tf')) as var_tf:
+        variables = hcl.load(var_tf)['variable']
+
+    prod_api = variables['production_api']['default']
+    prod_api_info = get_ecs_api_info(prod_api)
+
+    staging_api = 'remus' if prod_api == 'romulus' else 'romulus'
+    staging_api_info = get_ecs_api_info(staging_api)
+
+    print(f'The prod API is {bold(prod_api)}')
+    print(f'- api   = {bold(prod_api_info["api"])}')
+    print(f'- nginx = {bold(prod_api_info["nginx_api"])}')
+    print('')
+
+    print(f'The staging API is {bold(staging_api)}')
+    print(f'- api   = {bold(staging_api_info["api"])}')
+    print(f'- nginx = {bold(staging_api_info["nginx_api"])}')
f294b11f68787bd1a8424bb68229acdb1049e03b
parsetree_to_triple.py
parsetree_to_triple.py
import sys

class Tag: # based on http://nlp.stanford.edu:8080/ner/process with classifier english.muc.7class.distsim.crf.ser.gz
    nill = 0
    location = 1
    time = 2
    person = 3
    organization = 4
    money = 5
    percent = 6
    date = 7


class Node:
    """
    One node of the parse tree.
    It is a group of words of same NamedEntityTag (e.g. George Washington).
    """
    def __init__(self, word_list, namedentitytag, subnodes=[]):
        self.words = word_list
        self.child = subnodes
        if(namedentitytag=="O"):
            self.tag = Tag.nill
        if(namedentitytag=="LOCATION"):
            self.tag = Tag.location
        elif(namedentitytag=="TIME"):
            self.tag = Tag.time
        elif(namedentitytag=="PERSON"):
            self.tag = Tag.person
        elif(namedentitytag=="ORGANIZATION"):
            self.tag = Tag.organization
        elif(namedentitytag=="MONEY"):
            self.tag = Tag.money
        elif(namedentitytag=="PERCENT"):
            self.tag = Tag.percent
        elif(namedentitytag=="DATE"):
            self.tag = Tag.date
        else:
            self.tag = Tag.nill
            print("ERROR: unknown NamedEntityTag, set it to nill.",file=sys.stderr)
Create class for tree nodes.
Create class for tree nodes.
Python
agpl-3.0
ProjetPP/PPP-QuestionParsing-Grammatical,ProjetPP/PPP-QuestionParsing-Grammatical
--- +++ @@ -0,0 +1,40 @@
+import sys
+
+class Tag: # based on http://nlp.stanford.edu:8080/ner/process with classifier english.muc.7class.distsim.crf.ser.gz
+    nill = 0
+    location = 1
+    time = 2
+    person = 3
+    organization = 4
+    money = 5
+    percent = 6
+    date = 7
+
+
+class Node:
+    """
+    One node of the parse tree.
+    It is a group of words of same NamedEntityTag (e.g. George Washington).
+    """
+    def __init__(self, word_list, namedentitytag, subnodes=[]):
+        self.words = word_list
+        self.child = subnodes
+        if(namedentitytag=="O"):
+            self.tag = Tag.nill
+        if(namedentitytag=="LOCATION"):
+            self.tag = Tag.location
+        elif(namedentitytag=="TIME"):
+            self.tag = Tag.time
+        elif(namedentitytag=="PERSON"):
+            self.tag = Tag.person
+        elif(namedentitytag=="ORGANIZATION"):
+            self.tag = Tag.organization
+        elif(namedentitytag=="MONEY"):
+            self.tag = Tag.money
+        elif(namedentitytag=="PERCENT"):
+            self.tag = Tag.percent
+        elif(namedentitytag=="DATE"):
+            self.tag = Tag.date
+        else:
+            self.tag = Tag.nill
+            print("ERROR: unknown NamedEntityTag, set it to nill.",file=sys.stderr)
a3c1a83a44764564e8110cc0668a8ba463759d9b
indra/preassembler/make_wm_ontmap.py
indra/preassembler/make_wm_ontmap.py
from indra.sources import eidos
from indra.sources.hume.make_hume_tsv import make_file
from indra.java_vm import autoclass

eidos_package = 'org.clulab.wm.eidos'

if __name__ == '__main__':
    bbn_path = 'hume_examaples.tsv'
    make_file(bbn_path)
    sofia_path = 'sofia_examples.tsv'

    om = autoclass(eidos_package + '.apps.OntologyMapper')
    eidos = autoclass(eidos_package + '.EidosSystem')
    es = eidos(autoclass('java.lang.Object')())

    example_weight = 0.8
    parent_weight = 0.1
    topn = 10
    table_str = om.mapOntologies(es, bbn_path, sofia_path, example_weight,
                                 parent_weight, topn)
Implement generating the ontology map
Implement generating the ontology map
Python
bsd-2-clause
pvtodorov/indra,johnbachman/belpy,bgyori/indra,johnbachman/belpy,pvtodorov/indra,johnbachman/indra,pvtodorov/indra,sorgerlab/belpy,sorgerlab/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,bgyori/indra,bgyori/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/belpy,pvtodorov/indra
--- +++ @@ -0,0 +1,20 @@
+from indra.sources import eidos
+from indra.sources.hume.make_hume_tsv import make_file
+from indra.java_vm import autoclass
+
+eidos_package = 'org.clulab.wm.eidos'
+
+if __name__ == '__main__':
+    bbn_path = 'hume_examaples.tsv'
+    make_file(bbn_path)
+    sofia_path = 'sofia_examples.tsv'
+
+    om = autoclass(eidos_package + '.apps.OntologyMapper')
+    eidos = autoclass(eidos_package + '.EidosSystem')
+    es = eidos(autoclass('java.lang.Object')())
+
+    example_weight = 0.8
+    parent_weight = 0.1
+    topn = 10
+    table_str = om.mapOntologies(es, bbn_path, sofia_path, example_weight,
+                                 parent_weight, topn)
6187cb81c7d80dc05f25fb399a9d01ee61fa93d6
distarray/tests/test_odin_local.py
distarray/tests/test_odin_local.py
import numpy as np
from distarray.client import DistArrayContext, DistArrayProxy
from IPython.parallel import Client

c = Client()
dv = c[:]
dac = DistArrayContext(dv)

da = dac.empty((1024, 1024))
da.fill(2 * np.pi)


def local(context):

    def wrap(fn):

        func_key = context._generate_key()
        context.view.push({func_key: fn}, targets=context.targets,
                          block=True)
        result_key = context._generate_key()

        def inner(a):
            err_msg_fmt = "distarray context mismatch: {} {}"
            assert context == a.context, err_msg_fmt.format(context, a.context)
            context._execute('%s = %s(%s)' % (result_key, func_key, a.key))
            return DistArrayProxy(result_key, context)

        return inner

    return wrap


@local(dac)
def localsin(da):
    return np.sin(da)


@local(dac)
def localadd50(da):
    return da + 50


@local(dac)
def localsum(da):
    return np.sum(da)


dv.execute('import numpy as np')
db = localsin(da)
dc = localadd50(da)
dd = localsum(da)
#assert_allclose(db, 0)
Add a simple decorator for local evaluation of functions.
Add a simple decorator for local evaluation of functions.
Python
bsd-3-clause
RaoUmer/distarray,enthought/distarray,RaoUmer/distarray,enthought/distarray
--- +++ @@ -0,0 +1,52 @@
+import numpy as np
+from distarray.client import DistArrayContext, DistArrayProxy
+from IPython.parallel import Client
+
+c = Client()
+dv = c[:]
+dac = DistArrayContext(dv)
+
+da = dac.empty((1024, 1024))
+da.fill(2 * np.pi)
+
+
+def local(context):
+
+    def wrap(fn):
+
+        func_key = context._generate_key()
+        context.view.push({func_key: fn}, targets=context.targets,
+                          block=True)
+        result_key = context._generate_key()
+
+        def inner(a):
+            err_msg_fmt = "distarray context mismatch: {} {}"
+            assert context == a.context, err_msg_fmt.format(context, a.context)
+            context._execute('%s = %s(%s)' % (result_key, func_key, a.key))
+            return DistArrayProxy(result_key, context)
+
+        return inner
+
+    return wrap
+
+
+@local(dac)
+def localsin(da):
+    return np.sin(da)
+
+
+@local(dac)
+def localadd50(da):
+    return da + 50
+
+
+@local(dac)
+def localsum(da):
+    return np.sum(da)
+
+
+dv.execute('import numpy as np')
+db = localsin(da)
+dc = localadd50(da)
+dd = localsum(da)
+#assert_allclose(db, 0)
6e4e5cb5d32ac650d5ae08b47fe8e1c9d7e2ec04
tests/test_cfg_thumb_firmware.py
tests/test_cfg_thumb_firmware.py
import os

import angr
from nose.tools import assert_equal, assert_true

test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))


def test_thumb_firmware_cfg():
    """
    Test an ARM firmware sample.

    This tests CFG, but also the Gym (the ThumbSpotter, etc)
    Also requires proper relocs support, or You're Gonna Have a Bad Time(tm)
    In short, a very comprehensive high level test
    :return:
    """
    path = os.path.join(test_location, "armel", "i2c_master_read-nucleol152re.elf")
    p = angr.Project(path, auto_load_libs=False)

    # This is the canonical way to carve up a nasty firmware thing.
    cfg = p.analyses.CFGFast(resolve_indirect_jumps=True, force_complete_scan=False, normalize=True)

    # vfprintf should return; this function has a weird C++ thing that gets compiled as a tail-call.
    # The function itself must return, and _NOT_ contain its callee.
    vfprintf = p.kb.functions[p.loader.find_symbol('vfprintf').rebased_addr]
    assert_true(vfprintf.returning)
    assert_true(len(list(vfprintf.blocks)) == 1)
    # The function should have one "transition"
    block = list(vfprintf.endpoints_with_type['transition'])[0]
    assert_true(len(block.successors()) == 1)
    succ = list(block.successors())[0]
    assert_true(succ.addr == 0x080081dd)
    f2 = p.kb.functions[succ.addr]
    assert_true(f2.name == '_vfprintf_r')
    assert_true(f2.returning)

if __name__ == "__main__":
    test_thumb_firmware_cfg()
Add a new test using an ARM firmware
Add a new test using an ARM firmware
Python
bsd-2-clause
schieb/angr,schieb/angr,schieb/angr,angr/angr,angr/angr,angr/angr,iamahuman/angr,iamahuman/angr,iamahuman/angr
--- +++ @@ -0,0 +1,40 @@
+
+import os
+
+import angr
+from nose.tools import assert_equal, assert_true
+
+test_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'binaries', 'tests'))
+
+
+def test_thumb_firmware_cfg():
+    """
+    Test an ARM firmware sample.
+
+    This tests CFG, but also the Gym (the ThumbSpotter, etc)
+    Also requires proper relocs support, or You're Gonna Have a Bad Time(tm)
+    In short, a very comprehensive high level test
+    :return:
+    """
+    path = os.path.join(test_location, "armel", "i2c_master_read-nucleol152re.elf")
+    p = angr.Project(path, auto_load_libs=False)
+
+    # This is the canonical way to carve up a nasty firmware thing.
+    cfg = p.analyses.CFGFast(resolve_indirect_jumps=True, force_complete_scan=False, normalize=True)
+
+    # vfprintf should return; this function has a weird C++ thing that gets compiled as a tail-call.
+    # The function itself must return, and _NOT_ contain its callee.
+    vfprintf = p.kb.functions[p.loader.find_symbol('vfprintf').rebased_addr]
+    assert_true(vfprintf.returning)
+    assert_true(len(list(vfprintf.blocks)) == 1)
+    # The function should have one "transition"
+    block = list(vfprintf.endpoints_with_type['transition'])[0]
+    assert_true(len(block.successors()) == 1)
+    succ = list(block.successors())[0]
+    assert_true(succ.addr == 0x080081dd)
+    f2 = p.kb.functions[succ.addr]
+    assert_true(f2.name == '_vfprintf_r')
+    assert_true(f2.returning)
+
+if __name__ == "__main__":
+    test_thumb_firmware_cfg()
841a84d940ff1dc8e4751ef31acb25bc3e1497da
tests/test_ppc64_initial_rtoc.py
tests/test_ppc64_initial_rtoc.py
#!/usr/bin/env python

import nose
import logging
import cle

import os
test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             os.path.join('..', '..', 'binaries', 'tests'))

def test_ppc64el_abiv2():
    # ABIv2: 'TOC pointer register typically points to the beginning of the .got
    # section + 0x8000.' For more details, see:
    # http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/dbdoclet.50655241_66700.html
    libc = os.path.join(test_location, 'ppc64el', 'fauxware_static')
    ld = cle.Loader(libc, auto_load_libs=False)
    nose.tools.assert_false(ld.main_object.is_ppc64_abiv1)
    nose.tools.assert_true(ld.main_object.is_ppc64_abiv2)
    nose.tools.assert_equal(ld.main_object.ppc64_initial_rtoc, 0x100e7b00)

    # ABIv2, PIC
    libc = os.path.join(test_location, 'ppc64el', 'fauxware')
    ld = cle.Loader(libc, auto_load_libs=False, main_opts={'base_addr': 0})
    nose.tools.assert_false(ld.main_object.is_ppc64_abiv1)
    nose.tools.assert_true(ld.main_object.is_ppc64_abiv2)
    nose.tools.assert_equal(ld.main_object.ppc64_initial_rtoc, 0x27f00)

def test_ppc64el_abiv1():
    # ABIv1: TOC value can be determined by 'function descriptor pointed at by
    # the e_entry field in the ELF header.' For more details, see:
    # https://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html#PROC-REG
    libc = os.path.join(test_location, 'ppc64', 'fauxware')
    ld = cle.Loader(libc, auto_load_libs=False)
    nose.tools.assert_true(ld.main_object.is_ppc64_abiv1)
    nose.tools.assert_false(ld.main_object.is_ppc64_abiv2)
    nose.tools.assert_equal(ld.main_object.ppc64_initial_rtoc, 0x10018e80)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    test_ppc64el_abiv1()
    test_ppc64el_abiv2()
Add test case for ppc64 initial rtoc value
Add test case for ppc64 initial rtoc value
Python
bsd-2-clause
angr/cle
--- +++ @@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+import nose
+import logging
+import cle
+
+import os
+test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                             os.path.join('..', '..', 'binaries', 'tests'))
+
+def test_ppc64el_abiv2():
+    # ABIv2: 'TOC pointer register typically points to the beginning of the .got
+    # section + 0x8000.' For more details, see:
+    # http://openpowerfoundation.org/wp-content/uploads/resources/leabi/content/dbdoclet.50655241_66700.html
+    libc = os.path.join(test_location, 'ppc64el', 'fauxware_static')
+    ld = cle.Loader(libc, auto_load_libs=False)
+    nose.tools.assert_false(ld.main_object.is_ppc64_abiv1)
+    nose.tools.assert_true(ld.main_object.is_ppc64_abiv2)
+    nose.tools.assert_equal(ld.main_object.ppc64_initial_rtoc, 0x100e7b00)
+
+    # ABIv2, PIC
+    libc = os.path.join(test_location, 'ppc64el', 'fauxware')
+    ld = cle.Loader(libc, auto_load_libs=False, main_opts={'base_addr': 0})
+    nose.tools.assert_false(ld.main_object.is_ppc64_abiv1)
+    nose.tools.assert_true(ld.main_object.is_ppc64_abiv2)
+    nose.tools.assert_equal(ld.main_object.ppc64_initial_rtoc, 0x27f00)
+
+def test_ppc64el_abiv1():
+    # ABIv1: TOC value can be determined by 'function descriptor pointed at by
+    # the e_entry field in the ELF header.' For more details, see:
+    # https://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi-1.9.html#PROC-REG
+    libc = os.path.join(test_location, 'ppc64', 'fauxware')
+    ld = cle.Loader(libc, auto_load_libs=False)
+    nose.tools.assert_true(ld.main_object.is_ppc64_abiv1)
+    nose.tools.assert_false(ld.main_object.is_ppc64_abiv2)
+    nose.tools.assert_equal(ld.main_object.ppc64_initial_rtoc, 0x10018e80)
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    test_ppc64el_abiv1()
+    test_ppc64el_abiv2()
e7534c6d5fd5c7d76c56d48be12302b596b35d29
skyfield/tests/test_strs_and_reprs.py
skyfield/tests/test_strs_and_reprs.py
import textwrap
from ..api import Topos, load
from ..sgp4lib import EarthSatellite

def dedent(s):
    return textwrap.dedent(s.rstrip())

def eph():
    yield load('de421.bsp')

def test_jpl_segment(eph):
    e = eph['mercury barycenter']
    expected = dedent("""\
    Segment 'de421.bsp' 0 SOLAR SYSTEM BARYCENTER -> 1 MERCURY BARYCENTER
    """)
    assert str(e) == expected
    expected = dedent("""\
    <Segment 'de421.bsp' 0 SOLAR SYSTEM BARYCENTER -> 1 MERCURY BARYCENTER>
    """)
    assert repr(e) == expected

def test_satellite(eph):
    lines = [
        'ISS (ZARYA)             ',
        '1 25544U 98067A   13330.58127943  .00000814  00000-0  21834-4 0  1064',
        '2 25544  51.6484  23.7537 0001246  74.1647  18.7420 15.50540527859894',
    ]
    s = EarthSatellite(lines, None)
    expected = dedent("""\
    <EarthSatellite number=25544 epoch=2013-11-26T13:57:03Z>
    """)
    assert str(s) == expected
    expected = dedent("""\
    <EarthSatellite number=25544 epoch=2013-11-26T13:57:03Z>
    """)
    assert repr(s) == expected

def test_topos(eph):
    t = Topos(latitude_degrees=42.2, longitude_degrees=-88.1)
    expected = dedent("""\
    <Topos 42deg 12' 00.0" N, -88deg 06' 00.0" E>
    """)
    assert str(t) == expected
    expected = dedent("""\
    <Topos 42deg 12' 00.0" N, -88deg 06' 00.0" E>
    """)
    assert repr(t) == expected

def test_vector_sum(eph):
    e = eph['earth']
    expected = dedent("""\
    Sum of 2 vectors:
     + Segment 'de421.bsp' 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
     + Segment 'de421.bsp' 3 EARTH BARYCENTER -> 399 EARTH
    """)
    assert str(e) == expected
    expected = dedent("""\
    <VectorSum of 2 vectors from center 0 to target 399>
    """)
    assert repr(e) == expected
Add test suite for str()'s and repr()'s
Add test suite for str()'s and repr()'s Before I jump into improvements to how the strings and reprs work, I really should have something in the unit tests about them. The clunky reporting from Sphinx's `make doctest`, which are the current tests that worry about strings and reprs, makes them awkward for development use. Plus, putting tests for all strings and reprs in one place makes it easier to see that all of them have test coverage!
Python
mit
skyfielders/python-skyfield,skyfielders/python-skyfield
--- +++ @@ -0,0 +1,60 @@
+import textwrap
+from ..api import Topos, load
+from ..sgp4lib import EarthSatellite
+
+def dedent(s):
+    return textwrap.dedent(s.rstrip())
+
+def eph():
+    yield load('de421.bsp')
+
+def test_jpl_segment(eph):
+    e = eph['mercury barycenter']
+    expected = dedent("""\
+    Segment 'de421.bsp' 0 SOLAR SYSTEM BARYCENTER -> 1 MERCURY BARYCENTER
+    """)
+    assert str(e) == expected
+    expected = dedent("""\
+    <Segment 'de421.bsp' 0 SOLAR SYSTEM BARYCENTER -> 1 MERCURY BARYCENTER>
+    """)
+    assert repr(e) == expected
+
+def test_satellite(eph):
+    lines = [
+        'ISS (ZARYA)             ',
+        '1 25544U 98067A   13330.58127943  .00000814  00000-0  21834-4 0  1064',
+        '2 25544  51.6484  23.7537 0001246  74.1647  18.7420 15.50540527859894',
+    ]
+    s = EarthSatellite(lines, None)
+    expected = dedent("""\
+    <EarthSatellite number=25544 epoch=2013-11-26T13:57:03Z>
+    """)
+    assert str(s) == expected
+    expected = dedent("""\
+    <EarthSatellite number=25544 epoch=2013-11-26T13:57:03Z>
+    """)
+    assert repr(s) == expected
+
+def test_topos(eph):
+    t = Topos(latitude_degrees=42.2, longitude_degrees=-88.1)
+    expected = dedent("""\
+    <Topos 42deg 12' 00.0" N, -88deg 06' 00.0" E>
+    """)
+    assert str(t) == expected
+    expected = dedent("""\
+    <Topos 42deg 12' 00.0" N, -88deg 06' 00.0" E>
+    """)
+    assert repr(t) == expected
+
+def test_vector_sum(eph):
+    e = eph['earth']
+    expected = dedent("""\
+    Sum of 2 vectors:
+     + Segment 'de421.bsp' 0 SOLAR SYSTEM BARYCENTER -> 3 EARTH BARYCENTER
+     + Segment 'de421.bsp' 3 EARTH BARYCENTER -> 399 EARTH
+    """)
+    assert str(e) == expected
+    expected = dedent("""\
+    <VectorSum of 2 vectors from center 0 to target 399>
+    """)
+    assert repr(e) == expected
63a98b84709c4e981ea3fcf5493849948146e21d
thecut/forms/tests/test_utils.py
thecut/forms/tests/test_utils.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.test import TestCase
from mock import MagicMock
from thecut.forms.utils import add_css_class


class TestAddCssClass(TestCase):
    def test_add_new_css_class(self):
        widget = MagicMock()
        widget.attrs = {'class': 'a b'}
        widget = add_css_class(widget, 'c')
        self.assertEqual(set(widget.attrs.get('class', '').split()),
                         {'a', 'b', 'c'})

    def test_add_existing_css_class(self):
        widget = MagicMock()
        widget.attrs = {'class': 'a b'}
        widget = add_css_class(widget, 'b')
        self.assertEqual(set(widget.attrs.get('class', '').split()),
                         {'a', 'b'})
Add unit test for add_css_class util function
Add unit test for add_css_class util function
Python
apache-2.0
thecut/thecut-forms,thecut/thecut-forms
--- +++ @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import, unicode_literals +from django.test import TestCase +from mock import MagicMock +from thecut.forms.utils import add_css_class + + +class TestAddCssClass(TestCase): + def test_add_new_css_class(self): + widget = MagicMock() + widget.attrs = {'class': 'a b'} + widget = add_css_class(widget, 'c') + self.assertEqual(set(widget.attrs.get('class', '').split()), + {'a', 'b', 'c'}) + + def test_add_existing_css_class(self): + widget = MagicMock() + widget.attrs = {'class': 'a b'} + widget = add_css_class(widget, 'b') + self.assertEqual(set(widget.attrs.get('class', '').split()), + {'a', 'b'})
4b3443d5cccf9a62ffceb7ea795ad0ef69811908
src/fft.py
src/fft.py
import cmath

def fft0(xs):
    n, ys = len(xs), []

    for i in range(n):
        yi = complex(0, 0)

        for j in range(n):
            yi += complex(xs[j]) * cmath.exp(complex(0, -2 * cmath.pi / n * i * j))

        ys.append(yi)

    return ys

if __name__ == '__main__':
    print(fft0([1, 2, 3]))
Add simple and untested FFT transform
Add simple and untested FFT transform
Python
mit
all3fox/algos-py
--- +++ @@ -0,0 +1,17 @@ +import cmath + +def fft0(xs): + n, ys = len(xs), [] + + for i in range(n): + yi = complex(0, 0) + + for j in range(n): + yi += complex(xs[j]) * cmath.exp(complex(0, -2 * cmath.pi / n * i * j)) + + ys.append(yi) + + return ys + +if __name__ == '__main__': + print(fft0([1, 2, 3]))
90ac14b61066f6039df5d1522b7ac6bd76779b7b
tests.py
tests.py
from tfidf_lsa import calculate_corpus_var
import json
import os
import shutil
import subprocess
import unittest

class TestMoviePepper(unittest.TestCase):

    def test_crawl(self):
        try:
            shutil.rmtree('./movie_scrape/crawls')
            os.remove('./movie_scrape/imdb.json')
            os.remove('./db.json')
        except FileNotFoundError:
            pass
        except OSError:
            pass

        subprocess.run(['START_URL="http://www.imdb.com/search/title?role=nm0000095&title_type=feature&user_rating=8.0,10" ./scrap.sh'], cwd="./movie_scrape/", shell=True)

        try:
            with open('./movie_scrape/imdb.json', 'r') as in_file:
                json.load(in_file)
        except:
            self.fail()

    def test_tfidf_lsa(self):
        calculate_corpus_var(max_df=200, min_df=2, n_components=10, max_features=None)

        try:
            with open('./db.json', 'r') as in_file:
                json.load(in_file)
        except:
            self.fail()

if __name__ == '__main__':
    unittest.main()
Add a basic crawler and tfidf_lsa creation test
Add a basic crawler and tfidf_lsa creation test
Python
mit
hugo19941994/movie-pepper-back,hugo19941994/movie-pepper-back
--- +++ @@ -0,0 +1,38 @@ +from tfidf_lsa import calculate_corpus_var +import json +import os +import shutil +import subprocess +import unittest + +class TestMoviePepper(unittest.TestCase): + + def test_crawl(self): + try: + shutil.rmtree('./movie_scrape/crawls') + os.remove('./movie_scrape/imdb.json') + os.remove('./db.json') + except FileNotFoundError: + pass + except OSError: + pass + + subprocess.run(['START_URL="http://www.imdb.com/search/title?role=nm0000095&title_type=feature&user_rating=8.0,10" ./scrap.sh'], cwd="./movie_scrape/", shell=True) + + try: + with open('./movie_scrape/imdb.json', 'r') as in_file: + json.load(in_file) + except: + self.fail() + + def test_tfidf_lsa(self): + calculate_corpus_var(max_df=200, min_df=2, n_components=10, max_features=None) + + try: + with open('./db.json', 'r') as in_file: + json.load(in_file) + except: + self.fail() + +if __name__ == '__main__': + unittest.main()
d180b780487b81b06beb24e809cfb17fd2320e3f
tests/test_get_user_config.py
tests/test_get_user_config.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_get_user_config
--------------------

Tests formerly known from a unittest residing in test_config.py named
"""

import pytest


@pytest.fixture(scope='function')
def back_up_rc(request):
    """
    Back up an existing cookiecutter rc and restore it after the test.
    If ~/.cookiecutterrc is pre-existing, move it to a temp location
    """
    self.user_config_path = os.path.expanduser('~/.cookiecutterrc')
    self.user_config_path_backup = os.path.expanduser(
        '~/.cookiecutterrc.backup'
    )

    if os.path.exists(self.user_config_path):
        shutil.copy(self.user_config_path, self.user_config_path_backup)
        os.remove(self.user_config_path)

    def restore_rc():
        """
        If it existed, restore ~/.cookiecutterrc
        """
        if os.path.exists(self.user_config_path_backup):
            shutil.copy(self.user_config_path_backup, self.user_config_path)
            os.remove(self.user_config_path_backup)
    request.addfinalizer(restore_rc)
Create new module for TestGetUserConfig with setup/teardown
Create new module for TestGetUserConfig with setup/teardown
Python
bsd-3-clause
audreyr/cookiecutter,jhermann/cookiecutter,drgarcia1986/cookiecutter,willingc/cookiecutter,stevepiercy/cookiecutter,0k/cookiecutter,benthomasson/cookiecutter,venumech/cookiecutter,benthomasson/cookiecutter,terryjbates/cookiecutter,dajose/cookiecutter,vincentbernat/cookiecutter,atlassian/cookiecutter,cguardia/cookiecutter,terryjbates/cookiecutter,christabor/cookiecutter,janusnic/cookiecutter,pjbull/cookiecutter,ramiroluz/cookiecutter,cichm/cookiecutter,vintasoftware/cookiecutter,janusnic/cookiecutter,takeflight/cookiecutter,luzfcb/cookiecutter,nhomar/cookiecutter,agconti/cookiecutter,drgarcia1986/cookiecutter,Vauxoo/cookiecutter,jhermann/cookiecutter,ionelmc/cookiecutter,Springerle/cookiecutter,lgp171188/cookiecutter,ramiroluz/cookiecutter,cichm/cookiecutter,hackebrot/cookiecutter,foodszhang/cookiecutter,lucius-feng/cookiecutter,michaeljoseph/cookiecutter,pjbull/cookiecutter,cguardia/cookiecutter,luzfcb/cookiecutter,sp1rs/cookiecutter,sp1rs/cookiecutter,lgp171188/cookiecutter,audreyr/cookiecutter,0k/cookiecutter,stevepiercy/cookiecutter,vincentbernat/cookiecutter,hackebrot/cookiecutter,tylerdave/cookiecutter,vintasoftware/cookiecutter,atlassian/cookiecutter,venumech/cookiecutter,kkujawinski/cookiecutter,christabor/cookiecutter,moi65/cookiecutter,agconti/cookiecutter,Vauxoo/cookiecutter,Springerle/cookiecutter,nhomar/cookiecutter,kkujawinski/cookiecutter,ionelmc/cookiecutter,moi65/cookiecutter,foodszhang/cookiecutter,dajose/cookiecutter,michaeljoseph/cookiecutter,takeflight/cookiecutter,willingc/cookiecutter,tylerdave/cookiecutter,lucius-feng/cookiecutter
--- +++ @@ -0,0 +1,36 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +test_get_user_config +-------------------- + +Tests formerly known from a unittest residing in test_config.py named +""" + +import pytest + + +@pytest.fixture(scope='function') +def back_up_rc(request): + """ + Back up an existing cookiecutter rc and restore it after the test. + If ~/.cookiecutterrc is pre-existing, move it to a temp location + """ + self.user_config_path = os.path.expanduser('~/.cookiecutterrc') + self.user_config_path_backup = os.path.expanduser( + '~/.cookiecutterrc.backup' + ) + + if os.path.exists(self.user_config_path): + shutil.copy(self.user_config_path, self.user_config_path_backup) + os.remove(self.user_config_path) + + def restore_rc(): + """ + If it existed, restore ~/.cookiecutterrc + """ + if os.path.exists(self.user_config_path_backup): + shutil.copy(self.user_config_path_backup, self.user_config_path) + os.remove(self.user_config_path_backup) + request.addfinalizer(restore_rc)
bd5537414ed5d05eeab2c41b22af9d665823ccaf
api_v3/migrations/0008_v1_to_v2_attachments.py
api_v3/migrations/0008_v1_to_v2_attachments.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-08-26 13:12
from __future__ import unicode_literals
import os.path

from django.db import migrations

from settings.settings import MEDIA_ROOT
from ticket.models import TicketAttachment
from api_v3.models import Attachment


def generate_and_copy_old_file_names(apps, schema_editor):
    old_attachments = TicketAttachment.objects.all()

    for att in old_attachments:
        new_path = att.local_path.replace(MEDIA_ROOT + '/', '')

        if 'attachments/' in new_path:
            continue

        file_name = os.path.basename(new_path)
        Attachment.objects.filter(upload=file_name).update(upload=new_path)


class Migration(migrations.Migration):

    dependencies = [
        ('api_v3', '0007_v1_to_v2_tickets'),
    ]

    operations = [
        migrations.RunPython(generate_and_copy_old_file_names),
        migrations.RunSQL(
            'update api_v3_ticket set sent_notifications_at=now() '
            'where sent_notifications_at is null;'
        )
    ]
Migrate v1 attachment file names.
Migrate v1 attachment file names.
Python
mit
occrp/id-backend
--- +++ @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.10.1 on 2017-08-26 13:12 +from __future__ import unicode_literals +import os.path + +from django.db import migrations + +from settings.settings import MEDIA_ROOT +from ticket.models import TicketAttachment +from api_v3.models import Attachment + + +def generate_and_copy_old_file_names(apps, schema_editor): + old_attachments = TicketAttachment.objects.all() + + for att in old_attachments: + new_path = att.local_path.replace(MEDIA_ROOT + '/', '') + + if 'attachments/' in new_path: + continue + + file_name = os.path.basename(new_path) + Attachment.objects.filter(upload=file_name).update(upload=new_path) + + +class Migration(migrations.Migration): + + dependencies = [ + ('api_v3', '0007_v1_to_v2_tickets'), + ] + + operations = [ + migrations.RunPython(generate_and_copy_old_file_names), + migrations.RunSQL( + 'update api_v3_ticket set sent_notifications_at=now() ' + 'where sent_notifications_at is null;' + ) + ]
d89d783e9b4555eadf17467a9022fec2c73b3943
fdp-api/python/tests/dump_metadata.py
fdp-api/python/tests/dump_metadata.py
#
# This script creates dump files of metadata in different formats upon requests to FDP.
#

from os import path, makedirs
from urllib2 import urlopen, urlparse, Request
from rdflib import Graph
from logging import getLogger, StreamHandler, INFO
from myglobals import *


logger = getLogger(__name__)
logger.setLevel(INFO)
ch = StreamHandler()
ch.setLevel(INFO)
logger.addHandler(ch)


def dump():
    for fmt,fxt in MIME_TYPES.iteritems():
        dump_path = path.join(DUMP_DIR, path.basename(fmt))
        makedirs(dump_path)

        for url in [ urlparse.urljoin(BASE_URL, p) for p in URL_PATHS ]:
            logger.info("Request metadata in '%s' from\n %s\n" % (fmt, url))

            req = Request(url)
            req.add_header('Accept', fmt)
            res = urlopen(req)
            fname = '%s.%s' % (path.basename(urlparse.urlparse(url).path), fxt)
            fname = path.join(dump_path, fname)

            logger.info("Write metadata into file './%s'\n" % fname)

            with open(fname, 'w') as fout:
                fout.write(res.read())

dump()
Add script to dump FDP metadata into files in different formats.
Add script to dump FDP metadata into files in different formats.
Python
apache-2.0
DTL-FAIRData/ODEX-FAIRDataPoint,DTL-FAIRData/ODEX-FAIRDataPoint,NLeSC/ODEX-FAIRDataPoint,DTL-FAIRData/ODEX-FAIRDataPoint,DTL-FAIRData/ODEX-FAIRDataPoint,NLeSC/ODEX-FAIRDataPoint,NLeSC/ODEX-FAIRDataPoint,NLeSC/ODEX-FAIRDataPoint
--- +++ @@ -0,0 +1,39 @@ +# +# This script creates dump files of metadata in different formats upon requests to FDP. +# + +from os import path, makedirs +from urllib2 import urlopen, urlparse, Request +from rdflib import Graph +from logging import getLogger, StreamHandler, INFO +from myglobals import * + + +logger = getLogger(__name__) +logger.setLevel(INFO) +ch = StreamHandler() +ch.setLevel(INFO) +logger.addHandler(ch) + + +def dump(): + for fmt,fxt in MIME_TYPES.iteritems(): + dump_path = path.join(DUMP_DIR, path.basename(fmt)) + makedirs(dump_path) + + for url in [ urlparse.urljoin(BASE_URL, p) for p in URL_PATHS ]: + logger.info("Request metadata in '%s' from\n %s\n" % (fmt, url)) + + req = Request(url) + req.add_header('Accept', fmt) + res = urlopen(req) + fname = '%s.%s' % (path.basename(urlparse.urlparse(url).path), fxt) + fname = path.join(dump_path, fname) + + logger.info("Write metadata into file './%s'\n" % fname) + + with open(fname, 'w') as fout: + fout.write(res.read()) + +dump() +
8994ca798a9cc2971954ce2b30bb6b5284c6e927
twinkles/PostageStampMaker.py
twinkles/PostageStampMaker.py
import lsst.afw.geom as afwGeom
import lsst.afw.image as afwImage


class PostageStampMaker(object):
    def __init__(self, expfile):
        self.exposure = afwImage.ExposureF(expfile)
    def getScienceArray(self):
        return self.exposure.getMaskedImage().getImage().getArray()
    def getBBox(self, ra, dec, arcsec):
        ra_angle = afwGeom.Angle(ra, afwGeom.degrees)
        dec_angle = afwGeom.Angle(dec, afwGeom.degrees)
        wcs = self.exposure.getWcs()
        center_pix = wcs.skyToPixel(ra_angle, dec_angle)
        pixel_scale = wcs.pixelScale().asArcseconds()
        npix = int(arcsec/pixel_scale)
        llc = afwGeom.Point2I(int(center_pix.getX() - npix/2.),
                              int(center_pix.getY() - npix/2.))
        bbox = afwGeom.Box2I(llc, afwGeom.Extent2I(npix, npix))
        return bbox
    def Factory(self, ra, dec, arcsec):
        bbox = self.getBBox(ra, dec, arcsec)
        return self.exposure.Factory(self.exposure, bbox)

if __name__ == '__main__':
    import lsst.afw.display.ds9 as ds9
    expfile = '/home/jchiang/work/LSST/DESC/Twinkles/tests/v840-fr.fits'
    ra, dec, arcsec = 53.010895, -27.437648, 10
    outfile = 'postage_stamp.fits'

    stamp_maker = PostageStampMaker(expfile)
    postage_stamp = stamp_maker.Factory(ra, dec, arcsec)

    postage_stamp.writeFits(outfile)

# ds9.mtv(postage_stamp.getMaskedImage().getImage())
Use Stack to create postage stamps from Exposure (specifically CoaddTempExp) images
Use Stack to create postage stamps from Exposure (specifically CoaddTempExp) images
Python
mit
LSSTDESC/Twinkles,DarkEnergyScienceCollaboration/Twinkles,DarkEnergyScienceCollaboration/Twinkles,LSSTDESC/Twinkles
--- +++ @@ -0,0 +1,36 @@ +import lsst.afw.geom as afwGeom +import lsst.afw.image as afwImage + + +class PostageStampMaker(object): + def __init__(self, expfile): + self.exposure = afwImage.ExposureF(expfile) + def getScienceArray(self): + return self.exposure.getMaskedImage().getImage().getArray() + def getBBox(self, ra, dec, arcsec): + ra_angle = afwGeom.Angle(ra, afwGeom.degrees) + dec_angle = afwGeom.Angle(dec, afwGeom.degrees) + wcs = self.exposure.getWcs() + center_pix = wcs.skyToPixel(ra_angle, dec_angle) + pixel_scale = wcs.pixelScale().asArcseconds() + npix = int(arcsec/pixel_scale) + llc = afwGeom.Point2I(int(center_pix.getX() - npix/2.), + int(center_pix.getY() - npix/2.)) + bbox = afwGeom.Box2I(llc, afwGeom.Extent2I(npix, npix)) + return bbox + def Factory(self, ra, dec, arcsec): + bbox = self.getBBox(ra, dec, arcsec) + return self.exposure.Factory(self.exposure, bbox) + +if __name__ == '__main__': + import lsst.afw.display.ds9 as ds9 + expfile = '/home/jchiang/work/LSST/DESC/Twinkles/tests/v840-fr.fits' + ra, dec, arcsec = 53.010895, -27.437648, 10 + outfile = 'postage_stamp.fits' + + stamp_maker = PostageStampMaker(expfile) + postage_stamp = stamp_maker.Factory(ra, dec, arcsec) + + postage_stamp.writeFits(outfile) + +# ds9.mtv(postage_stamp.getMaskedImage().getImage())
87460f7ff5d5079cffb7f1c02930fea2f891d3f0
wafer/management/commands/pycon_speaker_contact_details.py
wafer/management/commands/pycon_speaker_contact_details.py
import sys
import csv
from optparse import make_option

from django.core.management.base import BaseCommand

from django.contrib.auth.models import User
from wafer.conf_registration.models import RegisteredAttendee
from wafer.talks.models import ACCEPTED


class Command(BaseCommand):
    help = "List contact details for the speakers."

    option_list = BaseCommand.option_list + tuple([
        make_option('--speakers', action="store_true", default=False,
                    help='List speaker email addresses'
                    ' (for accepted talks)'),
        make_option('--allspeakers', action="store_true", default=False,
                    help='List speaker email addresses'
                    ' (for all talks)'),
        ])

    def _speaker_emails(self, options):
        people = User.objects.filter(talks__isnull=False).distinct()

        csv_file = csv.writer(sys.stdout)
        for person in people:
            if options['allspeakers']:
                titles = [x.title for x in person.talks.all()]
            else:
                titles = [x.title for x in
                          person.talks.filter(status=ACCEPTED)]
            if not titles:
                continue
            # get_full_name may be blank, since we don't require that
            # the user specify it, but we will have the email as an
            # identifier
            row = [x.encode("utf-8")
                   for x in (person.get_full_name(),
                             person.get_profile().contact_number or 'NO CONTACT INFO',
                             ';'.join(titles))]
            csv_file.writerow(row)

    def handle(self, *args, **options):
        self._speaker_emails(options)
Add a query command for easily finding people without contact info
Add a query command for easily finding people without contact info
Python
isc
CarlFK/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CTPUG/wafer,CarlFK/wafer,CTPUG/wafer,CarlFK/wafer
--- +++ @@ -0,0 +1,46 @@ +import sys +import csv +from optparse import make_option + +from django.core.management.base import BaseCommand + +from django.contrib.auth.models import User +from wafer.conf_registration.models import RegisteredAttendee +from wafer.talks.models import ACCEPTED + + +class Command(BaseCommand): + help = "List contact details for the speakers." + + option_list = BaseCommand.option_list + tuple([ + make_option('--speakers', action="store_true", default=False, + help='List speaker email addresses' + ' (for accepted talks)'), + make_option('--allspeakers', action="store_true", default=False, + help='List speaker email addresses' + ' (for all talks)'), + ]) + + def _speaker_emails(self, options): + people = User.objects.filter(talks__isnull=False).distinct() + + csv_file = csv.writer(sys.stdout) + for person in people: + if options['allspeakers']: + titles = [x.title for x in person.talks.all()] + else: + titles = [x.title for x in + person.talks.filter(status=ACCEPTED)] + if not titles: + continue + # get_full_name may be blank, since we don't require that + # the user specify it, but we will have the email as an + # identifier + row = [x.encode("utf-8") + for x in (person.get_full_name(), + person.get_profile().contact_number or 'NO CONTACT INFO', + ';'.join(titles))] + csv_file.writerow(row) + + def handle(self, *args, **options): + self._speaker_emails(options)
a06796003e72cee518e66c7250afc3e4aec6ab7a
codingame/medium/dwarfs_standing.py
codingame/medium/dwarfs_standing.py
from collections import defaultdict


def traverseRelations(relations, children, length):
    if len(children) == 0:
        return length

    lengths = []
    for child in children:
        lengths.append(traverseRelations(relations, relations[child], length + 1))

    return max(lengths)

# The number of relationships of influence
n = int(raw_input())
relations = defaultdict(list)
for i in xrange(n):
    # A relationship of influence between two people (x influences y)
    x, y = [int(j) for j in raw_input().split()]
    relations[x].append(y)

# Find the longest succession of influences
lengths = []
for childKey in relations.keys():
    lengths.append(traverseRelations(relations, relations[childKey], 1))

print max(lengths)
Add exercise Dwarfs standing on the shoulders of giants
Add exercise Dwarfs standing on the shoulders of giants
Python
mit
AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas
--- +++ @@ -0,0 +1,27 @@ +from collections import defaultdict + + +def traverseRelations(relations, children, length): + if len(children) == 0: + return length + + lengths = [] + for child in children: + lengths.append(traverseRelations(relations, relations[child], length + 1)) + + return max(lengths) + +# The number of relationships of influence +n = int(raw_input()) +relations = defaultdict(list) +for i in xrange(n): + # A relationship of influence between two people (x influences y) + x, y = [int(j) for j in raw_input().split()] + relations[x].append(y) + +# Find the longest succession of influences +lengths = [] +for childKey in relations.keys(): + lengths.append(traverseRelations(relations, relations[childKey], 1)) + +print max(lengths)
c7e45b8f5eb06b2bd1934b357ae44b968375ed6f
geotrek/infrastructure/migrations/0024_auto_20210716_1043.py
geotrek/infrastructure/migrations/0024_auto_20210716_1043.py
# Generated by Django 3.1.13 on 2021-07-16 10:43

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('infrastructure', '0023_auto_20210716_0738'),
    ]

    operations = [
        migrations.AlterField(
            model_name='infrastructure',
            name='maintenance_difficulty',
            field=models.ForeignKey(blank=True, help_text="Danger level of maintenance agents' interventions on infrastructure", null=True, on_delete=django.db.models.deletion.SET_NULL, to='infrastructure.infrastructuremaintenancedifficultylevel', verbose_name='Maintenance difficulty'),
        ),
        migrations.AlterField(
            model_name='infrastructure',
            name='usage_difficulty',
            field=models.ForeignKey(blank=True, help_text="Danger level of end users' infrastructure usage", null=True, on_delete=django.db.models.deletion.SET_NULL, to='infrastructure.infrastructureusagedifficultylevel', verbose_name='Usage difficulty'),
        ),
    ]
Fix: add help text on forms (with migration)
Fix: add help text on forms (with migration)
Python
bsd-2-clause
makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek
--- +++ @@ -0,0 +1,24 @@ +# Generated by Django 3.1.13 on 2021-07-16 10:43 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('infrastructure', '0023_auto_20210716_0738'), + ] + + operations = [ + migrations.AlterField( + model_name='infrastructure', + name='maintenance_difficulty', + field=models.ForeignKey(blank=True, help_text="Danger level of maintenance agents' interventions on infrastructure", null=True, on_delete=django.db.models.deletion.SET_NULL, to='infrastructure.infrastructuremaintenancedifficultylevel', verbose_name='Maintenance difficulty'), + ), + migrations.AlterField( + model_name='infrastructure', + name='usage_difficulty', + field=models.ForeignKey(blank=True, help_text="Danger level of end users' infrastructure usage", null=True, on_delete=django.db.models.deletion.SET_NULL, to='infrastructure.infrastructureusagedifficultylevel', verbose_name='Usage difficulty'), + ), + ]
a6b92bba1c92a2d324b6c1f1602dae0559859356
graffiti/descriptor.py
graffiti/descriptor.py
import copy
from itertools import chain

from graffiti import util

example = {
    "a": lambda b: 1,
    "b": lambda c: 2,
    "c": lambda: 3
}

def mapkv(fn, d):
    return dict(fn(k, v) for k, v in d.iteritems())

def map_keys(fn, d):
    return mapkv(lambda k, v: (fn(k), v), d)

def map_vals(fn, d):
    return mapkv(lambda k, v: (k, fn(v)), d)

def select_keys(fn, d):
    return { k: v for k, v in d.iteritems() if fn(k, v) }

def schema(v):
    if not callable(v):
        v = lambda: v
    return util.fninfo(v)

def dependencies(g):
    deps = {}
    for k, v in g.iteritems():
        for arg in v["required"]:
            deps.setdefault(k, set()).add(arg)
    return deps

def topological(deps):
    if not deps:
        return []
    sources = list(set(deps) - set(chain(*deps.values())))
    if not sources:
        raise ValueError("Graph cycle detected!")
    return (sources +
            topological(select_keys(lambda k, _: k not in sources, deps)))

def base_compile(g):
    if callable(g):
        return g
    else:
        canonical = map_vals(base_compile, g)
        schematized = map_vals(schema, canonical)
        deps = dependencies(schematized)
        rev_topo = topological(deps)[::-1]

        return {
            "schema": schematized,
            "node_order": rev_topo,
            "edges": deps,
        }
Add graph compile with topological sort
Add graph compile with topological sort
Python
mit
SegFaultAX/graffiti
--- +++ @@ -0,0 +1,58 @@ +import copy +from itertools import chain + +from graffiti import util + +example = { + "a": lambda b: 1, + "b": lambda c: 2, + "c": lambda: 3 +} + +def mapkv(fn, d): + return dict(fn(k, v) for k, v in d.iteritems()) + +def map_keys(fn, d): + return mapkv(lambda k, v: (fn(k), v), d) + +def map_vals(fn, d): + return mapkv(lambda k, v: (k, fn(v)), d) + +def select_keys(fn, d): + return { k: v for k, v in d.iteritems() if fn(k, v) } + +def schema(v): + if not callable(v): + v = lambda: v + return util.fninfo(v) + +def dependencies(g): + deps = {} + for k, v in g.iteritems(): + for arg in v["required"]: + deps.setdefault(k, set()).add(arg) + return deps + +def topological(deps): + if not deps: + return [] + sources = list(set(deps) - set(chain(*deps.values()))) + if not sources: + raise ValueError("Graph cycle detected!") + return (sources + + topological(select_keys(lambda k, _: k not in sources, deps))) + +def base_compile(g): + if callable(g): + return g + else: + canonical = map_vals(base_compile, g) + schematized = map_vals(schema, canonical) + deps = dependencies(schematized) + rev_topo = topological(deps)[::-1] + + return { + "schema": schematized, + "node_order": rev_topo, + "edges": deps, + }
ad425b73ab396532b042e8b01be00af6024ef910
apps/meetup/tests/test_models.py
apps/meetup/tests/test_models.py
# coding: utf-8
import datetime
from django.test import TestCase, override_settings
from unittest.mock import patch
from embedly import Embedly
from apps.meetup.models import Talk, Event, Speaker


class FakeOembed():
    _data = {"key": "value"}


class TalkTestCase(TestCase):

    def setUp(self):
        self.event = Event.objects.create(
            date=datetime.datetime(2015, 5, 16, 2, 0, 0),
            name="test",

        )
        self.speaker = Speaker.objects.create(
            name="test",
            slug="test",
        )

    @override_settings(EMBEDLY_KEY="internal")
    def test_set_embedly_data(self):
        talk = Talk.objects.create(
            event=self.event,
            speaker=self.speaker,
            name="test"
        )
        with patch.object(Embedly, 'oembed', return_value=FakeOembed()) as oembed:
            talk.presentation = "http://example.com/presentation/"
            talk.video = "http://example.com/video/"
            talk.save()

        self.assertEqual(oembed.call_count, 2)
        oembed.assert_any_call("http://example.com/presentation/")
        oembed.assert_any_call("http://example.com/video/")

        self.assertEqual(talk.presentation, "http://example.com/presentation/")
        self.assertEqual(talk.video, "http://example.com/video/")
        self.assertEqual(talk.presentation_data, {"key": "value"})
        self.assertEqual(talk.video_data, {"key": "value"})

        with patch.object(Embedly, 'oembed', return_value=FakeOembed()) as oembed:
            talk.presentation = ""
            talk.video = ""
            talk.save()

        self.assertEqual(oembed.call_count, 0)

        self.assertEqual(talk.presentation, "")
        self.assertEqual(talk.video, "")
        self.assertEqual(talk.presentation_data, "")
        self.assertEqual(talk.video_data, "")
Test for Embedly in Talk
Test for Embedly in Talk
Python
bsd-3-clause
moscowpython/moscowpython,VladimirFilonov/moscowdjango,VladimirFilonov/moscowdjango,moscowpython/moscowpython,moscowpython/moscowpython,VladimirFilonov/moscowdjango,moscowdjango/moscowdjango,moscowdjango/moscowdjango,moscowdjango/moscowdjango
--- +++ @@ -0,0 +1,57 @@ +# coding: utf-8 +import datetime +from django.test import TestCase, override_settings +from unittest.mock import patch +from embedly import Embedly +from apps.meetup.models import Talk, Event, Speaker + + +class FakeOembed(): + _data = {"key": "value"} + + +class TalkTestCase(TestCase): + + def setUp(self): + self.event = Event.objects.create( + date=datetime.datetime(2015, 5, 16, 2, 0, 0), + name="test", + + ) + self.speaker = Speaker.objects.create( + name="test", + slug="test", + ) + + @override_settings(EMBEDLY_KEY="internal") + def test_set_embedly_data(self): + talk = Talk.objects.create( + event=self.event, + speaker=self.speaker, + name="test" + ) + with patch.object(Embedly, 'oembed', return_value=FakeOembed()) as oembed: + talk.presentation = "http://example.com/presentation/" + talk.video = "http://example.com/video/" + talk.save() + + self.assertEqual(oembed.call_count, 2) + oembed.assert_any_call("http://example.com/presentation/") + oembed.assert_any_call("http://example.com/video/") + + self.assertEqual(talk.presentation, "http://example.com/presentation/") + self.assertEqual(talk.video, "http://example.com/video/") + self.assertEqual(talk.presentation_data, {"key": "value"}) + self.assertEqual(talk.video_data, {"key": "value"}) + + with patch.object(Embedly, 'oembed', return_value=FakeOembed()) as oembed: + talk.presentation = "" + talk.video = "" + talk.save() + + self.assertEqual(oembed.call_count, 0) + + self.assertEqual(talk.presentation, "") + self.assertEqual(talk.video, "") + self.assertEqual(talk.presentation_data, "") + self.assertEqual(talk.video_data, "")
1a12130c7be8b36a905de7000661d1ff91ed808c
osf/management/commands/checkmigrations.py
osf/management/commands/checkmigrations.py
""" Return a non-zero exit code if there are unapplied migrations. """ import sys from django.db import connections, DEFAULT_DB_ALIAS from django.db.migrations.executor import MigrationExecutor from django.core.management.base import BaseCommand class Command(BaseCommand): def handle(self, *args, **options): connection = connections[DEFAULT_DB_ALIAS] connection.prepare_database() executor = MigrationExecutor(connection) targets = executor.loader.graph.leaf_nodes() unapplied_migrations = executor.migration_plan(targets) if unapplied_migrations: self.stdout.write('The following migrations are unapplied:', self.style.ERROR) for migration in unapplied_migrations: self.stdout.write(' {}.{}'.format(migration[0].app_label, migration[0].name), self.style.MIGRATE_LABEL) sys.exit(1) self.stdout.write('All migrations have been applied. Have a nice day!', self.style.SUCCESS)
Add a management command to check for unapplied migrations.
Add a management command to check for unapplied migrations. [PLAT-972]
Python
apache-2.0
mfraezz/osf.io,brianjgeiger/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,saradbowman/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,adlius/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,mattclark/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,sloria/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,baylee-d/osf.io,felliott/osf.io,aaxelb/osf.io,felliott/osf.io,sloria/osf.io,Johnetordoff/osf.io,mattclark/osf.io,mfraezz/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,mfraezz/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,adlius/osf.io,aaxelb/osf.io,felliott/osf.io,erinspace/osf.io,baylee-d/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,erinspace/osf.io,Johnetordoff/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,mfraezz/osf.io,saradbowman/osf.io,mattclark/osf.io,adlius/osf.io,erinspace/osf.io,cslzchen/osf.io,felliott/osf.io,CenterForOpenScience/osf.io
--- +++ @@ -0,0 +1,23 @@ +""" +Return a non-zero exit code if there are unapplied migrations. +""" +import sys + +from django.db import connections, DEFAULT_DB_ALIAS +from django.db.migrations.executor import MigrationExecutor +from django.core.management.base import BaseCommand + +class Command(BaseCommand): + + def handle(self, *args, **options): + connection = connections[DEFAULT_DB_ALIAS] + connection.prepare_database() + executor = MigrationExecutor(connection) + targets = executor.loader.graph.leaf_nodes() + unapplied_migrations = executor.migration_plan(targets) + if unapplied_migrations: + self.stdout.write('The following migrations are unapplied:', self.style.ERROR) + for migration in unapplied_migrations: + self.stdout.write(' {}.{}'.format(migration[0].app_label, migration[0].name), self.style.MIGRATE_LABEL) + sys.exit(1) + self.stdout.write('All migrations have been applied. Have a nice day!', self.style.SUCCESS)
097464150228c3b5ba7cfb94619363cfea1fba1d
user-scripts/katia/code-LUI-secmin-isl.py
user-scripts/katia/code-LUI-secmin-isl.py
#!/usr/bin/env python3

import os
import sys

import rasterio
from projections.rasterset import RasterSet
import projections.predicts as predicts
import projections.utils as utils

# Import standard PREDICTS rasters
rasters = predicts.rasterset('1km', 'medium', year = 2005)

for suffix in ('islands', 'mainland'):
    # Open the BII raster file
    mask_file = 'C:/Users/katis2/Desktop/Final_projections/Clip_variables/abundance-based-bii-%s.tif' % suffix
    mask_ds = rasterio.open(mask_file)

    # set up the rasterset, cropping to mainlands
    rs = RasterSet(rasters, mask=mask_ds, maskval=-9999, crop=True)

    # Run through each land-use
    for lu in ('cropland', 'pasture', 'primary', 'secondary', 'urban'):
        # And every use intensity
        for ui in ('minimal', 'light', 'intense'):
            name = '%s_%s' % (lu, ui)
            print(name)
            oname = utils.outfn('katia', '%s-%s.tif' % (name, suffix))
            if os.path.isfile(oname) or name in ('secondary_intense',
                                                 'urnan_light'):
                continue
            rs.write(name, oname)
Add script to generate LUI rasters for Katia
Add script to generate LUI rasters for Katia This script iterates through every land-use x intensity and writes out the raster for the LUI.
Python
apache-2.0
ricardog/raster-project,ricardog/raster-project,ricardog/raster-project,ricardog/raster-project,ricardog/raster-project
--- +++ @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 + +import os +import sys + +import rasterio +from projections.rasterset import RasterSet +import projections.predicts as predicts +import projections.utils as utils + +# Import standard PREDICTS rasters +rasters = predicts.rasterset('1km', 'medium', year = 2005) + +for suffix in ('islands', 'mainland'): + # Open the BII raster file + mask_file = 'C:/Users/katis2/Desktop/Final_projections/Clip_variables/abundance-based-bii-%s.tif' % suffix + mask_ds = rasterio.open(mask_file) + + # set up the rasterset, cropping to mainlands + rs = RasterSet(rasters, mask=mask_ds, maskval=-9999, crop=True) + + # Run through each land-use + for lu in ('cropland', 'pasture', 'primary', 'secondary', 'urban'): + # And every use intensity + for ui in ('minimal', 'light', 'intense'): + name = '%s_%s' % (lu, ui) + print(name) + oname = utils.outfn('katia', '%s-%s.tif' % (name, suffix)) + if os.path.isfile(oname) or name in ('secondary_intense', + 'urnan_light'): + continue + rs.write(name, oname) +
ebf89478f7c841ee7d61a4081d4d6ba2f2cabe05
app/main/auth.py
app/main/auth.py
from functools import wraps

from flask import abort
from flask_login import current_user


def role_required(*roles):
    """Ensure that logged in user has one of the required roles.

    Return 403 if the user doesn't have a required role.

    Should be applied before the `@login_required` decorator:

        @login_required
        @role_required('admin', 'admin-ccs')
        def view():
            ...

    """

    def role_decorator(func):
        @wraps(func)
        def decorated_view(*args, **kwargs):
            if not any(current_user.has_role(role) for role in roles):
                return abort(403, "One of {} roles required".format(", ".join(roles)))
            return func(*args, **kwargs)

        return decorated_view

    return role_decorator
Add `role_required` view decorator to check current_user role
Add `role_required` view decorator to check current_user role Returns 403 if user has none of the roles listed in the decorator arguments.
Python
mit
alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend,alphagov/digitalmarketplace-admin-frontend
--- +++ @@ -0,0 +1,30 @@ +from functools import wraps + +from flask import abort +from flask_login import current_user + + +def role_required(*roles): + """Ensure that logged in user has one of the required roles. + + Return 403 if the user doesn't have a required role. + + Should be applied before the `@login_required` decorator: + + @login_required + @role_required('admin', 'admin-ccs') + def view(): + ... + + """ + + def role_decorator(func): + @wraps(func) + def decorated_view(*args, **kwargs): + if not any(current_user.has_role(role) for role in roles): + return abort(403, "One of {} roles required".format(", ".join(roles))) + return func(*args, **kwargs) + + return decorated_view + + return role_decorator
ff5587bc44bde955e456ed87e7ed5822ec3e500a
tests/webcam_framerate.py
tests/webcam_framerate.py
#!/usr/bin/env python

import qrtools, gi, os
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk, Gst
from avocado import Test
from utils import webcam

class WebcamReadQR(Test):
    """
    Uses the camera selected by v4l2src by default (/dev/video0) to get the
    framerate by creating a pipeline with an fpsdisplaysink and initializing
    Gtk main loop. For now is tested whether the framerate is 30 or more.

    """
    def setUp(self):
        self.error = None
        #if not os.path.exists('/dev/video0'):
            #self.skip("No webcam detected: /dev/video0 cannot be found");

    def test(self):
        elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false '
                    'signal-fps-measurements=true']
        webcam.create_video_pipeline(self, gst_elements=elements,
                                     v4l2src_args="num-buffers=2000")

        bus = self.video_player.get_bus()
        bus.connect("fps-measurements", self.on_fps_measurement)

        Gtk.main()

        if self.error != None:
            self.fail("Error: {0}".format(self.error))

        if self.fps < 30:
            self.fail("Measured fps is below 30, {0}".format(self.fps))
        self.log.debug("Measured fps is 30 or more, {0}".format(self.fps))

    def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps):
        self.fps = avgfps

    def on_message(self, bus, message):
        t = message.type

        if t == Gst.MessageType.EOS:
            webcam.exit(self)

        elif t == Gst.MessageType.ERROR:
            webcam.exit(self)
            self.error = message.parse_error()
Add initial framerate webcam test structure
Add initial framerate webcam test structure
Python
mit
daveol/Fedora-Test-Laptop,daveol/Fedora-Test-Laptop
--- +++ @@ -0,0 +1,51 @@ +#!/usr/bin/env python + +import qrtools, gi, os +gi.require_version('Gtk', '3.0') +gi.require_version('Gst', '1.0') +from gi.repository import Gtk, Gst +from avocado import Test +from utils import webcam + +class WebcamReadQR(Test): + """ + Uses the camera selected by v4l2src by default (/dev/video0) to get the + framerate by creating a pipeline with an fpsdisplaysink and initializing + Gtk main loop. For now is tested whether the framerate is 30 or more. + + """ + def setUp(self): + self.error = None + #if not os.path.exists('/dev/video0'): + #self.skip("No webcam detected: /dev/video0 cannot be found"); + + def test(self): + elements = ['fpsdisplaysink video-sink=fakesink text-overlay=false ' + 'signal-fps-measurements=true'] + webcam.create_video_pipeline(self, gst_elements=elements, + v4l2src_args="num-buffers=2000") + + bus = self.video_player.get_bus() + bus.connect("fps-measurements", self.on_fps_measurement) + + Gtk.main() + + if self.error != None: + self.fail("Error: {0}".format(self.error)) + + if self.fps < 30: + self.fail("Measured fps is below 30, {0}".format(self.fps)) + self.log.debug("Measured fps is 30 or more, {0}".format(self.fps)) + + def on_fps_measurement(self, fpsdisplaysink, fps, droprate, avgfps): + self.fps = avgfps + + def on_message(self, bus, message): + t = message.type + + if t == Gst.MessageType.EOS: + webcam.exit(self) + + elif t == Gst.MessageType.ERROR: + webcam.exit(self) + self.error = message.parse_error()
13a02d424fadd4401d393b2443dc97183c933221
simple-client-api/request-workflow-exec/python2_3/src/request-workflow-exec.py
simple-client-api/request-workflow-exec/python2_3/src/request-workflow-exec.py
#!/usr/bin/python

from boto3.session import Session
import sys
import os
import uuid
import time


def get_env(name):
    if name in os.environ:
        return os.environ[name]
    return None


aws_args = {
    'aws_access_key_id': get_env('AWS_ACCESS_KEY'),
    'aws_secret_access_key': get_env('AWS_SECRET_KEY'),
    'region_name': get_env('AWS_REGION'),
    'aws_session_token': get_env('AWS_SESSION_TOKEN'),
    'profile_name': get_env('AWS_PROFILE_NAME')
}
AWS_ARG_MAP = {
    '--ak': 'aws_access_key_id',
    '--as': 'aws_secret_access_key',
    '--ar': 'region_name',
    '--at': 'aws_session_token',
    '--ap': 'profile_name'
}
dynamodb_args = {}

db_prefix = 'whimbrel_'
workflow = None
source = 'Python CLI'

i = 1
while i < len(sys.argv):
    # AWS specific setup
    if sys.argv[i] in AWS_ARG_MAP:
        arg = sys.argv[i]
        i += 1
        aws_args[AWS_ARG_MAP[arg]] = sys.argv[i]

    # DynamoDB specific setup
    elif sys.argv[i] == '--endpoint':
        i += 1
        dynamodb_args['endpoint_url'] = sys.argv[i]
    elif sys.argv[i] == '--ssl':
        dynamodb_args['use_ssl'] = True

    # Whimbrel specific setup
    elif sys.argv[i] == '--prefix':
        i += 1
        db_prefix = sys.argv[i]
    elif sys.argv[i] == '--workflow':
        i += 1
        workflow = sys.argv[i]
    elif sys.argv[i] == '--source':
        i += 1
        source = sys.argv[i]
    i += 1

session = Session(**aws_args)
db = session.client('dynamodb', **dynamodb_args)

workflow_request_id = workflow + '::' + str(uuid.uuid1())
when_epoch = int(time.time())
when_gm = time.gmtime(when_epoch)
when_list = [when_gm.tm_year, when_gm.tm_mon, when_gm.tm_mday, when_gm.tm_hour, when_gm.tm_min, when_gm.tm_sec]


db.put_item(
    TableName=db_prefix + 'workflow_request',
    Item={
        "workflow_request_id": {"S":workflow_request_id},
        "workflow_name": {"S":workflow},
        #"when": {"L": when_list},
        "when_epoch": {"N": str(when_epoch)},
        "source": {"S": source}
    }
)
Add python (boto3-based) simple api workflow request script.
Add python (boto3-based) simple api workflow request script.
Python
apache-2.0
groboclown/whimbrel,groboclown/whimbrel,groboclown/whimbrel,groboclown/whimbrel
--- +++ @@ -0,0 +1,81 @@ +#!/usr/bin/python + +from boto3.session import Session +import sys +import os +import uuid +import time + + +def get_env(name): + if name in os.environ: + return os.environ[name] + return None + + +aws_args = { + 'aws_access_key_id': get_env('AWS_ACCESS_KEY'), + 'aws_secret_access_key': get_env('AWS_SECRET_KEY'), + 'region_name': get_env('AWS_REGION'), + 'aws_session_token': get_env('AWS_SESSION_TOKEN'), + 'profile_name': get_env('AWS_PROFILE_NAME') +} +AWS_ARG_MAP = { + '--ak': 'aws_access_key_id', + '--as': 'aws_secret_access_key', + '--ar': 'region_name', + '--at': 'aws_session_token', + '--ap': 'profile_name' +} +dynamodb_args = {} + +db_prefix = 'whimbrel_' +workflow = None +source = 'Python CLI' + +i = 1 +while i < len(sys.argv): + # AWS specific setup + if sys.argv[i] in AWS_ARG_MAP: + arg = sys.argv[i] + i += 1 + aws_args[AWS_ARG_MAP[arg]] = sys.argv[i] + + # DynamoDB specific setup + elif sys.argv[i] == '--endpoint': + i += 1 + dynamodb_args['endpoint_url'] = sys.argv[i] + elif sys.argv[i] == '--ssl': + dynamodb_args['use_ssl'] = True + + # Whimbrel specific setup + elif sys.argv[i] == '--prefix': + i += 1 + db_prefix = sys.argv[i] + elif sys.argv[i] == '--workflow': + i += 1 + workflow = sys.argv[i] + elif sys.argv[i] == '--source': + i += 1 + source = sys.argv[i] + i += 1 + +session = Session(**aws_args) +db = session.client('dynamodb', **dynamodb_args) + +workflow_request_id = workflow + '::' + str(uuid.uuid1()) +when_epoch = int(time.time()) +when_gm = time.gmtime(when_epoch) +when_list = [when_gm.tm_year, when_gm.tm_mon, when_gm.tm_mday, when_gm.tm_hour, when_gm.tm_min, when_gm.tm_sec] + + +db.put_item( + TableName=db_prefix + 'workflow_request', + Item={ + "workflow_request_id": {"S":workflow_request_id}, + "workflow_name": {"S":workflow}, + #"when": {"L": when_list}, + "when_epoch": {"N": str(when_epoch)}, + "source": {"S": source} + } +)
5cdfbce122ec0248104049760dadc1c83a01f7fb
tests/frontends/mpd/regression_test.py
tests/frontends/mpd/regression_test.py
import unittest

from mopidy.backends.dummy import DummyBackend
from mopidy.frontends.mpd import dispatcher
from mopidy.mixers.dummy import DummyMixer
from mopidy.models import Track

class IssueGH18RegressionTest(unittest.TestCase):
    """
    The issue: http://github.com/jodal/mopidy/issues#issue/18

    How to reproduce:

        Play, random on, next, random off, next, next.

        At this point it gives the same song over and over.
    """

    def setUp(self):
        self.backend = DummyBackend(mixer_class=DummyMixer)
        self.backend.current_playlist.append([
            Track(uri='a'), Track(uri='b'), Track(uri='c'),
            Track(uri='d'), Track(uri='e'), Track(uri='f')])
        self.mpd = dispatcher.MpdDispatcher(backend=self.backend)

    def test(self):
        self.mpd.handle_request(u'play')
        self.mpd.handle_request(u'random "1"')
        self.mpd.handle_request(u'next')
        self.mpd.handle_request(u'random "0"')
        self.mpd.handle_request(u'next')

        self.mpd.handle_request(u'next')
        cp_track_1 = self.backend.playback.current_cp_track
        self.mpd.handle_request(u'next')
        cp_track_2 = self.backend.playback.current_cp_track
        self.mpd.handle_request(u'next')
        cp_track_3 = self.backend.playback.current_cp_track

        self.assertNotEqual(cp_track_1, cp_track_2)
        self.assertNotEqual(cp_track_2, cp_track_3)
Add regression test for GH-18
Add regression test for GH-18
Python
apache-2.0
ZenithDK/mopidy,mopidy/mopidy,kingosticks/mopidy,vrs01/mopidy,vrs01/mopidy,jcass77/mopidy,jodal/mopidy,mokieyue/mopidy,adamcik/mopidy,tkem/mopidy,glogiotatidis/mopidy,rawdlite/mopidy,mokieyue/mopidy,pacificIT/mopidy,bacontext/mopidy,bacontext/mopidy,mopidy/mopidy,bencevans/mopidy,liamw9534/mopidy,ZenithDK/mopidy,SuperStarPL/mopidy,mokieyue/mopidy,tkem/mopidy,quartz55/mopidy,quartz55/mopidy,diandiankan/mopidy,jcass77/mopidy,dbrgn/mopidy,ali/mopidy,swak/mopidy,ali/mopidy,ali/mopidy,jcass77/mopidy,priestd09/mopidy,jodal/mopidy,mokieyue/mopidy,diandiankan/mopidy,hkariti/mopidy,glogiotatidis/mopidy,diandiankan/mopidy,pacificIT/mopidy,pacificIT/mopidy,ZenithDK/mopidy,priestd09/mopidy,quartz55/mopidy,rawdlite/mopidy,dbrgn/mopidy,swak/mopidy,swak/mopidy,dbrgn/mopidy,jmarsik/mopidy,adamcik/mopidy,woutervanwijk/mopidy,kingosticks/mopidy,hkariti/mopidy,rawdlite/mopidy,bencevans/mopidy,adamcik/mopidy,woutervanwijk/mopidy,jmarsik/mopidy,tkem/mopidy,rawdlite/mopidy,glogiotatidis/mopidy,SuperStarPL/mopidy,swak/mopidy,hkariti/mopidy,bencevans/mopidy,jodal/mopidy,ZenithDK/mopidy,bencevans/mopidy,ali/mopidy,jmarsik/mopidy,diandiankan/mopidy,abarisain/mopidy,quartz55/mopidy,liamw9534/mopidy,tkem/mopidy,priestd09/mopidy,hkariti/mopidy,SuperStarPL/mopidy,SuperStarPL/mopidy,dbrgn/mopidy,vrs01/mopidy,pacificIT/mopidy,abarisain/mopidy,mopidy/mopidy,bacontext/mopidy,bacontext/mopidy,jmarsik/mopidy,glogiotatidis/mopidy,kingosticks/mopidy,vrs01/mopidy
--- +++ @@ -0,0 +1,41 @@ +import unittest + +from mopidy.backends.dummy import DummyBackend +from mopidy.frontends.mpd import dispatcher +from mopidy.mixers.dummy import DummyMixer +from mopidy.models import Track + +class IssueGH18RegressionTest(unittest.TestCase): + """ + The issue: http://github.com/jodal/mopidy/issues#issue/18 + + How to reproduce: + + Play, random on, next, random off, next, next. + + At this point it gives the same song over and over. + """ + + def setUp(self): + self.backend = DummyBackend(mixer_class=DummyMixer) + self.backend.current_playlist.append([ + Track(uri='a'), Track(uri='b'), Track(uri='c'), + Track(uri='d'), Track(uri='e'), Track(uri='f')]) + self.mpd = dispatcher.MpdDispatcher(backend=self.backend) + + def test(self): + self.mpd.handle_request(u'play') + self.mpd.handle_request(u'random "1"') + self.mpd.handle_request(u'next') + self.mpd.handle_request(u'random "0"') + self.mpd.handle_request(u'next') + + self.mpd.handle_request(u'next') + cp_track_1 = self.backend.playback.current_cp_track + self.mpd.handle_request(u'next') + cp_track_2 = self.backend.playback.current_cp_track + self.mpd.handle_request(u'next') + cp_track_3 = self.backend.playback.current_cp_track + + self.assertNotEqual(cp_track_1, cp_track_2) + self.assertNotEqual(cp_track_2, cp_track_3)
7cfc6abb8e573b0dd996f5849480a72652201279
consolidate_stats.py
consolidate_stats.py
###
# Call this with 4 parameters: the file to read data from, the file to read
# extradata from, the file to write the combined data to, the slack interval
# to match data and extradata timestamps.
#
# IMPORTANT: You need to manually sort -g the data file, because torperf
# might screw up ordering and this script expects sorted lines!
###

import sys, time

class Data:
    def __init__(self, filename, mode="r"):
        self._filename = filename
        self._file = open(filename, mode)

    def prepline(self):
        line = self._file.readline()
        if line == "" or line == "\n":
            raise StopIteration
        if line[-1] == "\n":
            line = line[:-1]
        return line.split(" ")

    def next(self):
        return self.prepline()

    def __iter__(self):
        return self

class ExtraData(Data):
    def __init__(self, filename):
        Data.__init__(self, filename)
        self._curData = None
        self._retCurrent = False

    def next(self):
        if self._retCurrent == True:
            self._retCurrent = False
            return self._curData
        cont = self.prepline()
        if cont[0] == "ok":
            self._curData = cont[1:]
            return self._curData
        print('Ignoring line "' + " ".join(cont) + '"')
        return self.next()

    def keepCurrent(self):
        self._retCurrent = True

class NormalData(Data):
    def __init__(self, filename):
        Data.__init__(self, filename)

class BetterData(Data):
    def __init__(self, filename):
        Data.__init__(self, filename, "w")

    def writeLine(self, line):
        self._file.write(" ".join(line) + "\n")

def main():
    if len(sys.argv) < 5:
        print("Bad arguments")
        sys.exit(1)

    normalData = NormalData(sys.argv[1])
    extraData = ExtraData(sys.argv[2])
    betterData = BetterData(sys.argv[3])
    slack = int(sys.argv[4])
    for normal in normalData:
        normalTime = int(normal[0])
        for extra in extraData:
            extraTime = int(extra[0])
            if normalTime > extraTime:
                print("Got unexpected extradata entry" + " ".join(extra))
                continue
            if normalTime + slack < extraTime:
                print("Got a data entry without extradata " + " ".join(normal))
                extraData.keepCurrent()
                break
            normal.extend(extra)
            betterData.writeLine(normal)
            break


if __name__ == "__main__":
    main()
Add a script to combine data and extradata
Add a script to combine data and extradata
Python
bsd-3-clause
meejah/torperf,meejah/torperf,meejah/torperf,aaronsw/torperf,aaronsw/torperf,mrphs/torperf,mrphs/torperf,aaronsw/torperf,mrphs/torperf,meejah/torperf
--- +++ @@ -0,0 +1,88 @@ +### +# Call this with 4 parameters: the file to read data from, the file to read +# extradata from, the file to write the combined data to, the slack interval +# to match data and extradata timestamps. +# +# IMPORTANT: You need to manually sort -g the data file, because torperf +# might screw up ordering and this script expects sorted lines! +### + +import sys, time + +class Data: + def __init__(self, filename, mode="r"): + self._filename = filename + self._file = open(filename, mode) + + def prepline(self): + line = self._file.readline() + if line == "" or line == "\n": + raise StopIteration + if line[-1] == "\n": + line = line[:-1] + return line.split(" ") + + def next(self): + return self.prepline() + + def __iter__(self): + return self + +class ExtraData(Data): + def __init__(self, filename): + Data.__init__(self, filename) + self._curData = None + self._retCurrent = False + + def next(self): + if self._retCurrent == True: + self._retCurrent = False + return self._curData + cont = self.prepline() + if cont[0] == "ok": + self._curData = cont[1:] + return self._curData + print('Ignoring line "' + " ".join(cont) + '"') + return self.next() + + def keepCurrent(self): + self._retCurrent = True + +class NormalData(Data): + def __init__(self, filename): + Data.__init__(self, filename) + +class BetterData(Data): + def __init__(self, filename): + Data.__init__(self, filename, "w") + + def writeLine(self, line): + self._file.write(" ".join(line) + "\n") + +def main(): + if len(sys.argv) < 5: + print("Bad arguments") + sys.exit(1) + + normalData = NormalData(sys.argv[1]) + extraData = ExtraData(sys.argv[2]) + betterData = BetterData(sys.argv[3]) + slack = int(sys.argv[4]) + for normal in normalData: + normalTime = int(normal[0]) + for extra in extraData: + extraTime = int(extra[0]) + if normalTime > extraTime: + print("Got unexpected extradata entry" + " ".join(extra)) + continue + if normalTime + slack < extraTime: + print("Got a data entry without extradata " + " ".join(normal)) + extraData.keepCurrent() + break + normal.extend(extra) + betterData.writeLine(normal) + break + + +if __name__ == "__main__": + main()
821ae1f40c643d0ee2f9cbdfbce83c6f75196895
quantum/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
quantum/db/migration/alembic_migrations/versions/1d76643bcec4_nvp_netbinding.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""nvp_netbinding

Revision ID: 1d76643bcec4
Revises: 48b6f43f7471
Create Date: 2013-01-15 07:36:10.024346

"""

# revision identifiers, used by Alembic.
revision = '1d76643bcec4'
down_revision = '48b6f43f7471'

# Change to ['*'] if this migration applies to all plugins

migration_for_plugins = [
    'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2'
]

from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql

from quantum.db import migration


def upgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.create_table(
        'nvp_network_bindings',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'),
                  nullable=False),
        sa.Column('tz_uuid', sa.String(length=36), nullable=True),
        sa.Column('vlan_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('network_id'))


def downgrade(active_plugin=None, options=None):
    if not migration.should_run(active_plugin, migration_for_plugins):
        return

    op.drop_table('nvp_network_bindings')
Add migration for network bindings in NVP plugin
Add migration for network bindings in NVP plugin Bug 1099895 Ensures the table nvp_network_bindings is created when upgrading database to head, by adding an appropriate alembic migration Change-Id: I4a794ed0ec6866d657cb2470d5aa67828e81aa75
Python
apache-2.0
gkotton/vmware-nsx,gkotton/vmware-nsx
--- +++ @@ -0,0 +1,63 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# +# Copyright 2013 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""nvp_netbinding + +Revision ID: 1d76643bcec4 +Revises: 48b6f43f7471 +Create Date: 2013-01-15 07:36:10.024346 + +""" + +# revision identifiers, used by Alembic. +revision = '1d76643bcec4' +down_revision = '48b6f43f7471' + +# Change to ['*'] if this migration applies to all plugins + +migration_for_plugins = [ + 'quantum.plugins.nicira.nicira_nvp_plugin.QuantumPlugin.NvpPluginV2' +] + +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import mysql + +from quantum.db import migration + + +def upgrade(active_plugin=None, options=None): + if not migration.should_run(active_plugin, migration_for_plugins): + return + + op.create_table( + 'nvp_network_bindings', + sa.Column('network_id', sa.String(length=36), nullable=False), + sa.Column('binding_type', sa.Enum('flat', 'vlan', 'stt', 'gre'), + nullable=False), + sa.Column('tz_uuid', sa.String(length=36), nullable=True), + sa.Column('vlan_id', sa.Integer(), nullable=True), + sa.ForeignKeyConstraint(['network_id'], ['networks.id'], + ondelete='CASCADE'), + sa.PrimaryKeyConstraint('network_id')) + + +def downgrade(active_plugin=None, options=None): + if not migration.should_run(active_plugin, migration_for_plugins): + return + + op.drop_table('nvp_network_bindings')
9bdf597477d513b84de84a6872e15833c25efa19
src/location_fetch.py
src/location_fetch.py
#!env python

import database as db
from database.model import Team, RouteDistance
from geotools import simple_distance
from geotools.routing import MapPoint
from webapp.cfg.config import DB_CONNECTION

print "init db..."
db.init_session(connection_string=DB_CONNECTION)

print "fetch teams..."
teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).all()

distances = []

print "fetch distances..."
for (idx, team_from) in enumerate(teams):
    location_from = MapPoint.from_team(team_from)
    for team_to in teams[(idx + 1):]:
        location_to = MapPoint.from_team(team_to)

        dist = int(simple_distance(location_from, location_to) * 1000)

        distances.append(RouteDistance(location_from=team_from.location, location_to=team_to.location, distance=dist))
        distances.append(RouteDistance(location_to=team_from.location, location_from=team_to.location, distance=dist))

print "write to db..."
db.session.add_all(distances)
db.session.commit()
Add a helper script for initial distance fetch.
Add a helper script for initial distance fetch.
Python
bsd-3-clause
janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system
--- +++ @@ -0,0 +1,30 @@ +#!env python + +import database as db +from database.model import Team, RouteDistance +from geotools import simple_distance +from geotools.routing import MapPoint +from webapp.cfg.config import DB_CONNECTION + +print "init db..." +db.init_session(connection_string=DB_CONNECTION) + +print "fetch teams..." +teams = db.session.query(Team).filter_by(deleted=False).filter_by(confirmed=True).all() + +distances = [] + +print "fetch distances..." +for (idx, team_from) in enumerate(teams): + location_from = MapPoint.from_team(team_from) + for team_to in teams[(idx + 1):]: + location_to = MapPoint.from_team(team_to) + + dist = int(simple_distance(location_from, location_to) * 1000) + + distances.append(RouteDistance(location_from=team_from.location, location_to=team_to.location, distance=dist)) + distances.append(RouteDistance(location_to=team_from.location, location_from=team_to.location, distance=dist)) + +print "write to db..." +db.session.add_all(distances) +db.session.commit()
ed8add068ef8cdbbe8bed412272a8a608e003bb9
tests/integration/modules/mac_service.py
tests/integration/modules/mac_service.py
# -*- coding: utf-8 -*- ''' integration tests for mac_service ''' # Import python libs from __future__ import absolute_import, print_function # Import Salt Testing libs from salttesting.helpers import ensure_in_syspath, destructiveTest ensure_in_syspath('../../') # Import salt libs import integration import salt.utils class MacServiceModuleTest(integration.ModuleCase): ''' Validate the mac_service module ''' def setUp(self): ''' Get current settings ''' if not salt.utils.is_darwin(): self.skipTest('Test only available on Mac OS X') if not salt.utils.which('launchctl'): self.skipTest('Test requires launchctl binary') if salt.utils.get_uid(salt.utils.get_user()) != 0: self.skipTest('Test requires root') def tearDown(self): ''' Reset to original settings ''' pass if __name__ == '__main__': from integration import run_tests run_tests(MacServiceModuleTest)
Add integration tests basic framework
Add integration tests basic framework
Python
apache-2.0
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
--- +++ @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +''' +integration tests for mac_service +''' + +# Import python libs +from __future__ import absolute_import, print_function + +# Import Salt Testing libs +from salttesting.helpers import ensure_in_syspath, destructiveTest +ensure_in_syspath('../../') + +# Import salt libs +import integration +import salt.utils + + +class MacServiceModuleTest(integration.ModuleCase): + ''' + Validate the mac_service module + ''' + + def setUp(self): + ''' + Get current settings + ''' + if not salt.utils.is_darwin(): + self.skipTest('Test only available on Mac OS X') + + if not salt.utils.which('launchctl'): + self.skipTest('Test requires launchctl binary') + + if salt.utils.get_uid(salt.utils.get_user()) != 0: + self.skipTest('Test requires root') + + def tearDown(self): + ''' + Reset to original settings + ''' + pass + + +if __name__ == '__main__': + from integration import run_tests + run_tests(MacServiceModuleTest) +
3b9a79e20bc3e48b44087c8c78e519e7085bce92
backend/scripts/conversion/addprojs.py
backend/scripts/conversion/addprojs.py
#!/usr/bin/env python import rethinkdb as r from optparse import OptionParser def main(conn): groups = list(r.table('usergroups').run(conn)) for group in groups: owner = group['owner'] projects = list(r.table('projects').filter({'owner': owner}) .pluck('id', 'name').run(conn)) group['projects'] = projects r.table('usergroups').get(group['id']).update(group).run(conn) samples = list(r.table('samples').run(conn)) for sample in samples: owner = sample['owner'] projects = list(r.table('projects').filter({'owner': owner}) .pluck('id', 'name').run(conn)) sample['projects'] = projects r.table('samples').get(sample['id']).update(sample).run(conn) if __name__ == "__main__": parser = OptionParser() parser.add_option("-P", "--port", dest="port", type="int", help="rethinkdb port", default=30815) (options, args) = parser.parse_args() conn = r.connect('localhost', options.port, db='materialscommons') main(conn)
Add projects field to samples and usergroups.
Add projects field to samples and usergroups.
Python
mit
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
--- +++ @@ -0,0 +1,31 @@ +#!/usr/bin/env python + +import rethinkdb as r +from optparse import OptionParser + + +def main(conn): + groups = list(r.table('usergroups').run(conn)) + for group in groups: + owner = group['owner'] + projects = list(r.table('projects').filter({'owner': owner}) + .pluck('id', 'name').run(conn)) + group['projects'] = projects + r.table('usergroups').get(group['id']).update(group).run(conn) + + samples = list(r.table('samples').run(conn)) + for sample in samples: + owner = sample['owner'] + projects = list(r.table('projects').filter({'owner': owner}) + .pluck('id', 'name').run(conn)) + sample['projects'] = projects + r.table('samples').get(sample['id']).update(sample).run(conn) + +if __name__ == "__main__": + parser = OptionParser() + parser.add_option("-P", "--port", dest="port", type="int", + help="rethinkdb port", default=30815) + (options, args) = parser.parse_args() + + conn = r.connect('localhost', options.port, db='materialscommons') + main(conn)
d4eb11f846da5b38a56599ca18d68fd2344dec7c
DilipadCorpus.py
DilipadCorpus.py
"""Class to access dilipad corpus.""" import gensim import glob import codecs class DilipadCorpus(gensim.corpora.TextCorpus): def get_texts(self): for txt in self.input: with codecs.open(txt, 'rb', 'utf8') as f: words = f.read().split() yield words def __len__(self): return len(self.input) if __name__ == '__main__': files = glob.glob('/home/jvdzwaan/data/dilipad/txt-sample/*.txt') corpus = DilipadCorpus(files) print corpus.dictionary #for doc in corpus: # for w in doc: # print w print len(corpus.dictionary) a = [sum([f for w, f in doc]) for doc in corpus] print len(a) print sorted(a) print max(a) #for k, v in corpus.dictionary.iteritems(): # print k, v b = corpus.dictionary.keys() b.sort() #print b print corpus.dictionary.get(0)
Add class to access dilipad text data
Add class to access dilipad text data Currently only the nouns (topic words) are accessed. This class must be updated to also return the opinion words of a text. The idea is to have such a corpus per perspective for cross perspective topic modeling.
Python
apache-2.0
NLeSC/cptm,NLeSC/cptm
--- +++ @@ -0,0 +1,39 @@ +"""Class to access dilipad corpus.""" + +import gensim +import glob +import codecs + + +class DilipadCorpus(gensim.corpora.TextCorpus): + def get_texts(self): + for txt in self.input: + with codecs.open(txt, 'rb', 'utf8') as f: + words = f.read().split() + yield words + + def __len__(self): + return len(self.input) + + +if __name__ == '__main__': + files = glob.glob('/home/jvdzwaan/data/dilipad/txt-sample/*.txt') + + corpus = DilipadCorpus(files) + print corpus.dictionary + #for doc in corpus: + # for w in doc: + # print w + print len(corpus.dictionary) + a = [sum([f for w, f in doc]) for doc in corpus] + print len(a) + print sorted(a) + print max(a) + + #for k, v in corpus.dictionary.iteritems(): + # print k, v + b = corpus.dictionary.keys() + b.sort() + #print b + print corpus.dictionary.get(0) +
e850ab188e73f91bb4d85954d5f957ceb90d069a
cloudaux/tests/gcp/test_integration.py
cloudaux/tests/gcp/test_integration.py
import pytest import os from cloudaux.gcp.iam import get_project_iam_policy from cloudaux.gcp.gce.project import get_project from cloudaux.gcp.crm import get_iam_policy from cloudaux.gcp.gce.address import ( list_addresses, list_global_addresses, ) from cloudaux.gcp.gce.disk import ( list_disks, ) from cloudaux.gcp.gce.forwarding_rule import ( list_forwarding_rules, list_global_forwarding_rules, ) from cloudaux.gcp.gce.instance import ( list_instances ) from cloudaux.gcp.gce.zone import ( list_zones ) @pytest.fixture def project(): return os.getenv('CLOUDAUX_GCP_TEST_PROJECT') @pytest.mark.skipif( os.getenv('CLOUDAUX_GCP_TEST_PROJECT') is None, reason="Cannot run integration tests unless GCP project configured" ) @pytest.mark.parametrize('function,p_param', [ (list_addresses, 'project'), (list_forwarding_rules, 'project'), (list_global_addresses, 'project'), (list_global_forwarding_rules, 'project'), (get_iam_policy, 'resource'), (get_project, 'project'), (get_project_iam_policy, 'resource'), ]) def test_cloudaux_gcp_global_integration(function, p_param, project): result = function(**{p_param: project}) assert result is not None @pytest.mark.skipif( os.getenv('CLOUDAUX_GCP_TEST_PROJECT') is None, reason="Cannot run integration tests unless GCP project configured" ) @pytest.mark.parametrize('function,p_param,z_param', [ (list_disks, 'project', 'zone'), (list_instances, 'project', 'zone'), ]) def test_cloudaux_gcp_zoned_integration(function, p_param, z_param, project): for zone in list_zones(project=project): result = function(**{p_param: project, z_param: zone['name']}) assert result is not None
Add an integration test (disabled by default).
Add an integration test (disabled by default). This gives a way to check the Cloudaux GCP resource retrieval against a GCP test project. They are disabled unless the CLOUDAUX_GCP_TEST_PROJECT environment variable is defined.
Python
apache-2.0
Netflix-Skunkworks/cloudaux
--- +++ @@ -0,0 +1,58 @@ +import pytest +import os + +from cloudaux.gcp.iam import get_project_iam_policy +from cloudaux.gcp.gce.project import get_project +from cloudaux.gcp.crm import get_iam_policy +from cloudaux.gcp.gce.address import ( + list_addresses, + list_global_addresses, +) +from cloudaux.gcp.gce.disk import ( + list_disks, +) +from cloudaux.gcp.gce.forwarding_rule import ( + list_forwarding_rules, + list_global_forwarding_rules, +) +from cloudaux.gcp.gce.instance import ( + list_instances +) +from cloudaux.gcp.gce.zone import ( + list_zones +) + +@pytest.fixture +def project(): + return os.getenv('CLOUDAUX_GCP_TEST_PROJECT') + +@pytest.mark.skipif( + os.getenv('CLOUDAUX_GCP_TEST_PROJECT') is None, + reason="Cannot run integration tests unless GCP project configured" +) +@pytest.mark.parametrize('function,p_param', [ + (list_addresses, 'project'), + (list_forwarding_rules, 'project'), + (list_global_addresses, 'project'), + (list_global_forwarding_rules, 'project'), + (get_iam_policy, 'resource'), + (get_project, 'project'), + (get_project_iam_policy, 'resource'), +]) +def test_cloudaux_gcp_global_integration(function, p_param, project): + result = function(**{p_param: project}) + assert result is not None + +@pytest.mark.skipif( + os.getenv('CLOUDAUX_GCP_TEST_PROJECT') is None, + reason="Cannot run integration tests unless GCP project configured" +) +@pytest.mark.parametrize('function,p_param,z_param', [ + (list_disks, 'project', 'zone'), + (list_instances, 'project', 'zone'), +]) +def test_cloudaux_gcp_zoned_integration(function, p_param, z_param, project): + for zone in list_zones(project=project): + result = function(**{p_param: project, z_param: zone['name']}) + assert result is not None +
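A note on the record above: the skipif markers leave the suite inert unless CLOUDAUX_GCP_TEST_PROJECT is set. Below is a minimal, hypothetical sketch of one way to enable and run it; the project id is a placeholder, and it assumes pytest is installed and GCP application-default credentials are configured on the machine.

# Sketch only: "my-gcp-project" is a placeholder project id, and working
# GCP application-default credentials are assumed on this machine.
import os

import pytest

# Set the gating variable before pytest collects the module, so the
# skipif conditions above evaluate to False.
os.environ["CLOUDAUX_GCP_TEST_PROJECT"] = "my-gcp-project"

# Run only this test module; pytest.main returns an exit code (0 = pass).
exit_code = pytest.main(["cloudaux/tests/gcp/test_integration.py", "-v"])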
915364b232bfa5a433962bd474074d985a39d7fa
2_basic/recursion/recursion_examples.py
2_basic/recursion/recursion_examples.py
# -*- coding: utf-8 -*-

"""
    A function is recursive when it calls itself (on a smaller piece of the
    problem). We need to provide a 'stopping criterion' or else the function
    will call itself indefinitely (therefore hanging the program).
    http://en.wikipedia.org/wiki/Recursion_(computer_science)

    You can find some simple examples of recursion below, but recursion will
    also be used in other examples (for instance in some sorting algorithms).
"""


def factorial(n):
    """A factorial of n (n!) is defined as the product of all positive integers
    less than or equal to n. According to the convention for an empty product,
    the value of factorial(0) (0!) is 1.

    >>> [factorial(i) for i in range(11)]
    [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]
    """

    # The stopping criterion is when we reach 1 or less
    if n <= 1:
        return 1
    # n! = n * (n-1) * (n-2) * ... * 2 * 1, therefore
    # n! = n * (n-1)!
    return n * factorial(n-1)


def gcd(a, b):
    """Find the greatest common divisor using Euclid's algorithm.

    >>> gcd(1, 3)
    1
    >>> gcd(2, 10)
    2
    >>> gcd(6, 9)
    3
    >>> gcd(17, 289)
    17
    >>> gcd(2512561, 152351)
    1
    """

    if a % b == 0:
        return b
    return gcd(b, a % b)


if __name__ == "__main__":
    import doctest
    doctest.testmod()
Add some simple recursion examples
Add some simple recursion examples
Python
mit
nightmarebadger/tutorials-python-basic
--- +++ @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*-
+
+"""
+    A function is recursive when it calls itself (on a smaller piece of the
+    problem). We need to provide a 'stopping criterion' or else the function
+    will call itself indefinitely (therefore hanging the program).
+    http://en.wikipedia.org/wiki/Recursion_(computer_science)
+
+    You can find some simple examples of recursion below, but recursion will
+    also be used in other examples (for instance in some sorting algorithms).
+"""
+
+
+def factorial(n):
+    """A factorial of n (n!) is defined as the product of all positive integers
+    less than or equal to n. According to the convention for an empty product,
+    the value of factorial(0) (0!) is 1.
+
+    >>> [factorial(i) for i in range(11)]
+    [1, 1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800]
+    """
+
+    # The stopping criterion is when we reach 1 or less
+    if n <= 1:
+        return 1
+    # n! = n * (n-1) * (n-2) * ... * 2 * 1, therefore
+    # n! = n * (n-1)!
+    return n * factorial(n-1)
+
+
+def gcd(a, b):
+    """Find the greatest common divisor using Euclid's algorithm.
+
+    >>> gcd(1, 3)
+    1
+    >>> gcd(2, 10)
+    2
+    >>> gcd(6, 9)
+    3
+    >>> gcd(17, 289)
+    17
+    >>> gcd(2512561, 152351)
+    1
+    """
+
+    if a % b == 0:
+        return b
+    return gcd(b, a % b)
+
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
f79edb442849785f3180756973a30eaff72d9821
freertos.py
freertos.py
import os.path def sources(base, port, mem_mang=None): core_src_names = [ 'croutine.c', 'event_groups.c', 'list.c', 'queue.c', 'stream_buffer.c', 'tasks.c', 'timers.c'] core_srcs = [os.path.join(base, 'Source', f) for f in core_src_names] port_path = os.path.join(base, 'Source', 'portable', 'GCC', port) port_candidates = ['port.c'] for src in port_candidates: src_path = os.path.join(port_path, src) core_srcs.append(src_path) if mem_mang is not None: core_srcs.append(os.path.join(base, 'Source', 'portable', 'MemMang', 'heap_%s.c' % mem_mang)) return core_srcs def includes(base, port): return [ os.path.join(base, 'Source', 'include'), os.path.join(base, 'Source', 'portable', 'GCC', port) ] def build_lib(scons_env, base, port, mem_mang=None, suffix=None): objects = list() replacement = '.o' if suffix: replacement = '_%s.o' % suffix for src in sources(base, port, mem_mang): target_name = src.replace('.c', replacement) objects.append(scons_env.Object(target=target_name, source=src)) libname = 'freertos' if suffix: libname = '%s_%s' % (libname, suffix) return scons_env.StaticLibrary(target=libname, source=objects)
Add utility module for compiling FreeRTOS
Add utility module for compiling FreeRTOS
Python
apache-2.0
google/cortex-demos,google/cortex-demos,google/cortex-demos,google/cortex-demos
--- +++ @@ -0,0 +1,47 @@ +import os.path + +def sources(base, port, mem_mang=None): + core_src_names = [ + 'croutine.c', + 'event_groups.c', + 'list.c', + 'queue.c', + 'stream_buffer.c', + 'tasks.c', + 'timers.c'] + + core_srcs = [os.path.join(base, 'Source', f) for f in core_src_names] + + port_path = os.path.join(base, 'Source', 'portable', 'GCC', port) + port_candidates = ['port.c'] + for src in port_candidates: + src_path = os.path.join(port_path, src) + core_srcs.append(src_path) + + if mem_mang is not None: + core_srcs.append(os.path.join(base, 'Source', 'portable', 'MemMang', 'heap_%s.c' % mem_mang)) + + return core_srcs + + +def includes(base, port): + return [ + os.path.join(base, 'Source', 'include'), + os.path.join(base, 'Source', 'portable', 'GCC', port) + ] + + +def build_lib(scons_env, base, port, mem_mang=None, suffix=None): + objects = list() + replacement = '.o' + if suffix: + replacement = '_%s.o' % suffix + + for src in sources(base, port, mem_mang): + target_name = src.replace('.c', replacement) + objects.append(scons_env.Object(target=target_name, source=src)) + + libname = 'freertos' + if suffix: + libname = '%s_%s' % (libname, suffix) + return scons_env.StaticLibrary(target=libname, source=objects)
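For context on the record above: this helper is meant to be imported from an SCons build script, and SConstruct files are plain Python. A hedged usage sketch follows; the checkout path, port name, and heap scheme are placeholders (not from the commit), and Environment() is a global that SCons provides when the script runs under scons.

# SConstruct sketch using the freertos helper module above.
# FREERTOS_BASE and PORT are placeholders, not paths from the commit.
import freertos

FREERTOS_BASE = 'third_party/FreeRTOS'  # hypothetical checkout location
PORT = 'ARM_CM4F'                       # one of Source/portable/GCC/<port>

env = Environment()  # provided by SCons inside an SConstruct
env.Append(CPPPATH=freertos.includes(FREERTOS_BASE, PORT))

# Compiles the core sources, the port's port.c, and heap_4.c
# into a static library named libfreertos.
lib = freertos.build_lib(env, FREERTOS_BASE, PORT, mem_mang='4')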
df21b1d8e16dd2c1893b46b95608f85b62fe2081
src/stratisd_client_dbus/_implementation.py
src/stratisd_client_dbus/_implementation.py
# Copyright 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Classes to implement dbus interface. """ import abc from into_dbus_python import xformer class Interface(abc.ABC): """ Parent class for an interface hierarchy. """ _METHODS = abc.abstractproperty(doc="map from method name to data") _XFORMERS = abc.abstractproperty(doc="map from signature to xformer") _INTERFACE_NAME = abc.abstractproperty(doc="interface name") _PROPERTIES_INTERFACE_NAME = 'org.freedesktop.DBus.Properties' @classmethod def callMethod(cls, proxy_object, method_name, *args): """ Call a dbus method on a proxy object. :param proxy_object: the proxy object to invoke the method on :param method_name: a method name :param args: the arguments to pass to the dbus method :returns: the result of the call :rtype: object * int * str This method intentionally permits lower-level exceptions to be propagated. """ input_signature = cls._METHODS[method_name] if input_signature not in cls._XFORMERS: cls._XFORMERS[input_signature] = xformer(input_signature) xformed_args = cls._XFORMERS[input_signature](args) dbus_method = getattr(proxy_object, method_name) return dbus_method(*xformed_args, dbus_interface=cls._INTERFACE_NAME) @classmethod def getProperty(cls, proxy_object, name): """ Get a property with name 'name'. :param proxy_object: the proxy object :param str name: the name of the property :returns: the value of the property :rtype: object """ return proxy_object.Get( cls._INTERFACE_NAME, name, dbus_interface=cls._PROPERTIES_INTERFACE_NAME )
Add a simple super-class for interface definitions.
Add a simple super-class for interface definitions. Signed-off-by: mulhern <7b51bcf507bcd7afb72bf8663752c0ddbeb517f6@redhat.com>
Python
mpl-2.0
stratis-storage/stratisd,trgill/stratisd,mulkieran/stratisd,stratis-storage/stratisd-client-dbus,trgill/stratisd,stratis-storage/stratisd,mulkieran/stratisd,stratis-storage/stratisd
--- +++ @@ -0,0 +1,72 @@ +# Copyright 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Classes to implement dbus interface. +""" + +import abc + +from into_dbus_python import xformer + +class Interface(abc.ABC): + """ + Parent class for an interface hierarchy. + """ + + _METHODS = abc.abstractproperty(doc="map from method name to data") + _XFORMERS = abc.abstractproperty(doc="map from signature to xformer") + _INTERFACE_NAME = abc.abstractproperty(doc="interface name") + _PROPERTIES_INTERFACE_NAME = 'org.freedesktop.DBus.Properties' + + @classmethod + def callMethod(cls, proxy_object, method_name, *args): + """ + Call a dbus method on a proxy object. + + :param proxy_object: the proxy object to invoke the method on + :param method_name: a method name + :param args: the arguments to pass to the dbus method + + :returns: the result of the call + :rtype: object * int * str + + This method intentionally permits lower-level exceptions to be + propagated. + """ + input_signature = cls._METHODS[method_name] + + if input_signature not in cls._XFORMERS: + cls._XFORMERS[input_signature] = xformer(input_signature) + xformed_args = cls._XFORMERS[input_signature](args) + + dbus_method = getattr(proxy_object, method_name) + return dbus_method(*xformed_args, dbus_interface=cls._INTERFACE_NAME) + + @classmethod + def getProperty(cls, proxy_object, name): + """ + Get a property with name 'name'. + + :param proxy_object: the proxy object + :param str name: the name of the property + + :returns: the value of the property + :rtype: object + """ + return proxy_object.Get( + cls._INTERFACE_NAME, + name, + dbus_interface=cls._PROPERTIES_INTERFACE_NAME + )
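To make the intended use of the Interface base class above concrete, here is a hypothetical subclass sketch. The interface name, method map, and D-Bus signature are invented for illustration and are not part of the stratisd API; the import path is inferred from the file name in this record.

# Hypothetical subclass of Interface, for illustration only.
from stratisd_client_dbus._implementation import Interface  # path assumed


class ExampleManager(Interface):
    """
    An invented interface with one method, CreatePool, whose input
    signature is a string followed by an array of strings ("sas").
    """

    _INTERFACE_NAME = 'org.example.Manager'  # invented name
    _METHODS = {'CreatePool': 'sas'}         # method name -> input signature
    _XFORMERS = {}                           # filled lazily by callMethod

# With a dbus proxy object in hand, calls would look like:
#   ExampleManager.callMethod(proxy, 'CreatePool', 'mypool', ['/dev/vdb'])
#   ExampleManager.getProperty(proxy, 'SomeProperty')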
9d1e404eaf8e78efd6117266baf86ff3228915da
src/img2line.py
src/img2line.py
# -*- coding: utf-8 -*-
import numpy as np
from PIL import Image
from pylab import *
import types
from skimage import io, data

# Read the image, convert it to grayscale, and turn it into an array
im0 = Image.open("test.jpeg").convert('L')
im = array(im0)

# print(type(im[1, 1]))

x = y = 0
m = im.shape[0]
n = im.shape[1]
h = range(m - 2)
k = range(n - 2)
matrix = np.array([[255 for i in range(n)] for i in range(m)])
limit = 255

gray()

# Reduce the image to a "dot image"
for x in h[::2]:
    for y in k[::2]:
        im[x + 1, y + 1] = (int(im[x, y]) + int(im[x, y + 2]) + int(
            im[x + 1, y + 1]) + int(im[x + 2, y]) + int(im[x + 2, y + 2])) / 5
        im[x, y] = 255

# Connect the dots in the "dot image" with lines

imshow(im)
# imshow(matrix)
#io.imsave('dot.jpg', matrix)

# im0.save("test.jpg")

show()
Convert an image to many lines.
Convert an image to many lines.
Python
apache-2.0
xpeng2333/robodraw,xpeng2333/robodraw,xpeng2333/robodraw,xpeng2333/robodraw
--- +++ @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*-
+import numpy as np
+from PIL import Image
+from pylab import *
+import types
+from skimage import io, data
+
+# Read the image, convert it to grayscale, and turn it into an array
+im0 = Image.open("test.jpeg").convert('L')
+im = array(im0)
+
+# print(type(im[1, 1]))
+
+x = y = 0
+m = im.shape[0]
+n = im.shape[1]
+h = range(m - 2)
+k = range(n - 2)
+matrix = np.array([[255 for i in range(n)] for i in range(m)])
+limit = 255
+
+gray()
+
+# Reduce the image to a "dot image"
+for x in h[::2]:
+    for y in k[::2]:
+        im[x + 1, y + 1] = (int(im[x, y]) + int(im[x, y + 2]) + int(
+            im[x + 1, y + 1]) + int(im[x + 2, y]) + int(im[x + 2, y + 2])) / 5
+        im[x, y] = 255
+
+# Connect the dots in the "dot image" with lines
+
+imshow(im)
+# imshow(matrix)
+#io.imsave('dot.jpg', matrix)
+
+# im0.save("test.jpg")
+
+show()
291538ca1de9605865d35cd8e30e4cd2f8a74cd3
wagtail/admin/tests/ui/test_side_panels.py
wagtail/admin/tests/ui/test_side_panels.py
from unittest import TestCase from wagtail.admin.ui.side_panels import BaseSidePanel, BaseSidePanels class SidePanelA(BaseSidePanel): order = 300 class SidePanelB(BaseSidePanel): order = 200 class SidePanelC(BaseSidePanel): order = 400 class MySidePanels(BaseSidePanels): def __init__(self, request, object): super().__init__(request, object) self.side_panels = [ SidePanelA(object, request), SidePanelB(object, request), SidePanelC(object, request), ] class TestSidePanels(TestCase): def test_ordering(self): panels = MySidePanels(None, None) self.assertSequenceEqual( [type(panel) for panel in panels], [SidePanelB, SidePanelA, SidePanelC], )
Add test for ensuring side panel ordering
Add test for ensuring side panel ordering
Python
bsd-3-clause
zerolab/wagtail,thenewguy/wagtail,zerolab/wagtail,thenewguy/wagtail,wagtail/wagtail,rsalmaso/wagtail,rsalmaso/wagtail,thenewguy/wagtail,wagtail/wagtail,wagtail/wagtail,wagtail/wagtail,zerolab/wagtail,wagtail/wagtail,thenewguy/wagtail,zerolab/wagtail,zerolab/wagtail,rsalmaso/wagtail,rsalmaso/wagtail,thenewguy/wagtail,rsalmaso/wagtail
--- +++ @@ -0,0 +1,34 @@ +from unittest import TestCase + +from wagtail.admin.ui.side_panels import BaseSidePanel, BaseSidePanels + + +class SidePanelA(BaseSidePanel): + order = 300 + + +class SidePanelB(BaseSidePanel): + order = 200 + + +class SidePanelC(BaseSidePanel): + order = 400 + + +class MySidePanels(BaseSidePanels): + def __init__(self, request, object): + super().__init__(request, object) + self.side_panels = [ + SidePanelA(object, request), + SidePanelB(object, request), + SidePanelC(object, request), + ] + + +class TestSidePanels(TestCase): + def test_ordering(self): + panels = MySidePanels(None, None) + self.assertSequenceEqual( + [type(panel) for panel in panels], + [SidePanelB, SidePanelA, SidePanelC], + )
a52001442a4cb18734fff98a01f175c57c9dbf81
fedimg/services/ec2/ec2imgpublisher.py
fedimg/services/ec2/ec2imgpublisher.py
# This file is part of fedimg.
# Copyright (C) 2014-2017 Red Hat, Inc.
#
# fedimg is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# fedimg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with fedimg; if not, see http://www.gnu.org/licenses,
# or write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Authors: Sayan Chowdhury <sayanchowdhury@fedoraproject.org>
#

from fedimg.utils import external_run_command, get_item_from_regex
from fedimg.ec2.ec2base import EC2Base


class EC2ImagePublisher(EC2Base):
    """ Publish AMIs by making the images and their backing snapshots public. """

    def __init__(self, **kwargs):
        defaults = {
            'access_key': None,
            'image_id': None,
            'region': None,
            'secret_key': None,
            'visibility': 'all'
        }

        for (prop, default) in defaults.iteritems():
            setattr(self, prop, kwargs.get(prop, default))

    def get_snapshot_from_image_id(self, image):
        """ Return the EBS snapshot backing the given AMI id or image object. """
        if isinstance(image, str):
            image_id = image
            image = self._connect().get_image(image_id)

        snapshot_id = image.extra['block_device_mapping']['snapshot_id']
        snapshots = self._connect().list_snapshots()
        for snapshot in snapshots:
            if snapshot.id == snapshot_id:
                return snapshot

    def publish_images(self, image_ids=None):
        """ Make each given AMI and the snapshot behind it publicly accessible. """
        driver = self._connect()
        images = driver.list_images(ex_image_ids=image_ids)

        for image in images:
            driver.ex_modify_image_attribute(image, {
                'LaunchPermission.Add.1.Group': 'all'})

            snapshot = self.get_snapshot_from_image_id(image.id)
            driver.ex_modify_snapshot_attribute(snapshot, {
                'CreateVolumePermission.Add.1.Group': 'all'})
Write a publisher that would make the images & the snapshot public
ec2: Write a publisher that would make the images & the snapshot public Signed-off-by: Sayan Chowdhury <5f0367a2b3b757615b57f51d912cf16f2c0ad827@gmail.com>
Python
agpl-3.0
fedora-infra/fedimg,fedora-infra/fedimg
--- +++ @@ -0,0 +1,64 @@ +# This file is part of fedimg.
+# Copyright (C) 2014-2017 Red Hat, Inc.
+#
+# fedimg is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# fedimg is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public
+# License along with fedimg; if not, see http://www.gnu.org/licenses,
+# or write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors: Sayan Chowdhury <sayanchowdhury@fedoraproject.org>
+#
+
+from fedimg.utils import external_run_command, get_item_from_regex
+from fedimg.ec2.ec2base import EC2Base
+
+
+class EC2ImagePublisher(EC2Base):
+    """ Publish AMIs by making the images and their backing snapshots public. """
+
+    def __init__(self, **kwargs):
+        defaults = {
+            'access_key': None,
+            'image_id': None,
+            'region': None,
+            'secret_key': None,
+            'visibility': 'all'
+        }
+
+        for (prop, default) in defaults.iteritems():
+            setattr(self, prop, kwargs.get(prop, default))
+
+    def get_snapshot_from_image_id(self, image):
+        """ Return the EBS snapshot backing the given AMI id or image object. """
+        if isinstance(image, str):
+            image_id = image
+            image = self._connect().get_image(image_id)
+
+        snapshot_id = image.extra['block_device_mapping']['snapshot_id']
+        snapshots = self._connect().list_snapshots()
+        for snapshot in snapshots:
+            if snapshot.id == snapshot_id:
+                return snapshot
+
+    def publish_images(self, image_ids=None):
+        """ Make each given AMI and the snapshot behind it publicly accessible. """
+        driver = self._connect()
+        images = driver.list_images(ex_image_ids=image_ids)
+
+        for image in images:
+            driver.ex_modify_image_attribute(image, {
+                'LaunchPermission.Add.1.Group': 'all'})
+
+            snapshot = self.get_snapshot_from_image_id(image.id)
+            driver.ex_modify_snapshot_attribute(snapshot, {
+                'CreateVolumePermission.Add.1.Group': 'all'})
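For orientation, the publisher in the record above is constructed with keyword arguments matching its defaults dict and relies on EC2Base._connect(), which is not shown in this record. A hedged usage sketch follows, with placeholder credentials, region, and AMI id; the import path is inferred from the file name.

# Illustrative only; every value below is a placeholder.
from fedimg.services.ec2.ec2imgpublisher import EC2ImagePublisher  # assumed

publisher = EC2ImagePublisher(
    access_key='AKIA...',   # placeholder credentials
    secret_key='...',
    region='us-east-1',
)

# Marks the AMI public and opens CreateVolumePermission on its snapshot.
publisher.publish_images(image_ids=['ami-0123456789abcdef0'])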
a617c4bd3b38b2d364a3f12a0cb066559f8c8ae3
tests/test_alchemy.py
tests/test_alchemy.py
import unittest import sqlalchemy as sa from pga.alchemy import sa_column_dict_to_column class TestAlchemySchema(unittest.TestCase): def test_sa_column_dict_to_column(self): column_name = 'my_mock_column' column_type = sa.CHAR(length=2) nullable = False attributes = { 'autoincrement': False, 'default': None, 'name': column_name, 'nullable': nullable, 'type': column_type } result_column = sa_column_dict_to_column(attributes) result = {attribute: getattr(result_column, attribute) for attribute in attributes.keys()} self.assertEqual(attributes, result)
Add specification for alchemy schema.
Add specification for alchemy schema.
Python
mit
portfoliome/pgawedge
--- +++ @@ -0,0 +1,24 @@ +import unittest + +import sqlalchemy as sa + +from pga.alchemy import sa_column_dict_to_column + + +class TestAlchemySchema(unittest.TestCase): + + def test_sa_column_dict_to_column(self): + column_name = 'my_mock_column' + column_type = sa.CHAR(length=2) + nullable = False + + attributes = { + 'autoincrement': False, 'default': None, 'name': column_name, + 'nullable': nullable, 'type': column_type + } + + result_column = sa_column_dict_to_column(attributes) + result = {attribute: getattr(result_column, attribute) + for attribute in attributes.keys()} + + self.assertEqual(attributes, result)
0cd94c71db19c0a53d1d97f353116e271884a336
utils/sort_includes.py
utils/sort_includes.py
#!/usr/bin/env python """Script to sort the top-most block of #include lines. Assumes the LLVM coding conventions. Currently, this script only bothers sorting the llvm/... headers. Patches welcome for more functionality, and sorting other header groups. """ import argparse import os import re import sys import tempfile def sort_includes(f): lines = f.readlines() look_for_api_header = f.name[-4:] == '.cpp' headers_begin = 0 headers_end = 0 api_headers = [] local_headers = [] project_headers = [] system_headers = [] for (i, l) in enumerate(lines): if l.strip() == '': continue if l.startswith('#include'): if headers_begin == 0: headers_begin = i headers_end = i header = l[len('#include'):].lstrip() if look_for_api_header and header.startswith('"'): api_headers.append(header) look_for_api_header = False continue if header.startswith('<'): system_headers.append(header) continue if header.startswith('"llvm/') or header.startswith('"clang/'): project_headers.append(header) continue local_headers.append(header) continue # Only allow comments and #defines prior to any includes. If either are # mixed with includes, the order might be sensitive. if headers_begin != 0: break if l.startswith('//') or l.startswith('#define'): continue break if headers_begin == 0: return local_headers.sort() project_headers.sort() system_headers.sort() headers = api_headers + local_headers + project_headers + system_headers header_lines = ['#include ' + h for h in headers] lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:] #for l in lines[headers_begin:headers_end]: # print l.rstrip() f.seek(0) f.truncate() f.writelines(lines) def main(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('files', nargs='+', type=argparse.FileType('r+'), help='the source files to sort includes within') args = parser.parse_args() for f in args.files: sort_includes(f) if __name__ == '__main__': main()
Add a completely hack-ish tool to sort includes according to the coding standards.
Add a completely hack-ish tool to sort includes according to the coding standards. I am a terrible Python programmer. Patches more than welcome. Please tell me how this should look if it should look differently. It's just a tiny little script so it didn't make sense to go through pre-commit review, especially as someone who actually knows Python may want to just rip it apart and do it The Right Way. I will be preparing a commit shortly that uses this script to canonicalize *all* of the #include lines in LLVM. Really, all of them. git-svn-id: 0ff597fd157e6f4fc38580e8d64ab130330d2411@169125 91177308-0d34-0410-b5e6-96231b3b80d8
Python
bsd-2-clause
dslab-epfl/asap,llvm-mirror/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,apple/swift-llvm,llvm-mirror/llvm,llvm-mirror/llvm,apple/swift-llvm,GPUOpen-Drivers/llvm,dslab-epfl/asap,chubbymaggie/asap,apple/swift-llvm,apple/swift-llvm,dslab-epfl/asap,llvm-mirror/llvm,GPUOpen-Drivers/llvm,apple/swift-llvm,chubbymaggie/asap,apple/swift-llvm,dslab-epfl/asap,GPUOpen-Drivers/llvm,chubbymaggie/asap,GPUOpen-Drivers/llvm,chubbymaggie/asap,dslab-epfl/asap,chubbymaggie/asap,apple/swift-llvm,dslab-epfl/asap,dslab-epfl/asap,apple/swift-llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,llvm-mirror/llvm,llvm-mirror/llvm,GPUOpen-Drivers/llvm,chubbymaggie/asap,llvm-mirror/llvm
--- +++ @@ -0,0 +1,79 @@ +#!/usr/bin/env python + +"""Script to sort the top-most block of #include lines. + +Assumes the LLVM coding conventions. + +Currently, this script only bothers sorting the llvm/... headers. Patches +welcome for more functionality, and sorting other header groups. +""" + +import argparse +import os +import re +import sys +import tempfile + +def sort_includes(f): + lines = f.readlines() + look_for_api_header = f.name[-4:] == '.cpp' + headers_begin = 0 + headers_end = 0 + api_headers = [] + local_headers = [] + project_headers = [] + system_headers = [] + for (i, l) in enumerate(lines): + if l.strip() == '': + continue + if l.startswith('#include'): + if headers_begin == 0: + headers_begin = i + headers_end = i + header = l[len('#include'):].lstrip() + if look_for_api_header and header.startswith('"'): + api_headers.append(header) + look_for_api_header = False + continue + if header.startswith('<'): + system_headers.append(header) + continue + if header.startswith('"llvm/') or header.startswith('"clang/'): + project_headers.append(header) + continue + local_headers.append(header) + continue + + # Only allow comments and #defines prior to any includes. If either are + # mixed with includes, the order might be sensitive. + if headers_begin != 0: + break + if l.startswith('//') or l.startswith('#define'): + continue + break + if headers_begin == 0: + return + + local_headers.sort() + project_headers.sort() + system_headers.sort() + headers = api_headers + local_headers + project_headers + system_headers + header_lines = ['#include ' + h for h in headers] + lines = lines[:headers_begin] + header_lines + lines[headers_end + 1:] + + #for l in lines[headers_begin:headers_end]: + # print l.rstrip() + f.seek(0) + f.truncate() + f.writelines(lines) + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument('files', nargs='+', type=argparse.FileType('r+'), + help='the source files to sort includes within') + args = parser.parse_args() + for f in args.files: + sort_includes(f) + +if __name__ == '__main__': + main()
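Besides the command line shown in main(), the module above can be driven programmatically: sort_includes() takes an open file object and rewrites it in place. A small sketch, assuming the script is importable from the working directory; the path is a placeholder, and the file must be opened 'r+' because the function both reads and truncates it.

# Hypothetical programmatic use of the sorter; the path is a placeholder.
import sort_includes

with open('lib/Support/Example.cpp', 'r+') as f:
    sort_includes.sort_includes(f)  # reads f.name to detect .cpp files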
8e5617d8c0279c871c0d78bc3ad5d3676d35cbce
setup.py
setup.py
#from distutils.core import setup from setuptools import setup filename = 'tweetfeels/version.py' exec(compile(open(filename, "rb").read(), filename, 'exec')) setup(name='tweetfeels', version=__version__, description='Real-time sentiment analysis for twitter.', author='Thomas Chen', author_email='tkchen@gmail.com', url='https://github.com/uclatommy/tweetfeels', download_url='https://github.com/uclatommy/tweetfeels/tarball/{}'.format( __version__ ), packages=['tweetfeels'], classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering :: Artificial Intelligence' ], install_requires=[ 'tweepy', 'h5py', 'nltk', 'numpy', 'oauthlib', 'pandas', 'python-dateutil', 'pytz', 'requests', 'requests-oauthlib', 'six', 'twython' ], test_suite='nose.collector', tests_require=['nose'] )
#from distutils.core import setup from setuptools import setup import os try: import pypandoc long_description = pypandoc.convert('README.md', 'rst') except(IOError, ImportError): long_description = 'Real-time sentiment analysis for twitter.' filename = 'tweetfeels/version.py' exec(compile(open(filename, "rb").read(), filename, 'exec')) setup(name='tweetfeels', version=__version__, description='Real-time sentiment analysis for twitter.', long_description=long_description, author='Thomas Chen', author_email='tkchen@gmail.com', url='https://github.com/uclatommy/tweetfeels', download_url='https://github.com/uclatommy/tweetfeels/tarball/{}'.format( __version__ ), packages=['tweetfeels'], classifiers=[ 'Development Status :: 4 - Beta', 'License :: OSI Approved :: BSD License', 'Natural Language :: English', 'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering :: Artificial Intelligence' ], install_requires=[ 'tweepy', 'h5py', 'nltk', 'numpy', 'oauthlib', 'pandas', 'python-dateutil', 'pytz', 'requests', 'requests-oauthlib', 'six', 'twython' ], test_suite='nose.collector', tests_require=['nose'] )
Use pandoc to convert the README to rst and supply that as long_description to PyPI.
Use pandoc to convert the README to rst and supply that as long_description to PyPI.
Python
bsd-3-clause
uclatommy/tweetfeels
--- +++ @@ -1,5 +1,12 @@ #from distutils.core import setup from setuptools import setup +import os + +try: + import pypandoc + long_description = pypandoc.convert('README.md', 'rst') +except(IOError, ImportError): + long_description = 'Real-time sentiment analysis for twitter.' filename = 'tweetfeels/version.py' exec(compile(open(filename, "rb").read(), filename, 'exec')) @@ -7,6 +14,7 @@ setup(name='tweetfeels', version=__version__, description='Real-time sentiment analysis for twitter.', + long_description=long_description, author='Thomas Chen', author_email='tkchen@gmail.com', url='https://github.com/uclatommy/tweetfeels',
ed48ce514c40e0f4e68e691091ed5bad91f273a6
python/runRoot2json.py
python/runRoot2json.py
""" 1 July 2017 Dan Marley daniel.edison.marley@cernSPAMNOT.ch Texas A&M University ----- Execute the root2json class to convert ROOT data into JSON format for neural network training/testing/etc. in python. """ from info import VERBOSE from root2json import Root2json vb = VERBOSE() vb.level = "INFO" vb.INFO("RUN > Set up the root2json object") r2j = Root2json() ## Define properties (can put this into config file later, if wanted) vb.INFO("RUN > Define properties for convertin ROOT to JSON") r2j.verbose_level = "INFO" # Setup verbose output r2j.outpath = "./" # where to store output r2j.listOfFiles = "share/listOfFiles_testNN.txt" # ROOT files to process r2j.nEntries = 5 # Properties for large-R jets and such r2j.ljet_charge_max = 5. r2j.ljet_pt_cut = 300000. r2j.ljet_eta_cut = 2. r2j.tjet_pt_cut = 10000. r2j.deltaR_tru = 0.75 r2j.deltaR_tjet = 0.8 # ljet R = 1.0; tjet R = 0.2 r2j.t_index = 1 # +2/3 charge r2j.tbar_index = 0 # -2/3 charge r2j.nsubjets = 3 # number of ghost-associated track jets to save r2j.parton_def = 'afterFSR' # truth parton definition r2j.success = '\x01' # if something is b-tagged (type char) r2j.btag_wkpt = "77" # this isn't necessarily needed anymore for actual selection # Setup and run the code vb.INFO("RUN > Initialize") r2j.initialize() vb.INFO("RUN > Execute") r2j.execute() vb.INFO("RUN > Finished") ## THE END ##
Add missing script to use the root2json class
Add missing script to use the root2json class
Python
mit
cms-ttbarAC/CyMiniAna,cms-ttbarAC/CyMiniAna,cms-ttbarAC/CyMiniAna
--- +++ @@ -0,0 +1,53 @@ +"""
+1 July 2017
+
+Dan Marley
+daniel.edison.marley@cernSPAMNOT.ch
+Texas A&M University
+-----
+
+Execute the root2json class to convert
+ROOT data into JSON format for neural network
+training/testing/etc. in python.
+"""
+from info import VERBOSE
+from root2json import Root2json
+
+vb = VERBOSE()
+vb.level = "INFO"
+
+vb.INFO("RUN > Set up the root2json object")
+r2j = Root2json()
+
+## Define properties (can put this into config file later, if wanted)
+vb.INFO("RUN > Define properties for converting ROOT to JSON")
+r2j.verbose_level = "INFO" # Setup verbose output
+r2j.outpath = "./" # where to store output
+r2j.listOfFiles = "share/listOfFiles_testNN.txt" # ROOT files to process
+r2j.nEntries = 5
+
+# Properties for large-R jets and such
+r2j.ljet_charge_max = 5.
+r2j.ljet_pt_cut = 300000.
+r2j.ljet_eta_cut = 2.
+r2j.tjet_pt_cut = 10000.
+r2j.deltaR_tru = 0.75
+r2j.deltaR_tjet = 0.8 # ljet R = 1.0; tjet R = 0.2
+r2j.t_index = 1 # +2/3 charge
+r2j.tbar_index = 0 # -2/3 charge
+r2j.nsubjets = 3 # number of ghost-associated track jets to save
+r2j.parton_def = 'afterFSR' # truth parton definition
+r2j.success = '\x01' # if something is b-tagged (type char)
+r2j.btag_wkpt = "77" # this isn't necessarily needed anymore for actual selection
+
+# Setup and run the code
+vb.INFO("RUN > Initialize")
+r2j.initialize()
+
+vb.INFO("RUN > Execute")
+r2j.execute()
+
+vb.INFO("RUN > Finished")
+
+## THE END ##
+
ea63340282b7eba0d4b4f357808b8e374a3fcdf8
usr/examples/14-WiFi-Shield/fw_update.py
usr/examples/14-WiFi-Shield/fw_update.py
''' Firmware update examples Note: copy the WINC1500/firmware folder to uSD ''' import time, network # Init wlan module in Download mode wlan = network.WINC(True) #print("Firmware version:", wlan.fw_version()) # Start the firmware update process. wlan.fw_update() #print("Firmware version:", wlan.fw_version())
Add WINC1500 fw update script.
Add WINC1500 fw update script.
Python
mit
kwagyeman/openmv,openmv/openmv,openmv/openmv,kwagyeman/openmv,kwagyeman/openmv,openmv/openmv,openmv/openmv,iabdalkader/openmv,kwagyeman/openmv,iabdalkader/openmv,iabdalkader/openmv,iabdalkader/openmv
--- +++ @@ -0,0 +1,13 @@ +''' + Firmware update examples + Note: copy the WINC1500/firmware folder to uSD +''' +import time, network + +# Init wlan module in Download mode +wlan = network.WINC(True) +#print("Firmware version:", wlan.fw_version()) + +# Start the firmware update process. +wlan.fw_update() +#print("Firmware version:", wlan.fw_version())
8ac4de6438488f3a24b13959e4effa644474609b
cocktails/drinks/migrations/0003_drink_owner.py
cocktails/drinks/migrations/0003_drink_owner.py
# -*- coding: utf-8 -*- # Generated by Django 1.9.6 on 2016-06-02 23:00 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('drinks', '0002_auto_20160602_1908'), ] operations = [ migrations.AddField( model_name='drink', name='owner', field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='drinks', to=settings.AUTH_USER_MODEL), preserve_default=False, ), ]
Fix default value for owner
Fix default value for owner
Python
mit
jake-jake-jake/cocktails,jake-jake-jake/cocktails,jake-jake-jake/cocktails,jake-jake-jake/cocktails
--- +++ @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9.6 on 2016-06-02 23:00 +from __future__ import unicode_literals + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('drinks', '0002_auto_20160602_1908'), + ] + + operations = [ + migrations.AddField( + model_name='drink', + name='owner', + field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='drinks', to=settings.AUTH_USER_MODEL), + preserve_default=False, + ), + ]
3465cd435547444d16c6ce479f22371b6fa00c4d
printjson.py
printjson.py
#!/usr/bin/env python3 import json from pprint import pprint with open('data.json') as data_file: data = json.load(data_file) pprint(data)
Read and print json data test.
Read and print json data test.
Python
mit
magnetmagnate/daily-grind,magnetmagnate/daily-grind
--- +++ @@ -0,0 +1,10 @@ +#!/usr/bin/env python3 + +import json +from pprint import pprint + +with open('data.json') as data_file: + data = json.load(data_file) + +pprint(data) +
c59d27223af702a55a603cfab5e06a4579a4435b
scipy/interpolate/tests/test_gil.py
scipy/interpolate/tests/test_gil.py
from __future__ import division, print_function, absolute_import import itertools import threading import time import numpy as np from numpy.testing import TestCase, assert_equal, run_module_suite from numpy.testing.decorators import slow import scipy.interpolate from scipy._lib._testutils import knownfailure_overridable class TestGIL(TestCase): """Check if the GIL is properly released by scipy.interpolate functions.""" def setUp(self): self.messages = [] def log(self, message): self.messages.append(message) def make_worker_thread(self, target, args): log = self.log class WorkerThread(threading.Thread): def run(self): log('interpolation started') target(*args) log('interpolation complete') return WorkerThread() @slow @knownfailure_overridable('race conditions, may depend on system load') def test_rectbivariatespline(self): def generate_params(n_points): x = y = np.linspace(0, 1000, n_points) x_grid, y_grid = np.meshgrid(x, y) z = x_grid * y_grid return x, y, z def calibrate_delay(requested_time): for n_points in itertools.count(5000, 1000): args = generate_params(n_points) time_started = time.time() interpolate(*args) if time.time() - time_started > requested_time: return args def interpolate(x, y, z): scipy.interpolate.RectBivariateSpline(x, y, z) args = calibrate_delay(requested_time=3) worker_thread = self.make_worker_thread(interpolate, args) worker_thread.start() for i in range(3): time.sleep(0.5) self.log('working') worker_thread.join() assert_equal(self.messages, [ 'interpolation started', 'working', 'working', 'working', 'interpolation complete', ]) if __name__ == "__main__": run_module_suite()
Add a test for GIL release in scipy.interpolate.RectBivariateSpline.
TST: Add a test for GIL release in scipy.interpolate.RectBivariateSpline.
Python
bsd-3-clause
perimosocordiae/scipy,pizzathief/scipy,niknow/scipy,Shaswat27/scipy,Newman101/scipy,Gillu13/scipy,tylerjereddy/scipy,fernand/scipy,ortylp/scipy,newemailjdm/scipy,jjhelmus/scipy,vanpact/scipy,giorgiop/scipy,mortada/scipy,surhudm/scipy,felipebetancur/scipy,jseabold/scipy,endolith/scipy,futurulus/scipy,vanpact/scipy,rgommers/scipy,andyfaff/scipy,endolith/scipy,niknow/scipy,behzadnouri/scipy,gdooper/scipy,fernand/scipy,anntzer/scipy,mtrbean/scipy,tylerjereddy/scipy,surhudm/scipy,njwilson23/scipy,ndchorley/scipy,mhogg/scipy,WarrenWeckesser/scipy,Newman101/scipy,zxsted/scipy,pschella/scipy,person142/scipy,apbard/scipy,befelix/scipy,petebachant/scipy,scipy/scipy,cpaulik/scipy,Gillu13/scipy,jseabold/scipy,mortada/scipy,pbrod/scipy,njwilson23/scipy,nmayorov/scipy,Stefan-Endres/scipy,haudren/scipy,fredrikw/scipy,raoulbq/scipy,Eric89GXL/scipy,WillieMaddox/scipy,WarrenWeckesser/scipy,Newman101/scipy,kalvdans/scipy,mhogg/scipy,futurulus/scipy,jamestwebber/scipy,pyramania/scipy,andyfaff/scipy,matthew-brett/scipy,argriffing/scipy,Eric89GXL/scipy,maniteja123/scipy,matthewalbani/scipy,raoulbq/scipy,jamestwebber/scipy,pnedunuri/scipy,mdhaber/scipy,mdhaber/scipy,kleskjr/scipy,mdhaber/scipy,perimosocordiae/scipy,fernand/scipy,pnedunuri/scipy,vanpact/scipy,kalvdans/scipy,Gillu13/scipy,ortylp/scipy,josephcslater/scipy,Gillu13/scipy,josephcslater/scipy,woodscn/scipy,endolith/scipy,felipebetancur/scipy,mdhaber/scipy,kleskjr/scipy,mdhaber/scipy,perimosocordiae/scipy,fernand/scipy,pnedunuri/scipy,vanpact/scipy,kalvdans/scipy,Gillu13/scipy,ortylp/scipy,endolith/scipy,ales-erjavec/scipy,lhilt/scipy,pizzathief/scipy,mingwpy/scipy,gertingold/scipy,befelix/scipy,mingwpy/scipy,giorgiop/scipy,endolith/scipy,ales-erjavec/scipy,lhilt/scipy,Shaswat27/scipy,kleskjr/scipy,bkendzior/scipy,mtrbean/scipy,gef756/scipy,haudren/scipy,sriki18/scipy,mdhaber/scipy,kleskjr/scipy,mdhaber/scipy,mdhaber/scipy,perimosocordiae/scipy,jamestwebber/scipy,Stefan-Endres/scipy,anielsen001/scipy,ChanderG/scipy,sonnyhu/scipy,vberaudi/scipy,chatcannon/scipy,jonycgn/scipy,gef756/scipy,haudren/scipy,Kamp9/scipy,jakevdp/scipy,trankmichael/scipy,haudren/scipy,raoulbq/scipy,futurulus/scipy,mikebenfield/scipy,mgaitan/scipy,mhogg/scipy,WarrenWeckesser/scipy,gertingold/scipy,niknow/scipy,chatcannon/scipy,sriki18/scipy,mdhaber/scipy,vigna/scipy,lhilt/scipy,pizzathief/scipy,mingwpy/scipy,gertingold/scipy,befelix/scipy,mingwpy/scipy,giorgiop/scipy,endolith/scipy,ales-erjavec/scipy,lhilt/scipy,Shaswat27/scipy,kleskjr/scipy,bkendzior/scipy,mikebenfield/scipy,FRidh/scipy,andim/scipy,sauliusl/scipy,aman-iitj/scipy,sauliusl/scipy,maniteja123/scipy,aman-iitj/scipy,ilayn/scipy,ChanderG/scipy,cpaulik/scipy,grlee77/scipy,lukauskas/scipy,vberaudi/scipy,pnedunuri/scipy,fernand/scipy,nonhermitian/scipy,gdooper/scipy,zaxliu/scipy,chatcannon/scipy,tylerjereddy/scipy,pyramania/scipy,rgommers/scipy,richardotis/scipy,pyramania/scipy,lhilt/scipy,befelix/scipy,FRidh/scipy,ortylp/scipy,jonycgn/scipy,mhogg/scipy,endolith/scipy,Srisai85/scipy,Kamp9/scipy,anntzer/scipy,cpaulik/scipy,WarrenWeckesser/scipy,fernand/scipy,andim/scipy,trankmichael/scipy,apbard/scipy,ndchorley/scipy,haudren/scipy,mgaitan/scipy,grlee77/scipy,perimosocordiae/scipy,jamestwebber/scipy,Stefan-Endres/scipy,anielsen001/scipy,ChanderG/scipy,nonhermitian/scipy,grlee77/scipy,gdooper/scipy,mtrbean/scipy,mingwpy/scipy,Srisai85/scipy,aeklant/scipy,pbrod/scipy,bkendzior/scipy,gfyoung/scipy,vigna/scipy,jjhelmus/scipy,mortada/scipy,ortylp/scipy,hainm/scipy,minhlongdo/scipy,ndchorley/scipy,pbrod/scipy,argriffing/scipy,kleskjr/scipy,befelix/scipy,WarrenWeckesser/scipy,person142/scipy,behzadnouri/scipy,Srisai85/scipy,josephcslater/scipy,larsmans/scipy,mortada/scipy,aeklant/scipy,gef756/scipy,kleskjr/scipy,anielsen001/scipy,perimosocordiae/scipy,andyfaff/scipy,vigna/scipy,person142/scipy,Eric89GXL/scipy,Stefan-Endres/scipy,pyramania/scipy,matthew-brett/scipy,richardotis/scipy,jseabold/scipy,e-q/scipy,gfyoung/scipy,petebachant/scipy,ndchorley/scipy,ilayn/scipy,gef756/scipy,jakevdp/scipy,minhlongdo/scipy,newemailjdm/scipy,scipy/scipy,ales-erjavec/scipy,petebachant/scipy,zerothi/scipy,cpaulik/scipy,vberaudi/scipy,mingwpy/scipy,lukauskas/scipy,pschella/scipy,jor-/scipy,Kamp9/scipy,andim/scipy,felipebetancur/scipy,WarrenWeckesser/scipy,maniteja123/scipy,aarchiba/scipy,haudren/scipy,aarchiba/scipy,pschella/scipy,sauliusl/scipy,lukauskas/scipy,sauliusl/scipy,aeklant/scipy,maniteja123/scipy,richardotis/scipy,nonhermitian/scipy,hainm/scipy,bkendzior/scipy,Eric89GXL/scipy,pizzathief/scipy,ales-erjavec/scipy,anntzer/scipy,Srisai85/scipy,ales-erjavec/scipy,mdhaber/scipy,jakevdp/scipy,sauliusl/scipy,haudren/scipy,larsmans/scipy,dominicelse/scipy,dominicelse/scipy,apbard/scipy,scipy/scipy,behzadnouri/scipy,mgaitan/scipy,minhlongdo/scipy,vigna/scipy,e-q/scipy,futurulus/scipy,zaxliu/scipy,newemailjdm/scipy,mikebenfield/scipy,ilayn/scipy,Shaswat27/scipy,grlee77/scipy,matthewalbani/scipy,gertingold/scipy,sauliusl/scipy,aarchiba/scipy,woodscn/scipy,cpaulik/scipy,sriki18/scipy,matthew-brett/scipy,WillieMaddox/scipy,Gillu13/scipy,rmcgibbo/scipy,anntzer/scipy,matthewalbani/scipy,piyush0609/scipy,mortada/scipy,pyramania/scipy,rmcgibbo/scipy,WillieMaddox/scipy,mgaitan/scipy,scipy/scipy,pbrod/scipy,kalvdans/scipy,mikebenfield/scipy,surhudm/scipy,Kamp9/scipy,piyush0609/scipy,giorgiop/scipy,jseabold/scipy,Eric89GXL/scipy,pizzathief/scipy,e-q/scipy,mdhaber/scipy,rmcgibbo/scipy,njwilson23/scipy,zaxliu/scipy,bkendzior/scipy,person142/scipy,tylerjereddy/scipy,jonycgn/scipy,niknow/scipy,person142/scipy,zerothi/scipy,hainm/scipy,nonhermitian/scipy,niknow/scipy,fredrikw/scipy,aman-iitj/scipy,hainm/scipy,mtrbean/scipy,perimosocordiae/scipy,zerothi/scipy,rgommers/scipy,fredrikw/scipy,lukauskas/scipy,aarchiba/scipy,gef756/scipy,aman-iitj/scipy,jamestwebber/scipy,kleskjr/scipy,anielsen001/scipy,jor-/scipy,apbard/scipy,mgaitan/scipy,fredrikw/scipy,ChanderG/scipy,ortylp/scipy,matthewalbani/scipy,apbard/scipy,Gillu13/scipy,lhilt/scipy,zxsted/scipy,minhlongdo/scipy,felipebetancur/scipy,dominicelse/scipy,jor-/scipy,vanpact/scipy,zerothi/scipy,mhogg/scipy,jakevdp/scipy,behzadnouri/scipy,FRidh/scipy,surhudm/scipy,ilayn/scipy,trankmichael/scipy,rmcgibbo/scipy,jjhelmus/scipy,nonhermitian/scipy,piyush0609/scipy,FRidh/scipy,scipy/scipy,mingwpy/scipy,ilayn/scipy,gfyoung/scipy,gdooper/scipy,hainm/scipy,andyfaff/scipy,Eric89GXL/scipy,zaxliu/scipy,petebachant/scipy,mortada/scipy,gertingold/scipy,aman-iitj/scipy,woodscn/scipy,aeklant/scipy,Newman101/scipy,tylerjereddy/scipy,grlee77/scipy,kleskjr/scipy,raoulbq/scipy,gfyoung/scipy,vigna/scipy,larsmans/scipy,anntzer/scipy,argriffing/scipy,josephcslater/scipy,piyush0609/scipy,sonnyhu/scipy,pschella/scipy,befelix/scipy,felipebetancur/scipy,dominicelse/scipy,arokem/scipy,trankmichael/scipy,kalvdans/scipy,FRidh/scipy,felipebetancur/scipy,gertingold/scipy,pizzathief/scipy,WillieMaddox/scipy,jjhelmus/scipy,andyfaff/scipy,rgommers/scipy,piyush0609/scipy,argriffing/scipy,jonycgn/scipy,mtrbean/scipy,perimosocordiae/scipy,gfyoung/scipy,mhogg/scipy,dominicelse/scipy,behzadnouri/scipy,jor-/scipy,jseabold/scipy,jonycgn/scipy,andim/scipy,futurulus/scipy,njwilson23/scipy,jakevdp/scipy,zxsted/scipy,jjhelmus/scipy,vanpact/scipy
--- +++ @@ -0,0 +1,71 @@ +from __future__ import division, print_function, absolute_import + +import itertools +import threading +import time + +import numpy as np +from numpy.testing import TestCase, assert_equal, run_module_suite +from numpy.testing.decorators import slow +import scipy.interpolate +from scipy._lib._testutils import knownfailure_overridable + + +class TestGIL(TestCase): + """Check if the GIL is properly released by scipy.interpolate functions.""" + + def setUp(self): + self.messages = [] + + def log(self, message): + self.messages.append(message) + + def make_worker_thread(self, target, args): + log = self.log + + class WorkerThread(threading.Thread): + def run(self): + log('interpolation started') + target(*args) + log('interpolation complete') + + return WorkerThread() + + @slow + @knownfailure_overridable('race conditions, may depend on system load') + def test_rectbivariatespline(self): + def generate_params(n_points): + x = y = np.linspace(0, 1000, n_points) + x_grid, y_grid = np.meshgrid(x, y) + z = x_grid * y_grid + return x, y, z + + def calibrate_delay(requested_time): + for n_points in itertools.count(5000, 1000): + args = generate_params(n_points) + time_started = time.time() + interpolate(*args) + if time.time() - time_started > requested_time: + return args + + def interpolate(x, y, z): + scipy.interpolate.RectBivariateSpline(x, y, z) + + args = calibrate_delay(requested_time=3) + worker_thread = self.make_worker_thread(interpolate, args) + worker_thread.start() + for i in range(3): + time.sleep(0.5) + self.log('working') + worker_thread.join() + assert_equal(self.messages, [ + 'interpolation started', + 'working', + 'working', + 'working', + 'interpolation complete', + ]) + + +if __name__ == "__main__": + run_module_suite()
1d67482432ec56ee66d6fd7f6604d73f6d1a495f
ipplan2sqlite/tests/TestParser.py
ipplan2sqlite/tests/TestParser.py
import os
import sqlite3
import sys
import unittest

path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '../lib' ) )
sys.path.insert( 1, path )
import parser

class TestParser(unittest.TestCase):

    def setUp(self):
        self.conn = sqlite3.connect(':memory:')

    def testParseIPv4(self):
        self.assertEquals( parser.ip2long( '8.8.8.8', 4 ), 134744072 )
        self.assertEquals( parser.ip2long( '77.80.251.247/32', 4 ), 1297153015 )

    def testParserMapping(self):
        self.assertEquals( parser.parser_func( ["#$ d20--b.event.dreamhack.local\t\t\t10.0.3.45\t\t\tipv4f;ipv4r;tblswmgmt"] ), "host" )
        self.assertEquals( parser.parser_func( ["""TECH-SRV-1-INT D-FW-V 77.80.231.0/27 921 othernet"""]) , "network" )
        self.assertEquals( parser.parser_func( ["""#@ IPV4-NET 77.80.128.0/17"""] ), "master_network" )

def main():
    unittest.main()

if __name__ == '__main__':
    main()
Add first couple of unit tests for ipplan2sqlite
Add first couple of unit tests for ipplan2sqlite
Python
bsd-3-clause
nlindblad/ipplan2sqlite,nlindblad/ipplan2sqlite,nlindblad/ipplan2sqlite,nlindblad/ipplan2sqlite,nlindblad/ipplan2sqlite
--- +++ @@ -0,0 +1,28 @@ +import os +import sqlite3 +import sys +import unittest + +path = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '../lib' ) ) +sys.path.insert( 1, path ) +import parser + +class TestParser(unittest.TestCase): + + def setUp(self): + self.conn = sqlite3.connect(':memory:') + + def testParseIPv4(self): + self.assertEquals( parser.ip2long( '8.8.8.8', 4 ), 134744072 ) + self.assertEquals( parser.ip2long( '77.80.251.247/32', 4 ), 1297153015 ) + + def testParserMapping(self): + self.assertEquals( parser.parser_func( ["#$ d20--b.event.dreamhack.local\t\t\t10.0.3.45\t\t\tipv4f;ipv4r;tblswmgmt"] ), "host" ) + self.assertEquals( parser.parser_func( ["""TECH-SRV-1-INT D-FW-V 77.80.231.0/27 921 othernet"""]) , "network" ) + self.assertEquals( parser.parser_func( ["""#@ IPV4-NET 77.80.128.0/17"""] ), "master_network" ) + +def main(): + unittest.main() + +if __name__ == '__main__': + main()
051d00f65d4f5a8f03c7a91dcd1a74b454fd7943
test-CijUtil.py
test-CijUtil.py
import CijUtil
import numpy as np
import unittest

class TestInvertCijFunctions(unittest.TestCase):

    def setUp(self):
        self.inmatrix = np.matrix([[0.700, 0.200],[0.400, 0.600]])
        self.inerrors = np.matrix([[0.007, 0.002],[0.004, 0.006]])
        self.true_inv = np.matrix([[1.765, -0.588],[-1.177, 2.059]])
        self.true_err = np.sqrt(np.matrix([[5.269E-4, 1.603E-4],[6.413E-4, 7.172E-4]]))
        self.true_cov = np.array([[[[5.269E-4,-2.245E-4],[-4.490E-4, 2.514E-4]],
                                   [[-2.245E-4,1.603E-4],[2.514E-4,-2.619E-4]]],
                                  [[[-4.490E-4, 2.514E-4],[6.413E-4, -5.238E-4]],
                                   [[2.514E-4, -2.619E-4],[-5.238E-4,7.172E-4]]]])
        (self.calc_inv, self.calc_err, self.calc_cov) = CijUtil.invertCij(self.inmatrix, self.inerrors)

    def test_inverse(self):
        for i in range(2):
            for j in range(2):
                self.assertAlmostEqual(self.calc_inv[i,j], self.true_inv[i,j], 2)

    def test_inverseErrors(self):
        for i in range(2):
            for j in range(2):
                self.assertAlmostEqual(self.calc_err[i,j], self.true_err[i,j], 4)

    def test_inverseCovar(self):
        for i in range(2):
            for j in range(2):
                for k in range(2):
                    for l in range(2):
                        self.assertAlmostEqual(self.calc_cov[i,j,k,l], self.true_cov[i,j,k,l], 7)

if __name__ == '__main__':
    unittest.main()
Test harness and tests for the invert function
Test harness and tests for the invert function
Python
bsd-3-clause
andreww/elastic-constants,duyuan11/elastic-constants
--- +++ @@ -0,0 +1,37 @@ +import CijUtil +import numpy as np +import unittest + +class TestInvertCijFunctions(unittest.TestCase): + + def setUp(self): + self.inmatrix = np.matrix([[0.700, 0.200],[0.400, 0.600]]) + self.inerrors = np.matrix([[0.007, 0.002],[0.004, 0.006]]) + self.true_inv = np.matrix([[1.765, -0.588],[-1.177, 2.059]]) + self.true_err = np.sqrt(np.matrix([[5.269E-4, 1.603E-4],[6.413E-4, 7.172E-4]])) + self.true_cov = np.array([[[[5.269E-4,-2.245E-4],[-4.490E-4, 2.514E-4]], + [[-2.245E-4,1.603E-4],[2.514E-4,-2.619E-4]]], + [[[-4.490E-4, 2.514E-4],[6.413E-4, -5.238E-4]], + [[2.514E-4, -2.619E-4],[-5.238E-4,7.172E-4]]]]) + (self.calc_inv, self.calc_err, self.calc_cov) = CijUtil.invertCij(self.inmatrix, self.inerrors) + + def test_inverse(self): + for i in range(2): + for j in range(2): + self.assertAlmostEqual(self.calc_inv[i,j], self.true_inv[i,j], 2) + + def test_inverseErrors(self): + for i in range(2): + for j in range(2): + self.assertAlmostEqual(self.calc_err[i,j], self.true_err[i,j], 4) + + def test_inverseCovar(self): + for i in range(2): + for j in range(2): + for k in range(2): + for l in range(2): + self.assertAlmostEqual(self.calc_cov[i,j,k,l], self.true_cov[i,j,k,l], 7) + +if __name__ == '__main__': + unittest.main() +