Dataset columns:

  column        type           range
  commit        stringlengths  40–40
  old_file      stringlengths  4–150
  new_file      stringlengths  4–150
  old_contents  stringlengths  0–3.26k
  new_contents  stringlengths  1–4.43k
  subject       stringlengths  15–501
  message       stringlengths  15–4.06k
  lang          stringclasses  4 values
  license       stringclasses  13 values
  repos         stringlengths  5–91.5k
  diff          stringlengths  0–4.35k
5b8518d3b7bdd55ee20dec81f18c4b9a8732decd
test/views/test_failures.py
test/views/test_failures.py
from textwrap import dedent

import pytest

from puppetboard.views.failures import get_friendly_error


# flake8: noqa
@pytest.mark.parametrize("raw_message,friendly_message", [
    ("Could not retrieve catalog from remote server: Error 500 on SERVER: Server Error: Evaluation "
     "Error: Error while evaluating a Resource Statement, Evaluation Error: Error while evaluating "
     "a Function Call, This envs has Consul ACLs enabled. Please add the app 'statsproxy' to the "
     "'profiles::consul::server::policies' hiera key. (file: "
     "/etc/puppetlabs/code/environments/patch/modules/consul_wrapper/functions/service"
     "/get_acl_token.pp, line: 22, column: 7) (file: "
     "/etc/puppetlabs/code/environments/patch/modules/roles/manifests/tomcat/stats.pp, line: 39) "
     "on node foo.bar.com", """
    Error while evaluating a Resource Statement:

    Error while evaluating a Function Call:

    This envs has Consul ACLs enabled. Please add the app 'statsproxy' to the 'profiles::consul::server::policies' hiera key. (file: …/patch/modules/consul_wrapper/functions/service/get_acl_token.pp, line: 22, column: 7)

    …in …/patch/modules/roles/manifests/tomcat/stats.pp, line: 39.
    """),

    ("Could not retrieve catalog from remote server: Error 500 on SERVER: Server Error: "
     "Evaluation Error: Error while evaluating a Method call, Could not find class "
     "::profiles::snapshot_restore for foo.bar.com (file: "
     "/etc/puppetlabs/code/environments/qa/manifests/site.pp, line: 31, column: 7) on node "
     "foo.bar.com", """
    Error while evaluating a Method call:

    Could not find class ::profiles::snapshot_restore

    …in …/qa/manifests/site.pp, line: 31, column: 7.
    """),
])
def test_get_friendly_error(raw_message, friendly_message):
    raw_message = dedent(raw_message)
    friendly_message = dedent(friendly_message).strip()
    assert get_friendly_error("Puppet", raw_message, "foo.bar.com") == friendly_message
Add tests for friendly errors
Add tests for friendly errors
Python
apache-2.0
voxpupuli/puppetboard,voxpupuli/puppetboard,voxpupuli/puppetboard
--- +++ @@ -0,0 +1,42 @@ +from textwrap import dedent + +import pytest + +from puppetboard.views.failures import get_friendly_error + + +# flake8: noqa +@pytest.mark.parametrize("raw_message,friendly_message", [ + ("Could not retrieve catalog from remote server: Error 500 on SERVER: Server Error: Evaluation " + "Error: Error while evaluating a Resource Statement, Evaluation Error: Error while evaluating " + "a Function Call, This envs has Consul ACLs enabled. Please add the app 'statsproxy' to the " + "'profiles::consul::server::policies' hiera key. (file: " + "/etc/puppetlabs/code/environments/patch/modules/consul_wrapper/functions/service" + "/get_acl_token.pp, line: 22, column: 7) (file: " + "/etc/puppetlabs/code/environments/patch/modules/roles/manifests/tomcat/stats.pp, line: 39) " + "on node foo.bar.com", """ + Error while evaluating a Resource Statement: + + Error while evaluating a Function Call: + + This envs has Consul ACLs enabled. Please add the app 'statsproxy' to the 'profiles::consul::server::policies' hiera key. (file: …/patch/modules/consul_wrapper/functions/service/get_acl_token.pp, line: 22, column: 7) + + …in …/patch/modules/roles/manifests/tomcat/stats.pp, line: 39. + """), + + ("Could not retrieve catalog from remote server: Error 500 on SERVER: Server Error: " + "Evaluation Error: Error while evaluating a Method call, Could not find class " + "::profiles::snapshot_restore for foo.bar.com (file: " + "/etc/puppetlabs/code/environments/qa/manifests/site.pp, line: 31, column: 7) on node " + "foo.bar.com", """ + Error while evaluating a Method call: + + Could not find class ::profiles::snapshot_restore + + …in …/qa/manifests/site.pp, line: 31, column: 7. + """), +]) +def test_get_friendly_error(raw_message, friendly_message): + raw_message = dedent(raw_message) + friendly_message = dedent(friendly_message).strip() + assert get_friendly_error("Puppet", raw_message, "foo.bar.com") == friendly_message
5b7b301c3f9dd906b8450acc5b28dbcb35fe973a
candidates/management/commands/candidates_fix_not_standing.py
candidates/management/commands/candidates_fix_not_standing.py
from __future__ import print_function, unicode_literals

from django.core.management.base import BaseCommand

from popolo.models import Membership

from candidates.models import PersonExtra

class Command(BaseCommand):

    help = "Find elections in not_standing that should be removed"

    def add_arguments(self, parser):
        parser.add_argument(
            '--delete', action='store_true',
            help="Don't just find these broken cases, also fix them",
        )

    def handle(self, *args, **options):
        for person_extra in PersonExtra.objects.filter(
                not_standing__isnull=False
        ):
            election_to_remove = []
            for election in person_extra.not_standing.all():
                candidacies = Membership.objects.filter(
                    person=person_extra.base,
                    extra__election=election,
                    role=election.candidate_membership_role,
                )
                if candidacies.exists():
                    election_to_remove.append(election)
            # Now print out the elections we would remove from
            # not_standing for that person. (And, if --delete is
            # specified, actually remove it.)
            for election in election_to_remove:
                fmt = '{person} is marked as not standing in {election}'
                print(fmt.format(person=person_extra.base, election=election))
                print(' ... but also has a candidacy in that election!')
                if options['delete']:
                    fmt = " Deleting {election} from {person}'s not_standing"
                    print(fmt.format(
                        election=election.name,
                        person=person_extra.base.name,
                    ))
                    person_extra.not_standing.remove(election)
Add a script to fix the not_standing relationships of people
Add a script to fix the not_standing relationships of people

There was a bug in bulk adding people which meant that their "not_standing" status for an election wasn't removed when reinstating them as a candidate in that election. That bug has been fixed in the parent commit, but there are still people in the database who have a candidacy (represented by a Membership and MembershipExtra) in an election, but also have that election in their not_standing.

This commit introduces a script that will find those cases, report them and (if --delete is specified) fix the inconsistency by removing the election from their not_standing.
Python
agpl-3.0
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
--- +++ @@ -0,0 +1,45 @@ +from __future__ import print_function, unicode_literals + +from django.core.management.base import BaseCommand + +from popolo.models import Membership + +from candidates.models import PersonExtra + +class Command(BaseCommand): + + help = "Find elections in not_standing that should be removed" + + def add_arguments(self, parser): + parser.add_argument( + '--delete', action='store_true', + help="Don't just find these broken cases, also fix them", + ) + + def handle(self, *args, **options): + for person_extra in PersonExtra.objects.filter( + not_standing__isnull=False + ): + election_to_remove = [] + for election in person_extra.not_standing.all(): + candidacies = Membership.objects.filter( + person=person_extra.base, + extra__election=election, + role=election.candidate_membership_role, + ) + if candidacies.exists(): + election_to_remove.append(election) + # Now print out the elections we would remove from + # not_standing for that person. (And, if --delete is + # specified, actually remove it.) + for election in election_to_remove: + fmt = '{person} is marked as not standing in {election}' + print(fmt.format(person=person_extra.base, election=election)) + print(' ... but also has a candidacy in that election!') + if options['delete']: + fmt = " Deleting {election} from {person}'s not_standing" + print(fmt.format( + election=election.name, + person=person_extra.base.name, + )) + person_extra.not_standing.remove(election)
a2efa662f0f5b8fe77da5673cb6d6df2e2f583d2
django/website/contacts/migrations/0004_auto_20160421_1645.py
django/website/contacts/migrations/0004_auto_20160421_1645.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


def add_user_profiles(apps, schema_editor):
    User = apps.get_model('contacts', 'User')
    UserPreferences = apps.get_model('contacts', 'UserPreferences')

    for user in User.objects.all():
        UserPreferences.objects.create(user=user)


class Migration(migrations.Migration):

    dependencies = [
        ('contacts', '0003_auto_20160420_1628'),
    ]

    operations = [
        migrations.RunPython(add_user_profiles)
    ]
Add migration to create user profiles
Add migration to create user profiles
Python
agpl-3.0
aptivate/kashana,aptivate/alfie,daniell/kashana,aptivate/alfie,daniell/kashana,aptivate/kashana,aptivate/alfie,daniell/kashana,daniell/kashana,aptivate/kashana,aptivate/alfie,aptivate/kashana
--- +++ @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +def add_user_profiles(apps, schema_editor): + User = apps.get_model('contacts', 'User') + UserPreferences = apps.get_model('contacts', 'UserPreferences') + + for user in User.objects.all(): + UserPreferences.objects.create(user=user) + + +class Migration(migrations.Migration): + + dependencies = [ + ('contacts', '0003_auto_20160420_1628'), + ] + + operations = [ + migrations.RunPython(add_user_profiles) + ]
572f6d8e789495fc34ed67230b10b0c1f0b3572f
helusers/tests/test_utils.py
helusers/tests/test_utils.py
import pytest
import random
from uuid import UUID
from helusers.utils import uuid_to_username, username_to_uuid


def test_uuid_to_username():
    assert uuid_to_username('00fbac99-0bab-5e66-8e84-2e567ea4d1f6') == 'u-ad52zgilvnpgnduefzlh5jgr6y'


def test_username_to_uuid():
    assert username_to_uuid('u-ad52zgilvnpgnduefzlh5jgr6y') == UUID('00fbac99-0bab-5e66-8e84-2e567ea4d1f6')


def test_reflective_username_uuid_relationship():
    rd = random.Random()
    rd.seed(0)

    for uuid in [UUID(int=rd.getrandbits(128)) for i in range(0,100)]:
        assert username_to_uuid(uuid_to_username(uuid)) == uuid
Add some tests for helusers
Add some tests for helusers
Python
bsd-2-clause
City-of-Helsinki/django-helusers,City-of-Helsinki/django-helusers
--- +++ @@ -0,0 +1,20 @@ +import pytest +import random +from uuid import UUID +from helusers.utils import uuid_to_username, username_to_uuid + + +def test_uuid_to_username(): + assert uuid_to_username('00fbac99-0bab-5e66-8e84-2e567ea4d1f6') == 'u-ad52zgilvnpgnduefzlh5jgr6y' + + +def test_username_to_uuid(): + assert username_to_uuid('u-ad52zgilvnpgnduefzlh5jgr6y') == UUID('00fbac99-0bab-5e66-8e84-2e567ea4d1f6') + + +def test_reflective_username_uuid_relationship(): + rd = random.Random() + rd.seed(0) + + for uuid in [UUID(int=rd.getrandbits(128)) for i in range(0,100)]: + assert username_to_uuid(uuid_to_username(uuid)) == uuid
3127cface44165d3200657c3fa626a5051c6ad48
tests/test_show_resource.py
tests/test_show_resource.py
from nose.plugins.attrib import attr

from rightscale.rightscale import RightScale, Resource


@attr('rc_creds', 'real_conn')
def test_show_first_cloud():
    api = RightScale()
    res = api.clouds.show(res_id=1)
    assert isinstance(res, Resource)
Test API call that only returns a single Resource
Test API call that only returns a single Resource
Python
mit
diranged/python-rightscale-1,brantai/python-rightscale
--- +++ @@ -0,0 +1,10 @@ +from nose.plugins.attrib import attr + +from rightscale.rightscale import RightScale, Resource + + +@attr('rc_creds', 'real_conn') +def test_show_first_cloud(): + api = RightScale() + res = api.clouds.show(res_id=1) + assert isinstance(res, Resource)
d76809021c99f841cd8d123058d307404b7c025c
py/split-array-into-consecutive-subsequences.py
py/split-array-into-consecutive-subsequences.py
from itertools import groupby
class Solution(object):
    def isPossible(self, nums):
        """
        :type nums: List[int]
        :rtype: bool
        """
        prev = None
        not_full_1, not_full_2, attach = 0, 0, 0
        for n, items in groupby(nums):
            cnt = len(list(items))
            if prev is None:
                not_full_1 = cnt
            elif prev + 1 == n:
                if not_full_1 + not_full_2 > cnt:
                    return False
                else:
                    cnt -= not_full_1 + not_full_2
                    attach = min(attach, cnt)
                    cnt -= attach
                    not_full_1, not_full_2, attach = cnt, not_full_1, not_full_2 + attach
            else:
                if not_full_1 + not_full_2 > 0:
                    return False
                not_full_1, not_full_2, attach = cnt, 0, 0
            prev = n
        return not_full_1 + not_full_2 == 0
Add py solution for 659. Split Array into Consecutive Subsequences
Add py solution for 659. Split Array into Consecutive Subsequences

659. Split Array into Consecutive Subsequences:
https://leetcode.com/problems/split-array-into-consecutive-subsequences/
Python
apache-2.0
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
--- +++ @@ -0,0 +1,27 @@ +from itertools import groupby +class Solution(object): + def isPossible(self, nums): + """ + :type nums: List[int] + :rtype: bool + """ + prev = None + not_full_1, not_full_2, attach = 0, 0, 0 + for n, items in groupby(nums): + cnt = len(list(items)) + if prev is None: + not_full_1 = cnt + elif prev + 1 == n: + if not_full_1 + not_full_2 > cnt: + return False + else: + cnt -= not_full_1 + not_full_2 + attach = min(attach, cnt) + cnt -= attach + not_full_1, not_full_2, attach = cnt, not_full_1, not_full_2 + attach + else: + if not_full_1 + not_full_2 > 0: + return False + not_full_1, not_full_2, attach = cnt, 0, 0 + prev = n + return not_full_1 + not_full_2 == 0
f8db46b40629cfdb145a4a000d47277f72090c5b
powerline/lib/memoize.py
powerline/lib/memoize.py
# vim:fileencoding=utf-8:noet

from functools import wraps
import time


def default_cache_key(**kwargs):
    return frozenset(kwargs.items())


class memoize(object):
    '''Memoization decorator with timeout.'''
    def __init__(self, timeout, cache_key=default_cache_key, cache_reg_func=None):
        self.timeout = timeout
        self.cache_key = cache_key
        self.cache = {}
        self.cache_reg_func = cache_reg_func

    def __call__(self, func):
        @wraps(func)
        def decorated_function(**kwargs):
            if self.cache_reg_func:
                self.cache_reg_func(self.cache)
                self.cache_reg_func = None

            key = self.cache_key(**kwargs)
            try:
                cached = self.cache.get(key, None)
            except TypeError:
                return func(**kwargs)
            if cached is None or time.time() - cached['time'] > self.timeout:
                cached = self.cache[key] = {
                    'result': func(**kwargs),
                    'time': time.time(),
                }
            return cached['result']
        return decorated_function
# vim:fileencoding=utf-8:noet

from functools import wraps
try:
    # Python>=3.3, the only valid clock source for this job
    from time import monotonic as time
except ImportError:
    # System time, is affected by clock updates.
    from time import time


def default_cache_key(**kwargs):
    return frozenset(kwargs.items())


class memoize(object):
    '''Memoization decorator with timeout.'''
    def __init__(self, timeout, cache_key=default_cache_key, cache_reg_func=None):
        self.timeout = timeout
        self.cache_key = cache_key
        self.cache = {}
        self.cache_reg_func = cache_reg_func

    def __call__(self, func):
        @wraps(func)
        def decorated_function(**kwargs):
            if self.cache_reg_func:
                self.cache_reg_func(self.cache)
                self.cache_reg_func = None

            key = self.cache_key(**kwargs)
            try:
                cached = self.cache.get(key, None)
            except TypeError:
                return func(**kwargs)
            # Handle case when time() appears to be less then cached['time'] due
            # to clock updates. Not applicable for monotonic clock, but this
            # case is currently rare.
            if cached is None or not (cached['time'] < time() < cached['time'] + self.timeout):
                cached = self.cache[key] = {
                    'result': func(**kwargs),
                    'time': time(),
                }
            return cached['result']
        return decorated_function
Use proper clock if possible
Use proper clock if possible
Python
mit
Liangjianghao/powerline,kenrachynski/powerline,darac/powerline,darac/powerline,bezhermoso/powerline,firebitsbr/powerline,bartvm/powerline,cyrixhero/powerline,junix/powerline,prvnkumar/powerline,s0undt3ch/powerline,S0lll0s/powerline,Luffin/powerline,EricSB/powerline,dragon788/powerline,prvnkumar/powerline,wfscheper/powerline,xfumihiro/powerline,magus424/powerline,IvanAli/powerline,cyrixhero/powerline,seanfisk/powerline,bartvm/powerline,wfscheper/powerline,dragon788/powerline,xfumihiro/powerline,magus424/powerline,cyrixhero/powerline,xxxhycl2010/powerline,dragon788/powerline,lukw00/powerline,DoctorJellyface/powerline,blindFS/powerline,seanfisk/powerline,blindFS/powerline,IvanAli/powerline,IvanAli/powerline,keelerm84/powerline,Luffin/powerline,s0undt3ch/powerline,Liangjianghao/powerline,blindFS/powerline,S0lll0s/powerline,EricSB/powerline,lukw00/powerline,junix/powerline,areteix/powerline,junix/powerline,QuLogic/powerline,prvnkumar/powerline,seanfisk/powerline,bezhermoso/powerline,QuLogic/powerline,russellb/powerline,bezhermoso/powerline,russellb/powerline,bartvm/powerline,darac/powerline,lukw00/powerline,kenrachynski/powerline,firebitsbr/powerline,areteix/powerline,magus424/powerline,xfumihiro/powerline,Luffin/powerline,keelerm84/powerline,s0undt3ch/powerline,DoctorJellyface/powerline,wfscheper/powerline,xxxhycl2010/powerline,xxxhycl2010/powerline,firebitsbr/powerline,russellb/powerline,EricSB/powerline,DoctorJellyface/powerline,Liangjianghao/powerline,areteix/powerline,S0lll0s/powerline,kenrachynski/powerline,QuLogic/powerline
--- +++ @@ -1,7 +1,12 @@ # vim:fileencoding=utf-8:noet from functools import wraps -import time +try: + # Python>=3.3, the only valid clock source for this job + from time import monotonic as time +except ImportError: + # System time, is affected by clock updates. + from time import time def default_cache_key(**kwargs): @@ -28,10 +33,13 @@ cached = self.cache.get(key, None) except TypeError: return func(**kwargs) - if cached is None or time.time() - cached['time'] > self.timeout: + # Handle case when time() appears to be less then cached['time'] due + # to clock updates. Not applicable for monotonic clock, but this + # case is currently rare. + if cached is None or not (cached['time'] < time() < cached['time'] + self.timeout): cached = self.cache[key] = { 'result': func(**kwargs), - 'time': time.time(), + 'time': time(), } return cached['result'] return decorated_function
0886d0fe49f4176bfe6860c643d240a9b7e0053d
db/player_draft.py
db/player_draft.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from .common import Base, session_scope


class PlayerDraft(Base):
    __tablename__ = 'player_drafts'
    __autoload__ = True

    def __init__(self, player_id, team_id, year, round, overall, dft_type='e'):
        self.player_id = player_id
        self.team_id = team_id
        self.year = year
        self.round = round
        self.overall = overall
        self.draft_type = dft_type

    @classmethod
    def find_by_player_id(self, player_id):
        with session_scope() as session:
            try:
                plr_draft = session.query(PlayerDraft).filter(
                    PlayerDraft.player_id == player_id
                ).all()
            except:
                plr_draft = None
            return plr_draft

    def update(self, other):
        for attr in ['team_id', 'year', 'round', 'overall', 'draft_type']:
            if hasattr(other, attr):
                setattr(self, attr, getattr(other, attr))

    def __eq__(self, other):
        return (
            (self.team_id, self.year, self.round,
             self.overall, self.draft_type
             ) ==
            (other.team_id, other.year, other.round,
             other.overall, other.draft_type
             ))

    def __ne__(self, other):
        return not self == other
Integrate initial version of player draft item
Integrate initial version of player draft item
Python
mit
leaffan/pynhldb
--- +++ @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +from .common import Base, session_scope + + +class PlayerDraft(Base): + __tablename__ = 'player_drafts' + __autoload__ = True + + def __init__(self, player_id, team_id, year, round, overall, dft_type='e'): + self.player_id = player_id + self.team_id = team_id + self.year = year + self.round = round + self.overall = overall + self.draft_type = dft_type + + @classmethod + def find_by_player_id(self, player_id): + with session_scope() as session: + try: + plr_draft = session.query(PlayerDraft).filter( + PlayerDraft.player_id == player_id + ).all() + except: + plr_draft = None + return plr_draft + + def update(self, other): + for attr in ['team_id', 'year', 'round', 'overall', 'draft_type']: + if hasattr(other, attr): + setattr(self, attr, getattr(other, attr)) + + def __eq__(self, other): + return ( + (self.team_id, self.year, self.round, + self.overall, self.draft_type + ) == + (other.team_id, other.year, other.round, + other.overall, other.draft_type + )) + + def __ne__(self, other): + return not self == other
699f1f42e0387ac542cbe0905f825079e7aab755
testupload.py
testupload.py
#!/usr/bin/python

from datetime import datetime
from datetime import timedelta
import subprocess
import time
import logging

from wrappers import GPhoto
from wrappers import Identify
from wrappers import Curl

#sudo /usr/local/bin/gphoto2 --capture-image-and-download --filename 'test3.jpg'
#curl --form "fileupload=@test7.jpg" http://192.168.178.197:5000/

MIN_INTER_SHOT_DELAY_SECONDS = timedelta(seconds=30)
UPLOAD_URL = "http://upload-test:5000/"
LOG_NAME = "timelapse.log"

def main():
    print "Timelapse upload test"
    camera = GPhoto(subprocess)
    idy = Identify(subprocess)
    curl = Curl(subprocess)

    logging.basicConfig(filename=LOG_NAME,
                        filemode='a',
                        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
                        datefmt='%H:%M:%S',
                        level=logging.DEBUG)

    logging.info("Starting timelapse")
    logger = logging.getLogger('timelapse')

    # myLogger.debug(msg)
    # myLogger.info(msg)
    # myLogger.warn(msg)
    # myLogger.error(msg)
    # myLogger.critical(msg)

    current_config = 25 #11
    shot = 0
    prev_acquired = None
    last_acquired = None
    last_started = None

    try:
        while True:
            last_started = datetime.now()
            last_acquired = datetime.now()
            filename = "20170421-024718.jpg"

            curl.fileupload(filename, UPLOAD_URL)

            if last_started and last_acquired and last_acquired - last_started < MIN_INTER_SHOT_DELAY_SECONDS:
                print "Sleeping for %s" % str(MIN_INTER_SHOT_DELAY_SECONDS - (last_acquired - last_started))

                time.sleep((MIN_INTER_SHOT_DELAY_SECONDS - (last_acquired - last_started)).seconds)

            print "Forced sleep"
            time.sleep(MIN_INTER_SHOT_DELAY_SECONDS.seconds)

            shot = shot + 1
    except Exception,e:
        print str(e)
        logger.error(e)
        logging.shutdown()


if __name__ == "__main__":
    main()
Add upload and delay test
Add upload and delay test
Python
mit
Lakerfield/timelapse
--- +++ @@ -0,0 +1,71 @@ +#!/usr/bin/python + +from datetime import datetime +from datetime import timedelta +import subprocess +import time +import logging + +from wrappers import GPhoto +from wrappers import Identify +from wrappers import Curl + +#sudo /usr/local/bin/gphoto2 --capture-image-and-download --filename 'test3.jpg' +#curl --form "fileupload=@test7.jpg" http://192.168.178.197:5000/ + +MIN_INTER_SHOT_DELAY_SECONDS = timedelta(seconds=30) +UPLOAD_URL = "http://upload-test:5000/" +LOG_NAME = "timelapse.log" + +def main(): + print "Timelapse upload test" + camera = GPhoto(subprocess) + idy = Identify(subprocess) + curl = Curl(subprocess) + + logging.basicConfig(filename=LOG_NAME, + filemode='a', + format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s', + datefmt='%H:%M:%S', + level=logging.DEBUG) + + logging.info("Starting timelapse") + logger = logging.getLogger('timelapse') + + # myLogger.debug(msg) + # myLogger.info(msg) + # myLogger.warn(msg) + # myLogger.error(msg) + # myLogger.critical(msg) + + current_config = 25 #11 + shot = 0 + prev_acquired = None + last_acquired = None + last_started = None + + try: + while True: + last_started = datetime.now() + last_acquired = datetime.now() + filename = "20170421-024718.jpg" + + curl.fileupload(filename, UPLOAD_URL) + + if last_started and last_acquired and last_acquired - last_started < MIN_INTER_SHOT_DELAY_SECONDS: + print "Sleeping for %s" % str(MIN_INTER_SHOT_DELAY_SECONDS - (last_acquired - last_started)) + + time.sleep((MIN_INTER_SHOT_DELAY_SECONDS - (last_acquired - last_started)).seconds) + + print "Forced sleep" + time.sleep(MIN_INTER_SHOT_DELAY_SECONDS.seconds) + + shot = shot + 1 + except Exception,e: + print str(e) + logger.error(e) + logging.shutdown() + + +if __name__ == "__main__": + main()
67164cadc3f3445298da2fb490971cf22e2f146b
curious/ext/loapi/__init__.py
curious/ext/loapi/__init__.py
""" A lower-level State that doesn't do any special object handling. """ import inspect import typing from curious.gateway import Gateway from curious.state import State class PureDispatchState(State): """ A lower-level State that doesn't do any special object handling. This state allows you to pass JSON data straight to the event handlers registered on the Client instance. To use this instead of the base high-level state, you have to pass this as a class to the Client instance: .. code:: python my_client = Client(state_klass=PureDispatchState) """ def _fake_handle(self, event_name: str): """ Returns a function that can pretend to handle the event, when all it does is dispatch the raw data. :param event_name: The event name we're handling. """ async def _inner(gw: Gateway, event_data: dict): await self.client.dispatch(event_name, event_data, gateway=gw) return _inner def __getattribute__(self, item): # Intercept any `handle_` things. if not item.startswith("handle_"): return super().__getattribute__(item) return self._fake_handle(item.split("handle_")[1]) class CallbackState(PureDispatchState): """ An even lower-level State that invokes a single callback when an event is received. This callback must have the signature of (gw: Gateway, event: str, data: dict) -> None. This state can be passed directly into a Gateway instance to be used as the state instance. """ def __init__(self, callback: typing.Callable[[Gateway, str, dict], None]): super().__init__(None) self.callback = callback def _fake_handle(self, event_name: str): async def _inner(gw: Gateway, event_data: dict): result = self.callback(gw, event_name, event_data) if inspect.isawaitable(result): result = await result return _inner
Add very low level state handlers.
Add very low level state handlers.
Python
mit
SunDwarf/curious
--- +++ @@ -0,0 +1,62 @@ +""" +A lower-level State that doesn't do any special object handling. +""" +import inspect +import typing + +from curious.gateway import Gateway +from curious.state import State + + +class PureDispatchState(State): + """ + A lower-level State that doesn't do any special object handling. + This state allows you to pass JSON data straight to the event handlers registered on the Client instance. + + To use this instead of the base high-level state, you have to pass this as a class to the Client instance: + + .. code:: python + + my_client = Client(state_klass=PureDispatchState) + """ + + def _fake_handle(self, event_name: str): + """ + Returns a function that can pretend to handle the event, when all it does is dispatch the raw data. + + :param event_name: The event name we're handling. + """ + + async def _inner(gw: Gateway, event_data: dict): + await self.client.dispatch(event_name, event_data, gateway=gw) + + return _inner + + def __getattribute__(self, item): + # Intercept any `handle_` things. + if not item.startswith("handle_"): + return super().__getattribute__(item) + + return self._fake_handle(item.split("handle_")[1]) + + +class CallbackState(PureDispatchState): + """ + An even lower-level State that invokes a single callback when an event is received. + + This callback must have the signature of (gw: Gateway, event: str, data: dict) -> None. + + This state can be passed directly into a Gateway instance to be used as the state instance. + """ + def __init__(self, callback: typing.Callable[[Gateway, str, dict], None]): + super().__init__(None) + + self.callback = callback + + def _fake_handle(self, event_name: str): + async def _inner(gw: Gateway, event_data: dict): + result = self.callback(gw, event_name, event_data) + if inspect.isawaitable(result): + result = await result + + return _inner
af7b495b954bb624cbd95e0019fa3b2cb3be6b05
rsa.py
rsa.py
#!/usr/local/bin/python

"""
RSA.py

@author Elliot and Erica
"""

import random
from cryptography_utilities import (block_split, decimal_to_binary,
    binary_to_decimal, gcd, extended_gcd, random_prime, left_pad,
    pad_plaintext, unpad_plaintext, random_relative_prime,
    group_exponentiation)

MODULUS_BITS = 16

def key_generation():
    """Return a tuple of (prime1, prime2, modulus, public_key, private_key).
    the size of the prime numbers is determined by the PRIME_BITS global.
    """
    prime1 = random_prime(MODULUS_BITS / 2)
    prime2 = random_prime(MODULUS_BITS / 2)
    modulus = prime1 * prime2
    totient = (prime1 - 1) * (prime2 - 1)
    public_key = random_relative_prime(totient, MODULUS_BITS / 2)
    if extended_gcd(public_key, totient)[1] < 0:
        private_key = extended_gcd(public_key, totient)[1] + totient
    else:
        private_key = extended_gcd(public_key, totient)[1]
    return modulus, public_key, private_key

def plaintext_block_size():
    """Determine a block size using the MODULUS_BITS global. The value
    will be a multiple of eight and less than MODULUS_BITS.
    """
    return (MODULUS_BITS - 1) - ((MODULUS_BITS - 1) % 8)

def rsa_exponentiation(text, modulus, key):
    """Perform modular exponentiation of a message based on a key. I.E.
    (text^k) = text (mod modulus).
    """
    integer_transformation = pow(binary_to_decimal(text), key, modulus)
    return decimal_to_binary(integer_transformation)

def encrypt(binary_plaintext, modulus, public_key):
    """Generate binary ciphertext from binary plaintext with RSA."""
    padded_plaintext = pad_plaintext(binary_plaintext, plaintext_block_size())
    return ''.join(left_pad(rsa_exponentiation(block, modulus, public_key),
                            MODULUS_BITS)
                   for block in block_split(padded_plaintext,
                                            plaintext_block_size()))

def decrypt(binary_ciphertext, modulus, private_key):
    """Reveal binary plaintext from binary ciphertext with RSA."""
    plaintext = ''.join(left_pad(rsa_exponentiation(block, modulus, private_key),
                                 plaintext_block_size())
                        for block in block_split(binary_ciphertext, MODULUS_BITS))
    return unpad_plaintext(plaintext)
Implement a public-key cipher (RSA)
Implement a public-key cipher (RSA)
Python
mit
ElliotPenson/cryptography
--- +++ @@ -0,0 +1,58 @@ +#!/usr/local/bin/python + +""" +RSA.py + +@author Elliot and Erica +""" + +import random +from cryptography_utilities import (block_split, decimal_to_binary, + binary_to_decimal, gcd, extended_gcd, random_prime, left_pad, + pad_plaintext, unpad_plaintext, random_relative_prime, + group_exponentiation) + +MODULUS_BITS = 16 + +def key_generation(): + """Return a tuple of (prime1, prime2, modulus, public_key, private_key). + the size of the prime numbers is determined by the PRIME_BITS global. + """ + prime1 = random_prime(MODULUS_BITS / 2) + prime2 = random_prime(MODULUS_BITS / 2) + modulus = prime1 * prime2 + totient = (prime1 - 1) * (prime2 - 1) + public_key = random_relative_prime(totient, MODULUS_BITS / 2) + if extended_gcd(public_key, totient)[1] < 0: + private_key = extended_gcd(public_key, totient)[1] + totient + else: + private_key = extended_gcd(public_key, totient)[1] + return modulus, public_key, private_key + +def plaintext_block_size(): + """Determine a block size using the MODULUS_BITS global. The value + will be a multiple of eight and less than MODULUS_BITS. + """ + return (MODULUS_BITS - 1) - ((MODULUS_BITS - 1) % 8) + +def rsa_exponentiation(text, modulus, key): + """Perform modular exponentiation of a message based on a key. I.E. + (text^k) = text (mod modulus). + """ + integer_transformation = pow(binary_to_decimal(text), key, modulus) + return decimal_to_binary(integer_transformation) + +def encrypt(binary_plaintext, modulus, public_key): + """Generate binary ciphertext from binary plaintext with RSA.""" + padded_plaintext = pad_plaintext(binary_plaintext, plaintext_block_size()) + return ''.join(left_pad(rsa_exponentiation(block, modulus, public_key), + MODULUS_BITS) + for block in block_split(padded_plaintext, + plaintext_block_size())) + +def decrypt(binary_ciphertext, modulus, private_key): + """Reveal binary plaintext from binary ciphertext with RSA.""" + plaintext = ''.join(left_pad(rsa_exponentiation(block, modulus, private_key), + plaintext_block_size()) + for block in block_split(binary_ciphertext, MODULUS_BITS)) + return unpad_plaintext(plaintext)
3f55d16fb40acc07cf07588249126cc543d9ad07
dissector/main.py
dissector/main.py
import os
import sys
sys.path.append('../p4_hlir/')
from p4_hlir.main import HLIR

p4_source = sys.argv[1]
absolute_source = os.path.join(os.getcwd(), p4_source)
if not os.path.isfile(absolute_source):
    print "Source file '" + p4_source + \
          "' could not be opened or does not exist."

hlir = HLIR(absolute_source)
hlir.build()
Read a P4 file and get its HLIR.
Read a P4 file and get its HLIR.
Python
apache-2.0
yo2seol/P4-Wireshark-Dissector
--- +++ @@ -0,0 +1,13 @@ +import os +import sys +sys.path.append('../p4_hlir/') +from p4_hlir.main import HLIR + +p4_source = sys.argv[1] +absolute_source = os.path.join(os.getcwd(), p4_source) +if not os.path.isfile(absolute_source): + print "Source file '" + p4_source + \ + "' could not be opened or does not exist." + +hlir = HLIR(absolute_source) +hlir.build()
31525d83ea74852709b1dd1596854a74c050f9f2
scripts/add_requests.py
scripts/add_requests.py
from google.cloud import firestore
import argparse
import datetime
import names
import random

def queryUsers(db):
    users_ref = db.collection(u'users')
    docs = users_ref.get()
    docList = list()
    for doc in docs:
        docList.append(doc)
    return docList

def queryRequests(db):
    requests_ref = db.collection(u'requests')
    docs = requests_ref.get()
    docList = list()
    for doc in docs:
        docList.append(doc)
    return docList

def printSnapshot(doc):
    print(u'Created {} => {}'.format(doc.id, doc.to_dict()))


def existsRequest(requests, userA, userB):
    for request in requests:
        sender = request.get(u'senderId')
        receiver = request.get(u'receiverId')
        if (userA.id == sender and userB.id == receiver) or (userB.id == sender and userA.id == receiver):
            return True
    return False

def createRequest(sender, receiver):
    doc_ref = db.collection(u'requests').document()
    unread = random.choice([True, False])
    accepted = random.choice([True, False, False])
    if accepted:
        acceptedTimestamp = datetime.datetime.now()
    doc_ref.set({
        u'senderId': sender.id,
        u'receiverId': receiver.id,
        u'sentTimestamp': datetime.datetime.now(),
        u'unread': unread,
        u'accepted': accepted,
        u'acceptedTimestamp': acceptedTimestamp
    })
    doc = doc_ref.get()
    return doc

# very sub-optimal (but it's just a script)
def addRequest(db, users, requests):
    # try upto 50 times
    for i in range(50):
        userA = random.choice(users)
        userB = random.choice(users)
        if userA.id == userB.id:
            continue
        if existsRequest(requests, userA, userB):
            continue
        return createRequest(userA, userB)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--count", type=int, default=5)
    args = parser.parse_args()
    db = firestore.Client()

    users = queryUsers(db)
    requests = queryRequests(db)
    for i in range(0, args.count):
        request = addRequest(db, users, requests)
        if request is None:
            print("Adding a request failed at count:" + str(i))
            break
        requests.append(request)
        printSnapshot(request)


    # Uncomment to query all users
    # queryUsers(db)
Add script to add requests
Add script to add requests
Python
mit
frinder/frinder-app,frinder/frinder-app,frinder/frinder-app
--- +++ @@ -0,0 +1,83 @@ +from google.cloud import firestore +import argparse +import datetime +import names +import random + +def queryUsers(db): + users_ref = db.collection(u'users') + docs = users_ref.get() + docList = list() + for doc in docs: + docList.append(doc) + return docList + +def queryRequests(db): + requests_ref = db.collection(u'requests') + docs = requests_ref.get() + docList = list() + for doc in docs: + docList.append(doc) + return docList + +def printSnapshot(doc): + print(u'Created {} => {}'.format(doc.id, doc.to_dict())) + + +def existsRequest(requests, userA, userB): + for request in requests: + sender = request.get(u'senderId') + receiver = request.get(u'receiverId') + if (userA.id == sender and userB.id == receiver) or (userB.id == sender and userA.id == receiver): + return True + return False + +def createRequest(sender, receiver): + doc_ref = db.collection(u'requests').document() + unread = random.choice([True, False]) + accepted = random.choice([True, False, False]) + if accepted: + acceptedTimestamp = datetime.datetime.now() + doc_ref.set({ + u'senderId': sender.id, + u'receiverId': receiver.id, + u'sentTimestamp': datetime.datetime.now(), + u'unread': unread, + u'accepted': accepted, + u'acceptedTimestamp': acceptedTimestamp + }) + doc = doc_ref.get() + return doc + +# very sub-optimal (but it's just a script) +def addRequest(db, users, requests): + # try upto 50 times + for i in range(50): + userA = random.choice(users) + userB = random.choice(users) + if userA.id == userB.id: + continue + if existsRequest(requests, userA, userB): + continue + return createRequest(userA, userB) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("-c", "--count", type=int, default=5) + args = parser.parse_args() + db = firestore.Client() + + users = queryUsers(db) + requests = queryRequests(db) + for i in range(0, args.count): + request = addRequest(db, users, requests) + if request is None: + print("Adding a request failed at count:" + str(i)) + break + requests.append(request) + printSnapshot(request) + + + # Uncomment to query all users + # queryUsers(db)
e5716c90e97d1364c551701f3bae772f08c9c561
upload/management/commands/import_sheet.py
upload/management/commands/import_sheet.py
import csv
from django.contrib.auth.models import User
from opencivicdata.models import Jurisdiction, Division
from upload.backend.parser import import_stream

from django.core.management.base import BaseCommand, CommandError


class Command(BaseCommand):
    args = '<csv> <jurisdiction> <source> <user>'
    help = 'Load in Sheets'

    def load_csv(self, file_, jurisdiction_id, source, username):

        user = User.objects.get(username=username)

        jurisdiction = Jurisdiction.objects.get(id=jurisdiction_id)
        _, xtn = file_.rsplit(".", 1)

        sources = [source,]

        with open(file_, 'rb') as fd:
            transaction = import_stream(
                fd.read(),
                xtn,
                user,
                jurisdiction,
                sources,
            )

    def handle(self, *args, **options):
        return self.load_csv(*args)
Add management command to do one-off imports.
Add management command to do one-off imports.
Python
bsd-3-clause
opencivicdata/opencivicdata.org,opencivicdata/opencivicdata.org,opencivicdata/opencivicdata.org
--- +++ @@ -0,0 +1,32 @@ +import csv +from django.contrib.auth.models import User +from opencivicdata.models import Jurisdiction, Division +from upload.backend.parser import import_stream + +from django.core.management.base import BaseCommand, CommandError + + +class Command(BaseCommand): + args = '<csv> <jurisdiction> <source> <user>' + help = 'Load in Sheets' + + def load_csv(self, file_, jurisdiction_id, source, username): + + user = User.objects.get(username=username) + + jurisdiction = Jurisdiction.objects.get(id=jurisdiction_id) + _, xtn = file_.rsplit(".", 1) + + sources = [source,] + + with open(file_, 'rb') as fd: + transaction = import_stream( + fd.read(), + xtn, + user, + jurisdiction, + sources, + ) + + def handle(self, *args, **options): + return self.load_csv(*args)
127dbd5779280fc62f56f06f8ef2733b7aa4cdd9
corehq/apps/case_search/tests/test_case_search_registry.py
corehq/apps/case_search/tests/test_case_search_registry.py
import uuid

from django.test import TestCase

from casexml.apps.case.mock import CaseBlock

from corehq.apps.case_search.models import CaseSearchConfig
from corehq.apps.domain.shortcuts import create_user
from corehq.apps.es.tests.utils import (
    case_search_es_setup,
    case_search_es_teardown,
    es_test,
)
from corehq.apps.registry.tests.utils import (
    Grant,
    Invitation,
    create_registry_for_test,
)
from corehq.form_processor.tests.utils import run_with_sql_backend


@es_test
@run_with_sql_backend
class TestCaseSearchRegistry(TestCase):

    # TODO convert to setUpClass
    def setUp(self):
        self.user = create_user("admin", "123")
        self.domain_1 = "jane-the-virgin"
        self.setup_domain(self.domain_1, [
            ("Jane", {"family": "Villanueva"}),
            ("Xiomara", {"family": "Villanueva"}),
            ("Alba", {"family": "Villanueva"}),
            ("Rogelio", {"family": "de la Vega"}),
            ("Jane", {"family": "Ramos"}),
        ])
        self.domain_2 = "jane-eyre"
        self.setup_domain(self.domain_2, [
            ("Jane", {"family": "Eyre"}),
            ("Sarah", {"family": "Reed"}),
            ("John", {"family": "Reed"}),
            ("Eliza", {"family": "Reed"}),
            ("Georgiana", {"family": "Reed"}),
        ])
        self.domain_3 = "janes-addiction"
        self.setup_domain(self.domain_3, [
            ("Perry", {"family": "Farrell"}),
            ("Dave", {"family": "Navarro"}),
            ("Stephen", {"family": "Perkins"}),
            ("Chris", {"family": "Chaney"}),
        ])

        create_registry_for_test(
            self.user,
            self.domain_1,
            invitations=[
                Invitation(self.domain_2),
                Invitation(self.domain_3),
            ],
            grants=[
                Grant(self.domain_1, [self.domain_2, self.domain_3]),
                Grant(self.domain_2, [self.domain_1]),
                Grant(self.domain_3, []),
            ],
            name="reg1",
        )

    def setup_domain(self, domain, cases):
        CaseSearchConfig.objects.create(pk=domain, enabled=True)
        case_search_es_setup(domain, [
            CaseBlock(
                case_id=str(uuid.uuid4()),
                case_type='person',
                case_name=name,
                create=True,
                update=properties,
            ) for name, properties in cases
        ])

    def tearDown(self):
        case_search_es_teardown()

    def test(self):
        print("running")
Add test setup for registry case search
Add test setup for registry case search
Python
bsd-3-clause
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
--- +++ @@ -0,0 +1,84 @@ +import uuid + +from django.test import TestCase + +from casexml.apps.case.mock import CaseBlock + +from corehq.apps.case_search.models import CaseSearchConfig +from corehq.apps.domain.shortcuts import create_user +from corehq.apps.es.tests.utils import ( + case_search_es_setup, + case_search_es_teardown, + es_test, +) +from corehq.apps.registry.tests.utils import ( + Grant, + Invitation, + create_registry_for_test, +) +from corehq.form_processor.tests.utils import run_with_sql_backend + + +@es_test +@run_with_sql_backend +class TestCaseSearchRegistry(TestCase): + + # TODO convert to setUpClass + def setUp(self): + self.user = create_user("admin", "123") + self.domain_1 = "jane-the-virgin" + self.setup_domain(self.domain_1, [ + ("Jane", {"family": "Villanueva"}), + ("Xiomara", {"family": "Villanueva"}), + ("Alba", {"family": "Villanueva"}), + ("Rogelio", {"family": "de la Vega"}), + ("Jane", {"family": "Ramos"}), + ]) + self.domain_2 = "jane-eyre" + self.setup_domain(self.domain_2, [ + ("Jane", {"family": "Eyre"}), + ("Sarah", {"family": "Reed"}), + ("John", {"family": "Reed"}), + ("Eliza", {"family": "Reed"}), + ("Georgiana", {"family": "Reed"}), + ]) + self.domain_3 = "janes-addiction" + self.setup_domain(self.domain_3, [ + ("Perry", {"family": "Farrell"}), + ("Dave", {"family": "Navarro"}), + ("Stephen", {"family": "Perkins"}), + ("Chris", {"family": "Chaney"}), + ]) + + create_registry_for_test( + self.user, + self.domain_1, + invitations=[ + Invitation(self.domain_2), + Invitation(self.domain_3), + ], + grants=[ + Grant(self.domain_1, [self.domain_2, self.domain_3]), + Grant(self.domain_2, [self.domain_1]), + Grant(self.domain_3, []), + ], + name="reg1", + ) + + def setup_domain(self, domain, cases): + CaseSearchConfig.objects.create(pk=domain, enabled=True) + case_search_es_setup(domain, [ + CaseBlock( + case_id=str(uuid.uuid4()), + case_type='person', + case_name=name, + create=True, + update=properties, + ) for name, properties in cases + ]) + + def tearDown(self): + case_search_es_teardown() + + def test(self): + print("running")
3f3f6e2e3a7f62e7fcaa24c4260a0f09e0800b6a
tests/test_bounce.py
tests/test_bounce.py
import sys, pygame
pygame.init()

size = width, height = 320, 240
speed = [2, 2]
black = 0, 0, 0

screen = pygame.display.set_mode(size)

#ball = pygame.image.load("ball.bmp")
ball = pygame.surface.Surface((100, 100))
ball.fill(pygame.Color(0, 0, 255, 255))
ballrect = ball.get_rect()
clock = pygame.time.Clock()

while 1:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()

    ballrect = ballrect.move(speed)
    if ballrect.left < 0 or ballrect.right > width:
        speed[0] = -speed[0]
    if ballrect.top < 0 or ballrect.bottom > height:
        speed[1] = -speed[1]

    screen.fill(black)
    screen.blit(ball, ballrect)
    pygame.display.flip()
    clock.tick(40)
Add simple animation example from pygame tutorial
Add simple animation example from pygame tutorial
Python
lgpl-2.1
caseyc37/pygame_cffi,CTPUG/pygame_cffi,CTPUG/pygame_cffi,CTPUG/pygame_cffi,GertBurger/pygame_cffi,GertBurger/pygame_cffi,caseyc37/pygame_cffi,caseyc37/pygame_cffi,GertBurger/pygame_cffi,GertBurger/pygame_cffi
--- +++ @@ -0,0 +1,30 @@ +import sys, pygame +pygame.init() + +size = width, height = 320, 240 +speed = [2, 2] +black = 0, 0, 0 + +screen = pygame.display.set_mode(size) + +#ball = pygame.image.load("ball.bmp") +ball = pygame.surface.Surface((100, 100)) +ball.fill(pygame.Color(0, 0, 255, 255)) +ballrect = ball.get_rect() +clock = pygame.time.Clock() + +while 1: + for event in pygame.event.get(): + if event.type == pygame.QUIT: + sys.exit() + + ballrect = ballrect.move(speed) + if ballrect.left < 0 or ballrect.right > width: + speed[0] = -speed[0] + if ballrect.top < 0 or ballrect.bottom > height: + speed[1] = -speed[1] + + screen.fill(black) + screen.blit(ball, ballrect) + pygame.display.flip() + clock.tick(40)
264b4112ccfdebeb7524036b6f32d49fa38bb321
tests/test_heroku.py
tests/test_heroku.py
"""Tests for the Wallace API.""" import subprocess import re import requests class TestHeroku(object): """The Heroku test class.""" def test_sandbox(self): """Launch the experiment on Heroku.""" sandbox_output = subprocess.check_output( "cd examples/bartlett1932; wallace sandbox --verbose", shell=True) id = re.search( 'Running as experiment (.*)...', sandbox_output).group(1) r = requests.get("http://{}.herokuapp.com/summary".format(id)) assert r.json()['status'] == []
Create test for sandboxing via Heroku
Create test for sandboxing via Heroku
Python
mit
berkeley-cocosci/Wallace,suchow/Wallace,Dallinger/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,suchow/Wallace,berkeley-cocosci/Wallace,jcpeterson/Dallinger,suchow/Wallace,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,berkeley-cocosci/Wallace,Dallinger/Dallinger,Dallinger/Dallinger
--- +++ @@ -0,0 +1,22 @@ +"""Tests for the Wallace API.""" + +import subprocess +import re +import requests + + +class TestHeroku(object): + + """The Heroku test class.""" + + def test_sandbox(self): + """Launch the experiment on Heroku.""" + sandbox_output = subprocess.check_output( + "cd examples/bartlett1932; wallace sandbox --verbose", shell=True) + + id = re.search( + 'Running as experiment (.*)...', sandbox_output).group(1) + + r = requests.get("http://{}.herokuapp.com/summary".format(id)) + + assert r.json()['status'] == []
c20b1b8c2362f2484baacd15acd9d72ae1e2b6d7
tools/commitstats.py
tools/commitstats.py
# Run svn log -l <some number>

import re
import numpy as np
import os

names = re.compile(r'r\d+\s[|]\s(.*)\s[|]\s200')

def get_count(filename, repo):
    mystr = open(filename).read()
    result = names.findall(mystr)
    u = np.unique(result)
    count = [(x,result.count(x),repo) for x in u]
    return count


command = 'svn log -l 2300 > output.txt'
os.chdir('..')
os.system(command)

count = get_count('output.txt', 'NumPy')


os.chdir('../scipy')
os.system(command)

count.extend(get_count('output.txt', 'SciPy'))

os.chdir('../scikits')
os.system(command)
count.extend(get_count('output.txt', 'SciKits'))
count.sort()


print "** SciPy and NumPy **"
print "====================="
for val in count:
    print val
Add a tool for determining active SVN committers.
Add a tool for determining active SVN committers.

git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@7427 94b884b6-d6fd-0310-90d3-974f1d3f35e1
Python
bsd-3-clause
teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,jasonmccampbell/numpy-refactor-sprint,jasonmccampbell/numpy-refactor-sprint,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,teoliphant/numpy-refactor,jasonmccampbell/numpy-refactor-sprint,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,Ademan/NumPy-GSoC,Ademan/NumPy-GSoC,teoliphant/numpy-refactor
--- +++ @@ -0,0 +1,43 @@ + +# Run svn log -l <some number> + +import re +import numpy as np +import os + +names = re.compile(r'r\d+\s[|]\s(.*)\s[|]\s200') + +def get_count(filename, repo): + mystr = open(filename).read() + result = names.findall(mystr) + u = np.unique(result) + count = [(x,result.count(x),repo) for x in u] + return count + + +command = 'svn log -l 2300 > output.txt' +os.chdir('..') +os.system(command) + +count = get_count('output.txt', 'NumPy') + + +os.chdir('../scipy') +os.system(command) + +count.extend(get_count('output.txt', 'SciPy')) + +os.chdir('../scikits') +os.system(command) +count.extend(get_count('output.txt', 'SciKits')) +count.sort() + + + +print "** SciPy and NumPy **" +print "=====================" +for val in count: + print val + + +
d3df6283db7e9ed56c41f4e7a866c8622743da40
set_markdown_template.py
set_markdown_template.py
import sublime
import sublime_plugin
from os.path import exists, join

TEMPLATE_NAME = "custom-template.html"


def set_template():
    path = join(sublime.packages_path(), "User", TEMPLATE_NAME)
    settings = sublime.load_settings("MarkdownPreview.sublime-settings")
    if settings.get("html_template") != path:
        print("setting")
        settings.set("html_template", path)
        sublime.save_settings("MarkdownPreview.sublime-settings")


class SetMarkdownTemplate(sublime_plugin.ApplicationCommand):
    def run(self):
        set_template()


def plugin_loaded():
    set_template()
Add script to set markdown template
Add script to set markdown template
Python
mit
facelessuser/SublimeRandomCrap,facelessuser/SublimeRandomCrap
--- +++ @@ -0,0 +1,23 @@ +import sublime +import sublime_plugin +from os.path import exists, join + +TEMPLATE_NAME = "custom-template.html" + + +def set_template(): + path = join(sublime.packages_path(), "User", TEMPLATE_NAME) + settings = sublime.load_settings("MarkdownPreview.sublime-settings") + if settings.get("html_template") != path: + print("setting") + settings.set("html_template", path) + sublime.save_settings("MarkdownPreview.sublime-settings") + + +class SetMarkdownTemplate(sublime_plugin.ApplicationCommand): + def run(self): + set_template() + + +def plugin_loaded(): + set_template()
53b17d83300d5d1607e0124229bf830cb1eb8a31
xmlrpc_download.py
xmlrpc_download.py
#!/usr/bin/env python

import json
import sys
import xmlrpc.client


# XXX Edit this to your liking
MAX_BUG_ID = 3210
EXPORT_FILE = "bugzilla.json"
BLACKLIST = [489, 3188]


class RPCEncoder(json.JSONEncoder):
    def default(self, o):
        if isinstance(o, xmlrpc.client.DateTime):
            return o.value
        raise NotImplementedError


def main():
    if len(sys.argv) < 2:
        sys.stderr.write("Usage: %s [URL TO XML-RPC]\n" % (sys.argv[0]))
        exit(1)

    print("Connecting to %r" % (sys.argv[1]))
    bugzilla = xmlrpc.client.ServerProxy(sys.argv[1])

    print("Exporting products")
    products = bugzilla.Product.get(bugzilla.Product.get_selectable_products())["products"]

    print("Exporting bugs")
    valid_ids = filter(lambda i: i not in BLACKLIST, range(1, MAX_BUG_ID))
    bugs = bugzilla.Bug.get({"ids": list(valid_ids), "permissive": True})["bugs"]
    valid_ids = [k["id"] for k in bugs]

    print("Exporting bug history")
    history = bugzilla.Bug.history({"ids": valid_ids})["bugs"]

    print("Exporting comments")
    _comments = bugzilla.Bug.comments({"ids": valid_ids})["bugs"]
    # god damn it bugzilla
    comments = {int(id): _comments[id] for id in _comments}

    for histitem, bug in zip(history, bugs):
        assert histitem["id"] == bug["id"]
        bug["history"] = histitem["history"]

    # turn bugs into a dict
    bugs = {bug["id"]: bug for bug in bugs}

    for id in comments:
        bugs[id]["comments"] = comments[id]["comments"]

    with open(EXPORT_FILE, "w") as f:
        f.write(json.dumps(bugs, cls=RPCEncoder))


if __name__ == "__main__":
    main()
Add an XML-RPC downloader for bugzilla
Add an XML-RPC downloader for bugzilla
Python
mit
jleclanche/bugzilla-to-github
--- +++ @@ -0,0 +1,60 @@ +#!/usr/bin/env python + +import json +import sys +import xmlrpc.client + + +# XXX Edit this to your liking +MAX_BUG_ID = 3210 +EXPORT_FILE = "bugzilla.json" +BLACKLIST = [489, 3188] + + +class RPCEncoder(json.JSONEncoder): + def default(self, o): + if isinstance(o, xmlrpc.client.DateTime): + return o.value + raise NotImplementedError + + +def main(): + if len(sys.argv) < 2: + sys.stderr.write("Usage: %s [URL TO XML-RPC]\n" % (sys.argv[0])) + exit(1) + + print("Connecting to %r" % (sys.argv[1])) + bugzilla = xmlrpc.client.ServerProxy(sys.argv[1]) + + print("Exporting products") + products = bugzilla.Product.get(bugzilla.Product.get_selectable_products())["products"] + + print("Exporting bugs") + valid_ids = filter(lambda i: i not in BLACKLIST, range(1, MAX_BUG_ID)) + bugs = bugzilla.Bug.get({"ids": list(valid_ids), "permissive": True})["bugs"] + valid_ids = [k["id"] for k in bugs] + + print("Exporting bug history") + history = bugzilla.Bug.history({"ids": valid_ids})["bugs"] + + print("Exporting comments") + _comments = bugzilla.Bug.comments({"ids": valid_ids})["bugs"] + # god damn it bugzilla + comments = {int(id): _comments[id] for id in _comments} + + for histitem, bug in zip(history, bugs): + assert histitem["id"] == bug["id"] + bug["history"] = histitem["history"] + + # turn bugs into a dict + bugs = {bug["id"]: bug for bug in bugs} + + for id in comments: + bugs[id]["comments"] = comments[id]["comments"] + + with open(EXPORT_FILE, "w") as f: + f.write(json.dumps(bugs, cls=RPCEncoder)) + + +if __name__ == "__main__": + main()
d1afa600338bb0d9c1c040a42b6de5504e48d699
similar_photos_sqlite.py
similar_photos_sqlite.py
import graphlab
import sqlite3

def main():

    # load photos with their deep features
    photos = graphlab.SFrame('photos_deep_features.gl')

    # train a nearest neighbors model on deep features of photos
    nn_model = graphlab.nearest_neighbors.create(photos, features=['deep_features'], label='path')

    # sqlite database: key = photo name (p), value = list of names of 12 similar photos (p0, ..., p11)
    conn = sqlite3.connect('yelp-photo-explorer.sqlite')
    c = conn.cursor()
    c.execute('CREATE TABLE photos (p, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11)')

    # for each photo make an entry in the database
    for i in xrange(100):
        if i % 100 == 0: print 'processed ' + str(i) + ' out of ' + str(len(photos)) + '...'
        query = nn_model.query(photos[i:i+1], k=13, verbose=False)
        similar = []
        for s in query['reference_label']: similar.append(s[55:])
        c.execute('INSERT INTO photos VALUES ("' + '", "'.join(similar) + '")')

    conn.commit()
    conn.close()

if __name__ == "__main__":
    main()
Store names of similar photos into sqlite database
Store names of similar photos into sqlite database
Python
mit
aysent/yelp-photo-explorer
--- +++ @@ -0,0 +1,29 @@ +import graphlab +import sqlite3 + +def main(): + + # load photos with their deep features + photos = graphlab.SFrame('photos_deep_features.gl') + + # train a nearest neighbors model on deep features of photos + nn_model = graphlab.nearest_neighbors.create(photos, features=['deep_features'], label='path') + + # sqlite database: key = photo name (p), value = list of names of 12 similar photos (p0, ..., p11) + conn = sqlite3.connect('yelp-photo-explorer.sqlite') + c = conn.cursor() + c.execute('CREATE TABLE photos (p, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11)') + + # for each photo make an entry in the database + for i in xrange(100): + if i % 100 == 0: print 'processed ' + str(i) + ' out of ' + str(len(photos)) + '...' + query = nn_model.query(photos[i:i+1], k=13, verbose=False) + similar = [] + for s in query['reference_label']: similar.append(s[55:]) + c.execute('INSERT INTO photos VALUES ("' + '", "'.join(similar) + '")') + + conn.commit() + conn.close() + +if __name__ == "__main__": + main()
272b2238ce9d0d8d1424a470bb7f4f7b41edd9e0
script/unarchive-forecast.py
script/unarchive-forecast.py
#!/usr/bin/env python3

import pickle
import sys

class Forecast:
    pass

for fn in sys.argv:
    with open(fn,"rb") as fp:
        forecast = pickle.load(fp)
        print(dir(forecast))
Add unarchiver of the forecast
Add unarchiver of the forecast
Python
mit
nushio3/UFCORIN,nushio3/UFCORIN,nushio3/UFCORIN,nushio3/UFCORIN,nushio3/UFCORIN
--- +++ @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 + +import pickle +import sys + +class Forecast: + pass + +for fn in sys.argv: + with open(fn,"rb") as fp: + forecast = pickle.load(fp) + print(dir(forecast))
733726467c397ff530a556e9a624466994e7c13c
wagtailmenus/tests/test_commands.py
wagtailmenus/tests/test_commands.py
from __future__ import absolute_import, unicode_literals

from django.test import TestCase
from django.core.management import call_command
from wagtail.wagtailcore.models import Site

from wagtailmenus import app_settings


class TestAutoPopulateMainMenus(TestCase):
    fixtures = ['test.json']

    def setUp(self):
        super(TestAutoPopulateMainMenus, self).setUp()
        # Delete any existing main menus and their items
        self.model = app_settings.MAIN_MENU_MODEL_CLASS
        self.model.objects.all().delete()

    def test_with_home_links(self):
        call_command('autopopulate_main_menus')
        site = Site.objects.all().first()
        menu = self.model.get_for_site(site)
        menu_items = menu.get_menu_items_manager()

        # Confirm that there are menu items
        self.assertTrue(menu_items.count())

        # Confirm that the first item is a home page link
        self.assertTrue(menu_items.first().menu_text == 'Home')

    def test_without_home_links(self):
        call_command('autopopulate_main_menus', add_home_links=False)
        site = Site.objects.all().first()
        menu = self.model.get_for_site(site)
        menu_items = menu.get_menu_items_manager()

        # Confirm that there are menu items
        self.assertTrue(menu_items.count())

        # Confirm that the first item is NOT a home page link
        self.assertFalse(menu_items.first().menu_text == 'Home')
Add tests for new command
Add tests for new command
Python
mit
rkhleics/wagtailmenus,rkhleics/wagtailmenus,ababic/wagtailmenus,rkhleics/wagtailmenus,ababic/wagtailmenus,ababic/wagtailmenus
---
+++
@@ -0,0 +1,41 @@
+from __future__ import absolute_import, unicode_literals
+
+from django.test import TestCase
+from django.core.management import call_command
+from wagtail.wagtailcore.models import Site
+
+from wagtailmenus import app_settings
+
+
+class TestAutoPopulateMainMenus(TestCase):
+    fixtures = ['test.json']
+
+    def setUp(self):
+        super(TestAutoPopulateMainMenus, self).setUp()
+        # Delete any existing main menus and their items
+        self.model = app_settings.MAIN_MENU_MODEL_CLASS
+        self.model.objects.all().delete()
+
+    def test_with_home_links(self):
+        call_command('autopopulate_main_menus')
+        site = Site.objects.all().first()
+        menu = self.model.get_for_site(site)
+        menu_items = menu.get_menu_items_manager()
+
+        # Confirm that there are menu items
+        self.assertTrue(menu_items.count())
+
+        # Confirm that the first item is a home page link
+        self.assertTrue(menu_items.first().menu_text == 'Home')
+
+    def test_without_home_links(self):
+        call_command('autopopulate_main_menus', add_home_links=False)
+        site = Site.objects.all().first()
+        menu = self.model.get_for_site(site)
+        menu_items = menu.get_menu_items_manager()
+
+        # Confirm that there are menu items
+        self.assertTrue(menu_items.count())
+
+        # Confirm that the first item is NOT a home page link
+        self.assertFalse(menu_items.first().menu_text == 'Home')
4cb37cabb3aa171391958f4d6e6d0eb5b8731989
climate_data/migrations/0022_auto_20170623_0236.py
climate_data/migrations/0022_auto_20170623_0236.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-06-23 02:36
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('climate_data', '0021_auto_20170619_2053'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='stationsensorlink',
            options={'ordering': ('station_order',), 'verbose_name': 'station-sensor link'},
        ),
        migrations.AlterField(
            model_name='message',
            name='goes_id',
            field=models.CharField(db_index=True, max_length=8, verbose_name='GOES ID'),
        ),
        migrations.AlterField(
            model_name='message',
            name='recorded_message_length',
            field=models.PositiveSmallIntegerField(verbose_name='Message Length'),
        ),
        migrations.AlterField(
            model_name='reading',
            name='qc_processed',
            field=models.BooleanField(default=False, verbose_name='QC Processed'),
        ),
    ]
Add migration which updates field / model metadata.
Add migration which updates field / model metadata.
Python
apache-2.0
qubs/data-centre,qubs/climate-data-api,qubs/data-centre,qubs/climate-data-api
---
+++
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.10.6 on 2017-06-23 02:36
+from __future__ import unicode_literals
+
+from django.db import migrations, models
+import django.db.models.deletion
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('climate_data', '0021_auto_20170619_2053'),
+    ]
+
+    operations = [
+        migrations.AlterModelOptions(
+            name='stationsensorlink',
+            options={'ordering': ('station_order',), 'verbose_name': 'station-sensor link'},
+        ),
+        migrations.AlterField(
+            model_name='message',
+            name='goes_id',
+            field=models.CharField(db_index=True, max_length=8, verbose_name='GOES ID'),
+        ),
+        migrations.AlterField(
+            model_name='message',
+            name='recorded_message_length',
+            field=models.PositiveSmallIntegerField(verbose_name='Message Length'),
+        ),
+        migrations.AlterField(
+            model_name='reading',
+            name='qc_processed',
+            field=models.BooleanField(default=False, verbose_name='QC Processed'),
+        ),
+    ]
32525203ee392be60c0ea32a817323ccf5cace12
kirppu/tests/test_itemdump.py
kirppu/tests/test_itemdump.py
from django.test import Client, TestCase

from ..models import Item
from .factories import EventFactory, EventPermissionFactory, ItemFactory, ItemTypeFactory, UserFactory, VendorFactory


class ItemDumpTest(TestCase):
    def _addPermission(self):
        EventPermissionFactory(event=self.event, user=self.user, can_see_accounting=True)

    def _addItems(self, count=5):
        vendor = VendorFactory(user=self.user, event=self.event)
        itemtype = ItemTypeFactory()
        for _ in range(count):
            ItemFactory(vendor=vendor, itemtype=itemtype, state=Item.BROUGHT)

    def _get(self, query=""):
        return self.c.get("/kirppu/%s/itemdump/" % self.event.slug + "?" + query)

    def setUp(self):
        self.user = UserFactory()
        self.event = EventFactory()
        self.c = Client()
        self.c.force_login(self.user)

    def test_defaultState(self):
        self._addPermission()
        resp = self._get()
        self.assertEqual(200, resp.status_code)

    def test_noPermission(self):
        resp = self._get()
        self.assertEqual(403, resp.status_code)

    def test_csv(self):
        self._addPermission()
        self._addItems(count=5)
        resp = self._get()

        self.assertEqual(200, resp.status_code)
        self.assertFalse(resp.has_header("Content-Disposition"))

        # CSV: 5 items + header
        self.assertEqual(5 + 1, resp.getvalue().count(b"\n"))

    def test_text(self):
        self._addPermission()
        self._addItems(count=5)
        resp = self._get(query="txt")

        self.assertEqual(200, resp.status_code)

        content = resp.getvalue()
        # Text: 5 items + 7 header rows (1 per column)
        self.assertEqual(5 + 7, content.count(b"\n"))

    def test_download(self):
        self._addPermission()
        self._addItems(count=5)
        resp = self._get(query="download")

        self.assertEqual(200, resp.status_code)
        self.assertTrue(resp.has_header("Content-Disposition"))
        self.assertTrue(resp["Content-Type"].startswith("text/csv"))

        content = resp.getvalue()
        # CSV: 5 items + header
        self.assertEqual(5 + 1, content.count(b"\n"))
Add simple test for item dump.
Add simple test for item dump.
Python
mit
jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu
---
+++
@@ -0,0 +1,68 @@
+from django.test import Client, TestCase
+
+from ..models import Item
+from .factories import EventFactory, EventPermissionFactory, ItemFactory, ItemTypeFactory, UserFactory, VendorFactory
+
+
+class ItemDumpTest(TestCase):
+    def _addPermission(self):
+        EventPermissionFactory(event=self.event, user=self.user, can_see_accounting=True)
+
+    def _addItems(self, count=5):
+        vendor = VendorFactory(user=self.user, event=self.event)
+        itemtype = ItemTypeFactory()
+        for _ in range(count):
+            ItemFactory(vendor=vendor, itemtype=itemtype, state=Item.BROUGHT)
+
+    def _get(self, query=""):
+        return self.c.get("/kirppu/%s/itemdump/" % self.event.slug + "?" + query)
+
+    def setUp(self):
+        self.user = UserFactory()
+        self.event = EventFactory()
+        self.c = Client()
+        self.c.force_login(self.user)
+
+    def test_defaultState(self):
+        self._addPermission()
+        resp = self._get()
+        self.assertEqual(200, resp.status_code)
+
+    def test_noPermission(self):
+        resp = self._get()
+        self.assertEqual(403, resp.status_code)
+
+    def test_csv(self):
+        self._addPermission()
+        self._addItems(count=5)
+        resp = self._get()
+
+        self.assertEqual(200, resp.status_code)
+        self.assertFalse(resp.has_header("Content-Disposition"))
+
+        # CSV: 5 items + header
+        self.assertEqual(5 + 1, resp.getvalue().count(b"\n"))
+
+    def test_text(self):
+        self._addPermission()
+        self._addItems(count=5)
+        resp = self._get(query="txt")
+
+        self.assertEqual(200, resp.status_code)
+
+        content = resp.getvalue()
+        # Text: 5 items + 7 header rows (1 per column)
+        self.assertEqual(5 + 7, content.count(b"\n"))
+
+    def test_download(self):
+        self._addPermission()
+        self._addItems(count=5)
+        resp = self._get(query="download")
+
+        self.assertEqual(200, resp.status_code)
+        self.assertTrue(resp.has_header("Content-Disposition"))
+        self.assertTrue(resp["Content-Type"].startswith("text/csv"))
+
+        content = resp.getvalue()
+        # CSV: 5 items + header
+        self.assertEqual(5 + 1, content.count(b"\n"))
e5e068c5fa94d68aa81dbcd3e498ba17dae37d2c
axelrod/tests/test_reflex.py
axelrod/tests/test_reflex.py
""" Test suite for Reflex Axelrod PD player. """ import axelrod from test_player import TestPlayer class Reflex_test(TestPlayer): def test_initial_nice_strategy(self): """ First response should always be cooperation. """ p1 = axelrod.Reflex() p2 = axelrod.Player() self.assertEqual(p1.strategy(p2), 'C') def test_representation(self): """ How do we appear? """ p1 = axelrod.Reflex() self.assertEqual(str(p1), "Reflex") def test_reset_method(self): """ Does self.reset() reset the self? """ p1 = axelrod.Reflex() p1.history = ['C', 'D', 'C', 'C'] p1.reset() self.assertEqual(p1.history, []) self.assertEqual(p1.response, 'C') def test_stochastic(self): """ We are not stochastic. """ self.assertFalse(axelrod.Reflex().stochastic)
""" Test suite for Reflex Axelrod PD player. """ import axelrod from test_player import TestPlayer class Reflex_test(TestPlayer): name = "Reflex" player = axelrod.Reflex stochastic = False def test_strategy(self): """ First response should always be cooperation. """ p1 = axelrod.Reflex() p2 = axelrod.Player() self.assertEqual(p1.strategy(p2), 'C') def test_reset_method(self): """ Does self.reset() reset the self? """ p1 = axelrod.Reflex() p1.history = ['C', 'D', 'C', 'C'] p1.reset() self.assertEqual(p1.history, []) self.assertEqual(p1.response, 'C')
Simplify tests to new format.
Simplify tests to new format.
Python
mit
emmagordon/Axelrod,uglyfruitcake/Axelrod,kathryncrouch/Axelrod,emmagordon/Axelrod,mojones/Axelrod,drvinceknight/Axelrod,bootandy/Axelrod,uglyfruitcake/Axelrod,bootandy/Axelrod,risicle/Axelrod,risicle/Axelrod,mojones/Axelrod,kathryncrouch/Axelrod
---
+++
@@ -7,17 +7,16 @@
 class Reflex_test(TestPlayer):
 
 
-    def test_initial_nice_strategy(self):
+    name = "Reflex"
+    player = axelrod.Reflex
+    stochastic = False
+
+
+    def test_strategy(self):
         """ First response should always be cooperation. """
         p1 = axelrod.Reflex()
         p2 = axelrod.Player()
         self.assertEqual(p1.strategy(p2), 'C')
-
-
-    def test_representation(self):
-        """ How do we appear? """
-        p1 = axelrod.Reflex()
-        self.assertEqual(str(p1), "Reflex")
 
 
     def test_reset_method(self):
@@ -27,7 +26,3 @@
         p1.reset()
         self.assertEqual(p1.history, [])
         self.assertEqual(p1.response, 'C')
-
-    def test_stochastic(self):
-        """ We are not stochastic. """
-        self.assertFalse(axelrod.Reflex().stochastic)
357067b0cfe6fd781813404ba7d587f5bd00917a
bazaar/goods/utils.py
bazaar/goods/utils.py
from __future__ import unicode_literals

from django.core.exceptions import ImproperlyConfigured

from .models import Product, PriceList


def get_default_price_list():
    """
    Return the default price list
    """
    try:
        return PriceList.objects.get(default=True)
    except PriceList.DoesNotExist:
        raise ImproperlyConfigured("A default price list must exists. Please create one")


def create_product_for_good(good, price, quantity=1):
    """
    Creates a product for the specified `good` with `quantity`. `price` is set to the default price list.
    Returns the new product instance
    """

    product = Product.objects.create(name=good.name, description=good.description)
    product.save()

    # Add good to product elements list
    product.elements.create(good=good, quantity=quantity)

    # Set product's base price on default price list
    default_price_list = get_default_price_list()
    product.prices.create(product=product, price_list=default_price_list, price=price)

    return product
Add utility function to retrieve default price list and to create a product from a good
Add utility function to retrieve default price list and to create a product from a good
Python
bsd-2-clause
evonove/django-bazaar,meghabhoj/NEWBAZAAR,meghabhoj/NEWBAZAAR,meghabhoj/NEWBAZAAR,evonove/django-bazaar,evonove/django-bazaar
---
+++
@@ -0,0 +1,34 @@
+from __future__ import unicode_literals
+
+from django.core.exceptions import ImproperlyConfigured
+
+from .models import Product, PriceList
+
+
+def get_default_price_list():
+    """
+    Return the default price list
+    """
+    try:
+        return PriceList.objects.get(default=True)
+    except PriceList.DoesNotExist:
+        raise ImproperlyConfigured("A default price list must exists. Please create one")
+
+
+def create_product_for_good(good, price, quantity=1):
+    """
+    Creates a product for the specified `good` with `quantity`. `price` is set to the default price list.
+    Returns the new product instance
+    """
+
+    product = Product.objects.create(name=good.name, description=good.description)
+    product.save()
+
+    # Add good to product elements list
+    product.elements.create(good=good, quantity=quantity)
+
+    # Set product's base price on default price list
+    default_price_list = get_default_price_list()
+    product.prices.create(product=product, price_list=default_price_list, price=price)
+
+    return product
576c2a1565fe9860c1188a9862b54e24aab64ed4
tests/test_update_languages.py
tests/test_update_languages.py
# tests.test_update_languagess
# coding=utf-8

from __future__ import unicode_literals

import nose.tools as nose
from mock import patch

import utilities.update_languages as update_langs
from tests import set_up, tear_down
from tests.decorators import redirect_stdout_unicode


@nose.with_setup(set_up, tear_down)
@patch('sys.argv', [update_langs.__file__, 'swe'])
@patch('utilities.update_languages.update_language')
@redirect_stdout_unicode
def test_update_languages(out, update_language):
    """should perform all necessary steps to update all languages"""
    update_langs.update_languages()
    nose.assert_equal(update_language.call_count, 21)


@nose.with_setup(set_up, tear_down)
@patch('sys.argv', [update_langs.__file__, 'swe'])
@patch('utilities.update_languages.update_languages')
@redirect_stdout_unicode
def test_main(out, update_languages):
    """main function should pass correct arguments to update_languages"""
    update_langs.main()
    update_languages.assert_called_once_with()


@patch('utilities.update_languages.update_languages',
       side_effect=KeyboardInterrupt)
@redirect_stdout_unicode
def test_main_keyboardinterrupt(out, update_languages):
    """main function should quit gracefully when ^C is pressed"""
    nose.assert_is_none(update_langs.main())
Add tests for update_languages utility
Add tests for update_languages utility
Python
mit
caleb531/youversion-suggest,caleb531/youversion-suggest
---
+++
@@ -0,0 +1,39 @@
+# tests.test_update_languagess
+# coding=utf-8
+
+from __future__ import unicode_literals
+
+import nose.tools as nose
+from mock import patch
+
+import utilities.update_languages as update_langs
+from tests import set_up, tear_down
+from tests.decorators import redirect_stdout_unicode
+
+
+@nose.with_setup(set_up, tear_down)
+@patch('sys.argv', [update_langs.__file__, 'swe'])
+@patch('utilities.update_languages.update_language')
+@redirect_stdout_unicode
+def test_update_languages(out, update_language):
+    """should perform all necessary steps to update all languages"""
+    update_langs.update_languages()
+    nose.assert_equal(update_language.call_count, 21)
+
+
+@nose.with_setup(set_up, tear_down)
+@patch('sys.argv', [update_langs.__file__, 'swe'])
+@patch('utilities.update_languages.update_languages')
+@redirect_stdout_unicode
+def test_main(out, update_languages):
+    """main function should pass correct arguments to update_languages"""
+    update_langs.main()
+    update_languages.assert_called_once_with()
+
+
+@patch('utilities.update_languages.update_languages',
+       side_effect=KeyboardInterrupt)
+@redirect_stdout_unicode
+def test_main_keyboardinterrupt(out, update_languages):
+    """main function should quit gracefully when ^C is pressed"""
+    nose.assert_is_none(update_langs.main())
66da5a6bd67ae3645eeff5856ae4614e4be9f5d8
microdrop/tests/update_dmf_control_board.py
microdrop/tests/update_dmf_control_board.py
import os
import subprocess

if __name__ == '__main__':
    os.chdir('microdrop/plugins')

    if not os.path.exists('dmf_control_board'):
        print 'Clone dmf_control_board repository...'
        subprocess.call(['git', 'clone',
                'http://microfluidics.utoronto.ca/git/dmf_control_board.git'])
    else:
        print 'Fetch lastest update...'
        subprocess.call(['git', 'pull'])
Add script for downloading latest dmf_control_board
Add script for downloading latest dmf_control_board
Python
bsd-3-clause
wheeler-microfluidics/microdrop
---
+++
@@ -0,0 +1,13 @@
+import os
+import subprocess
+
+if __name__ == '__main__':
+    os.chdir('microdrop/plugins')
+
+    if not os.path.exists('dmf_control_board'):
+        print 'Clone dmf_control_board repository...'
+        subprocess.call(['git', 'clone',
+                'http://microfluidics.utoronto.ca/git/dmf_control_board.git'])
+    else:
+        print 'Fetch lastest update...'
+        subprocess.call(['git', 'pull'])
4173d3abeeda29ffbd81379233e88311780b6b09
tests/test_load.py
tests/test_load.py
from .utils import TemplateTestCase, Mock

from knights import Template


class LoadTagTest(TemplateTestCase):

    def test_load_default(self):
        t = Template('{! knights.defaultfilters !}')
        self.assertIn('title', t.parser.filters)
Add a test for library loading
Add a test for library loading
Python
mit
funkybob/knights-templater,funkybob/knights-templater
---
+++
@@ -0,0 +1,10 @@
+from .utils import TemplateTestCase, Mock
+
+from knights import Template
+
+
+class LoadTagTest(TemplateTestCase):
+
+    def test_load_default(self):
+        t = Template('{! knights.defaultfilters !}')
+        self.assertIn('title', t.parser.filters)
fe62ab5e609aba0c1739ca81d5cdd266d208a217
Build/make_payload.py
Build/make_payload.py
'''Quick and dirty script to generate vs.payload blocks for a set of URLs.

Usage:
    make_payload.py URL [URL ...]


'''

__author__ = 'Steve Dower <steve.dower@microsoft.com>'
__version__ = '0.1'

import hashlib
import os
import urllib.request
import sys

for u in sys.argv[1:]:
    is_temp = False
    if os.path.isfile(u):
        p = u
        name = None
    else:
        p, r = urllib.request.urlretrieve(u)
        try:
            name = r.get_filename()
        except:
            name = None
        is_temp = True

    if not name:
        try:
            _, name = os.path.split(u)
        except:
            try:
                _, name = os.path.split(p)
            except:
                name = '<unknown>'

    f_len = 0
    f_hash = hashlib.sha256()
    with open(p, 'rb') as f:
        data = f.read(1024 * 1024)
        while data:
            f_len += len(data)
            f_hash.update(data)
            data = f.read(1024 * 1024)

    if is_temp:
        try:
            os.unlink(p)
        except:
            pass

    print(f'    vs.payload size={f_len}')
    print(f'                url={u}')
    print(f'           fileName={name}')
    print(f'             sha256={f_hash.hexdigest()}')
    print()
Add MSI generating tool for python updating
Add MSI generating tool for python updating
Python
apache-2.0
int19h/PTVS,int19h/PTVS,int19h/PTVS,int19h/PTVS,int19h/PTVS,int19h/PTVS
---
+++
@@ -0,0 +1,58 @@
+'''Quick and dirty script to generate vs.payload blocks for a set of URLs.
+
+Usage:
+    make_payload.py URL [URL ...]
+
+
+'''
+
+__author__ = 'Steve Dower <steve.dower@microsoft.com>'
+__version__ = '0.1'
+
+import hashlib
+import os
+import urllib.request
+import sys
+
+for u in sys.argv[1:]:
+    is_temp = False
+    if os.path.isfile(u):
+        p = u
+        name = None
+    else:
+        p, r = urllib.request.urlretrieve(u)
+        try:
+            name = r.get_filename()
+        except:
+            name = None
+        is_temp = True
+
+    if not name:
+        try:
+            _, name = os.path.split(u)
+        except:
+            try:
+                _, name = os.path.split(p)
+            except:
+                name = '<unknown>'
+
+    f_len = 0
+    f_hash = hashlib.sha256()
+    with open(p, 'rb') as f:
+        data = f.read(1024 * 1024)
+        while data:
+            f_len += len(data)
+            f_hash.update(data)
+            data = f.read(1024 * 1024)
+
+    if is_temp:
+        try:
+            os.unlink(p)
+        except:
+            pass
+
+    print(f'    vs.payload size={f_len}')
+    print(f'                url={u}')
+    print(f'           fileName={name}')
+    print(f'             sha256={f_hash.hexdigest()}')
+    print()
739e302506cb542011b8f022c6175637feaf20b4
misc/disablepasscomplexity.py
misc/disablepasscomplexity.py
#!/usr/bin/env python
import pyghmi.util.webclient as webclient
import json
import os
import sys

tmppassword = 'to3BdS91ABrd'
missingargs = False
if 'XCCUSER' not in os.environ:
    print('Must set XCCUSER environment variable')
    missingargs = True
if 'XCCPASS' not in os.environ:
    print('Must set XCCPASS environment variable')
    missingargs = True
if missingargs:
    sys.exit(1)

w = webclient.SecureHTTPConnection(sys.argv[1], 443, verifycallback=lambda x: True)
w.connect()
adata = json.dumps({'username': os.environ['XCCUSER'], 'password': os.environ['XCCPASS']})
headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'}
w.request('POST', '/api/login', adata, headers)
rsp = w.getresponse()
if rsp.status == 200:
    rspdata = json.loads(rsp.read())
    w.set_header('Content-Type', 'application/json')
    w.set_header('Authorization', 'Bearer ' + rspdata['access_token'])
    if '_csrf_token' in w.cookies:
        w.set_header('X-XSRF-TOKEN', w.cookies['_csrf_token'])
    print(repr(w.grab_json_response('/api/dataset', {
        'USER_GlobalPassComplexRequired': '0',
    })))
Add an example for just disabling password complexity
Add an example for just disabling password complexity
Python
apache-2.0
jjohnson42/confluent,jjohnson42/confluent,xcat2/confluent,xcat2/confluent,jjohnson42/confluent,xcat2/confluent,jjohnson42/confluent,xcat2/confluent,xcat2/confluent,jjohnson42/confluent
---
+++
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+import pyghmi.util.webclient as webclient
+import json
+import os
+import sys
+
+tmppassword = 'to3BdS91ABrd'
+missingargs = False
+if 'XCCUSER' not in os.environ:
+    print('Must set XCCUSER environment variable')
+    missingargs = True
+if 'XCCPASS' not in os.environ:
+    print('Must set XCCPASS environment variable')
+    missingargs = True
+if missingargs:
+    sys.exit(1)
+
+w = webclient.SecureHTTPConnection(sys.argv[1], 443, verifycallback=lambda x: True)
+w.connect()
+adata = json.dumps({'username': os.environ['XCCUSER'], 'password': os.environ['XCCPASS']})
+headers = {'Connection': 'keep-alive', 'Content-Type': 'application/json'}
+w.request('POST', '/api/login', adata, headers)
+rsp = w.getresponse()
+if rsp.status == 200:
+    rspdata = json.loads(rsp.read())
+    w.set_header('Content-Type', 'application/json')
+    w.set_header('Authorization', 'Bearer ' + rspdata['access_token'])
+    if '_csrf_token' in w.cookies:
+        w.set_header('X-XSRF-TOKEN', w.cookies['_csrf_token'])
+    print(repr(w.grab_json_response('/api/dataset', {
+        'USER_GlobalPassComplexRequired': '0',
+    })))
+
72c38a8b67b23080ab9fea7a6fd3405b2f88ad7a
wm_metrics/count_articles_improved_for_image_collection.py
wm_metrics/count_articles_improved_for_image_collection.py
# -*- coding: utf-8 -*-

"""Analysing a Glamorous report to identify articles improved."""

import sys
import xml.dom.minidom


def handle_node_attribute(node, tag_name, attribute_name):
    """Return the contents of a tag based on his given name inside of a given node."""
    element = node.getElementsByTagName(tag_name)
    attr = element.item(0).getAttribute(attribute_name)
    return attr


def get_articles_from_glamorous_xml(doc):
    articles = []
    for first_node in doc.childNodes:
        if first_node.localName == u'results':
            for details_node in first_node.childNodes:
                if details_node.localName == u'details':
                    for image_node in details_node.childNodes:
                        if image_node.localName == u'image':
                            project = handle_node_attribute(image_node, u'project', u'name')
                            for page_node in image_node.getElementsByTagName('page'):
                                page = page_node.getAttribute('title')
                                articles.append((project, page))
    return articles


def analyse_glamorous_xml(xml_text):
    doc = xml.dom.minidom.parseString(xml_text)
    articles_list = get_articles_from_glamorous_xml(doc)
    fused = ["%s:%s" % page for page in articles_list]
    print '\n'.join(sorted(fused))
    print len(fused)
    print len(set(fused))


def main():
    if len(sys.argv) < 2:
        print "Please provide a Glamourous file"
        sys.exit()
    xml_document = open(sys.argv[1], 'r')
    xml_text = xml_document.read()
    analyse_glamorous_xml(xml_text)


if __name__ == "__main__":
    main()
Add script to compute articles improved for media collection
Add script to compute articles improved for media collection
Python
mit
Commonists/wm_metrics,Commonists/wm_metrics,Commonists/wm_metrics,Commonists/wm_metrics
---
+++
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+
+"""Analysing a Glamorous report to identify articles improved."""
+
+import sys
+import xml.dom.minidom
+
+
+def handle_node_attribute(node, tag_name, attribute_name):
+    """Return the contents of a tag based on his given name inside of a given node."""
+    element = node.getElementsByTagName(tag_name)
+    attr = element.item(0).getAttribute(attribute_name)
+    return attr
+
+
+def get_articles_from_glamorous_xml(doc):
+    articles = []
+    for first_node in doc.childNodes:
+        if first_node.localName == u'results':
+            for details_node in first_node.childNodes:
+                if details_node.localName == u'details':
+                    for image_node in details_node.childNodes:
+                        if image_node.localName == u'image':
+                            project = handle_node_attribute(image_node, u'project', u'name')
+                            for page_node in image_node.getElementsByTagName('page'):
+                                page = page_node.getAttribute('title')
+                                articles.append((project, page))
+    return articles
+
+
+def analyse_glamorous_xml(xml_text):
+    doc = xml.dom.minidom.parseString(xml_text)
+    articles_list = get_articles_from_glamorous_xml(doc)
+    fused = ["%s:%s" % page for page in articles_list]
+    print '\n'.join(sorted(fused))
+    print len(fused)
+    print len(set(fused))
+
+
+def main():
+    if len(sys.argv) < 2:
+        print "Please provide a Glamourous file"
+        sys.exit()
+    xml_document = open(sys.argv[1], 'r')
+    xml_text = xml_document.read()
+    analyse_glamorous_xml(xml_text)
+
+
+if __name__ == "__main__":
+    main()
bd2f302e2bcc02a3f222d0c00be9fe61351517e2
flask_typecheck_decorator.py
flask_typecheck_decorator.py
#!/usr/bin/env python3
import json
from flask import Response, Flask, request

import inspect
from typecheck import typecheck
from typing import (
    List,
    Dict)


def typed_service(func):
    def service():
        print(request.json)
        print(type(request.json))
        args_dict: Dict = request.json
        arg_inspect = inspect.getfullargspec(func)
        # if the function accepts an additional dictionary of arbitrary items, accept unknown arguments
        if arg_inspect.varkw is None:
            for k in args_dict.keys():
                if k not in func.__annotations__:
                    return Response(json.dumps({
                        "invalid_argument_name": k,
                        "error": "unknown argument name"
                    }), status=400, mimetype='application/json')
        for (arg_name, arg_type) in func.__annotations__.items():
            if arg_name == 'return':
                continue
            if not typecheck.check_type(args_dict[arg_name], arg_type):
                return Response(json.dumps({
                    "invalid_argument_name": arg_name,
                    "error": "invalid type",
                    "expected_type": str(arg_type),
                    "received_value": args_dict[arg_name]
                }), status=400, mimetype='application/json')
        js = json.dumps(func(**request.json))
        resp = Response(js, status=200, mimetype='application/json')

        return resp

    return service


app = Flask(__name__)




@app.route('/bark_typed', methods=['POST'])
@typed_service
def bark(name: str, number_of_barks: int = 3, friends: List[str] = []) -> Dict:
    return {'number of barks': number_of_barks,
            'bark message': ' '.join(([name, "woof!"] * number_of_barks)),
            'my friends are': friends
            }


@app.route('/bark', methods=['POST'])
def api_bark():
    js = json.dumps(bark("William", 4))

    resp = Response(js, status=200, mimetype='application/json')

    return resp

if __name__ == '__main__':
    app.run()
Add first draft of Flask type check decorator
Add first draft of Flask type check decorator
Python
mit
jacopofar/runtime_typecheck
---
+++
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+import json
+from flask import Response, Flask, request
+
+import inspect
+from typecheck import typecheck
+from typing import (
+    List,
+    Dict)
+
+
+def typed_service(func):
+    def service():
+        print(request.json)
+        print(type(request.json))
+        args_dict: Dict = request.json
+        arg_inspect = inspect.getfullargspec(func)
+        # if the function accepts an additional dictionary of arbitrary items, accept unknown arguments
+        if arg_inspect.varkw is None:
+            for k in args_dict.keys():
+                if k not in func.__annotations__:
+                    return Response(json.dumps({
+                        "invalid_argument_name": k,
+                        "error": "unknown argument name"
+                    }), status=400, mimetype='application/json')
+        for (arg_name, arg_type) in func.__annotations__.items():
+            if arg_name == 'return':
+                continue
+            if not typecheck.check_type(args_dict[arg_name], arg_type):
+                return Response(json.dumps({
+                    "invalid_argument_name": arg_name,
+                    "error": "invalid type",
+                    "expected_type": str(arg_type),
+                    "received_value": args_dict[arg_name]
+                }), status=400, mimetype='application/json')
+        js = json.dumps(func(**request.json))
+        resp = Response(js, status=200, mimetype='application/json')
+
+        return resp
+
+    return service
+
+
+app = Flask(__name__)
+
+
+
+
+@app.route('/bark_typed', methods=['POST'])
+@typed_service
+def bark(name: str, number_of_barks: int = 3, friends: List[str] = []) -> Dict:
+    return {'number of barks': number_of_barks,
+            'bark message': ' '.join(([name, "woof!"] * number_of_barks)),
+            'my friends are': friends
+            }
+
+
+@app.route('/bark', methods=['POST'])
+def api_bark():
+    js = json.dumps(bark("William", 4))
+
+    resp = Response(js, status=200, mimetype='application/json')
+
+    return resp
+
+if __name__ == '__main__':
+    app.run()
0b547b69c9e603f77de6d8855a2fe1f153ba49d5
busshaming/fetch_realtime.py
busshaming/fetch_realtime.py
import os
from datetime import datetime, timedelta

import django
import pytz
import requests
from google.transit import gtfs_realtime_pb2

django.setup()

from busshaming.models import Feed, TripDate, RealtimeEntry, Stop

GTFS_API_KEY = os.environ.get('TRANSPORT_NSW_API_KEY')


def process_trip_update(trip_dates, stops, feed_tz, trip_update, threshold):
    trip = trip_update.trip
    key = (trip.trip_id, trip.start_date)
    # trip_date = trip_dates[key]
    if key not in trip_dates:
        print(trip)
        print("CAN'T FIND IN SCHEDULE: {}".format(key))
        return
    trip_date = trip_dates[key]
    for stop_update in trip_update.stop_time_update:
        if stop_update.arrival.time < threshold:
            stop = stops[stop_update.stop_id]
            arrival_time = datetime.fromtimestamp(stop_update.arrival.time, feed_tz)
            departure_time = datetime.fromtimestamp(stop_update.departure.time, feed_tz)
            # Upsert RealtimeEntry
            RealtimeEntry.objects.upsert(trip_date.trip_id, stop.id, stop_update.stop_sequence, arrival_time, stop_update.arrival.delay, departure_time, stop_update.departure.delay)


def fetch():
    feed = Feed.objects.get(slug='nsw-buses')
    feed_tz = pytz.timezone(feed.timezone)
    stops = {}
    for stop in Stop.objects.filter(feed=feed):
        stops[stop.gtfs_stop_id] = stop
    trip_dates = {}
    today = datetime.now(tz=feed_tz).date()
    yesterday = today - timedelta(days=1)
    for trip_date in TripDate.objects.filter(date__gte=yesterday, date__lte=today).prefetch_related('trip'):
        datestr = trip_date.date.strftime('%Y%m%d')
        trip_dates[(trip_date.trip.gtfs_trip_id, datestr)] = trip_date

    feed_message = gtfs_realtime_pb2.FeedMessage()
    headers = {'Authorization': 'apikey ' + GTFS_API_KEY}
    print('fetching...')
    response = requests.get(feed.realtime_feed_url, headers=headers)
    print('fetching complete.')
    if response.status_code == 200:
        feed_message.ParseFromString(response.content)
        now = datetime.now(tz=feed_tz)
        threshold = int((now + timedelta(minutes=3)).timestamp())
        for entity in feed_message.entity:
            if entity.HasField('trip_update'):
                process_trip_update(trip_dates, stops, feed_tz, entity.trip_update, threshold)
    else:
        print(response.status_code)
        print(response.content)


if __name__ == '__main__':
    fetch()
Add script which logs realtime data into the db.
Add script which logs realtime data into the db.
Python
mit
katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming,katharosada/bus-shaming
---
+++
@@ -0,0 +1,65 @@
+import os
+from datetime import datetime, timedelta
+
+import django
+import pytz
+import requests
+from google.transit import gtfs_realtime_pb2
+
+django.setup()
+
+from busshaming.models import Feed, TripDate, RealtimeEntry, Stop
+
+GTFS_API_KEY = os.environ.get('TRANSPORT_NSW_API_KEY')
+
+
+def process_trip_update(trip_dates, stops, feed_tz, trip_update, threshold):
+    trip = trip_update.trip
+    key = (trip.trip_id, trip.start_date)
+    # trip_date = trip_dates[key]
+    if key not in trip_dates:
+        print(trip)
+        print("CAN'T FIND IN SCHEDULE: {}".format(key))
+        return
+    trip_date = trip_dates[key]
+    for stop_update in trip_update.stop_time_update:
+        if stop_update.arrival.time < threshold:
+            stop = stops[stop_update.stop_id]
+            arrival_time = datetime.fromtimestamp(stop_update.arrival.time, feed_tz)
+            departure_time = datetime.fromtimestamp(stop_update.departure.time, feed_tz)
+            # Upsert RealtimeEntry
+            RealtimeEntry.objects.upsert(trip_date.trip_id, stop.id, stop_update.stop_sequence, arrival_time, stop_update.arrival.delay, departure_time, stop_update.departure.delay)
+
+
+def fetch():
+    feed = Feed.objects.get(slug='nsw-buses')
+    feed_tz = pytz.timezone(feed.timezone)
+    stops = {}
+    for stop in Stop.objects.filter(feed=feed):
+        stops[stop.gtfs_stop_id] = stop
+    trip_dates = {}
+    today = datetime.now(tz=feed_tz).date()
+    yesterday = today - timedelta(days=1)
+    for trip_date in TripDate.objects.filter(date__gte=yesterday, date__lte=today).prefetch_related('trip'):
+        datestr = trip_date.date.strftime('%Y%m%d')
+        trip_dates[(trip_date.trip.gtfs_trip_id, datestr)] = trip_date
+
+    feed_message = gtfs_realtime_pb2.FeedMessage()
+    headers = {'Authorization': 'apikey ' + GTFS_API_KEY}
+    print('fetching...')
+    response = requests.get(feed.realtime_feed_url, headers=headers)
+    print('fetching complete.')
+    if response.status_code == 200:
+        feed_message.ParseFromString(response.content)
+        now = datetime.now(tz=feed_tz)
+        threshold = int((now + timedelta(minutes=3)).timestamp())
+        for entity in feed_message.entity:
+            if entity.HasField('trip_update'):
+                process_trip_update(trip_dates, stops, feed_tz, entity.trip_update, threshold)
+    else:
+        print(response.status_code)
+        print(response.content)
+
+
+if __name__ == '__main__':
+    fetch()
f226c81bbc7052dcac0993bacdaa4a93761b4dce
cvmfs/webapi/test-api.py
cvmfs/webapi/test-api.py
#! /usr/bin/env python

# This tester listens on port 8051 for a single http request, with
# a URL that starts with /api/v....
# It exits after one request.
# It assumes that GeoIP is already installed on the current machine
# with an installation of cvmfs-server, but reads the rest from
# the current directory.

from wsgiref.simple_server import make_server

import sys
sys.path.append('.')
sys.path.append('/usr/share/cvmfs-server/webapi')

from ctypes import cdll
cdll.LoadLibrary('/usr/share/cvmfs-server/webapi/GeoIP.so')

execfile('cvmfs-api.wsgi')

import socket
httpd = make_server(
    socket.gethostname(), # The host name.
    8051, # A port number where to wait for the request.
    application # Our application object name, in this case a function.
    )

# Wait for a single request, serve it and quit.
httpd.handle_request()
Add this little development tester for webapi
Add this little development tester for webapi
Python
bsd-3-clause
trshaffer/cvmfs,MicBrain/cvmfs,cvmfs-testing/cvmfs,alhowaidi/cvmfsNDN,Moliholy/cvmfs,alhowaidi/cvmfsNDN,trshaffer/cvmfs,DrDaveD/cvmfs,Moliholy/cvmfs,cvmfs/cvmfs,djw8605/cvmfs,reneme/cvmfs,MicBrain/cvmfs,cvmfs/cvmfs,Moliholy/cvmfs,trshaffer/cvmfs,cvmfs/cvmfs,alhowaidi/cvmfsNDN,cvmfs-testing/cvmfs,DrDaveD/cvmfs,Moliholy/cvmfs,djw8605/cvmfs,alhowaidi/cvmfsNDN,DrDaveD/cvmfs,cvmfs-testing/cvmfs,MicBrain/cvmfs,djw8605/cvmfs,DrDaveD/cvmfs,Gangbiao/cvmfs,Gangbiao/cvmfs,Gangbiao/cvmfs,MicBrain/cvmfs,trshaffer/cvmfs,Gangbiao/cvmfs,djw8605/cvmfs,cvmfs/cvmfs,DrDaveD/cvmfs,cvmfs/cvmfs,reneme/cvmfs,DrDaveD/cvmfs,MicBrain/cvmfs,reneme/cvmfs,cvmfs/cvmfs,cvmfs/cvmfs,Gangbiao/cvmfs,Moliholy/cvmfs,cvmfs-testing/cvmfs,trshaffer/cvmfs,cvmfs-testing/cvmfs,alhowaidi/cvmfsNDN,reneme/cvmfs,reneme/cvmfs,DrDaveD/cvmfs,djw8605/cvmfs
---
+++
@@ -0,0 +1,29 @@
+#! /usr/bin/env python
+
+# This tester listens on port 8051 for a single http request, with
+# a URL that starts with /api/v....
+# It exits after one request.
+# It assumes that GeoIP is already installed on the current machine
+# with an installation of cvmfs-server, but reads the rest from
+# the current directory.
+
+from wsgiref.simple_server import make_server
+
+import sys
+sys.path.append('.')
+sys.path.append('/usr/share/cvmfs-server/webapi')
+
+from ctypes import cdll
+cdll.LoadLibrary('/usr/share/cvmfs-server/webapi/GeoIP.so')
+
+execfile('cvmfs-api.wsgi')
+
+import socket
+httpd = make_server(
+    socket.gethostname(), # The host name.
+    8051, # A port number where to wait for the request.
+    application # Our application object name, in this case a function.
+    )
+
+# Wait for a single request, serve it and quit.
+httpd.handle_request()
2dfd9cfc42e17f36446ff5da36e497bfff8d1d89
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Request.py
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Request.py
#!/usr/bin/env python
# encoding=utf8

import requests
from flexbe_core import EventState, Logger


class Wonderland_Request(EventState):
    '''
    MoveArm receive a ROS pose as input and launch a ROS service with the same pose

    ># url string url to call
    <= response string Finish job.


    '''

    def __init__(self):
        # See example_state.py for basic explanations.
        super(Wonderland_Request, self).__init__(outcomes=['done', 'error'],
                                                 input_keys=['url'],
                                                 output_keys=['response'])
        self._header = {'api-key': 'asdf'}

    def execute(self, userdata):
        # This method is called periodically while the state is active.
        # Main purpose is to check state conditions and trigger a corresponding outcome.
        # If no outcome is returned, the state will stay active.
        try:
            response = requests.get(userdata.url, headers=self._header)
        except requests.exceptions.RequestException as e:
            print e
            return 'error'

        userdata.response = response.content
        return 'done' # One of the outcomes declared above.
Add a state for send requests to Wonderland.
Add a state for send requests to Wonderland.
Python
bsd-3-clause
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
---
+++
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+# encoding=utf8
+
+import requests
+from flexbe_core import EventState, Logger
+
+
+class Wonderland_Request(EventState):
+    '''
+    MoveArm receive a ROS pose as input and launch a ROS service with the same pose
+
+    ># url string url to call
+    <= response string Finish job.
+
+
+    '''
+
+    def __init__(self):
+        # See example_state.py for basic explanations.
+        super(Wonderland_Request, self).__init__(outcomes=['done', 'error'],
+                                                 input_keys=['url'],
+                                                 output_keys=['response'])
+        self._header = {'api-key': 'asdf'}
+
+    def execute(self, userdata):
+        # This method is called periodically while the state is active.
+        # Main purpose is to check state conditions and trigger a corresponding outcome.
+        # If no outcome is returned, the state will stay active.
+        try:
+            response = requests.get(userdata.url, headers=self._header)
+        except requests.exceptions.RequestException as e:
+            print e
+            return 'error'
+
+        userdata.response = response.content
+        return 'done' # One of the outcomes declared above.
1f6ec9185edfe3469c5ae0c991a308a06599bcd9
backend/globaleaks/db/migrations/update_22_23.py
backend/globaleaks/db/migrations/update_22_23.py
# -*- encoding: utf-8 -*-

from storm.locals import Int, Bool, Unicode, DateTime, JSON, Reference, ReferenceSet
from globaleaks.db.base_updater import TableReplacer
from globaleaks.models import BaseModel, Model


class InternalFile_v_22(Model):
    __storm_table__ = 'internalfile'
    internaltip_id = Unicode()
    name = Unicode()
    file_path = Unicode()
    content_type = Unicode()
    size = Int()
    new = Int()


class Replacer2223(TableReplacer):
    def migrate_InternalFile(self):
        print "%s InternalFile migration assistant" % self.std_fancy

        old_objs = self.store_old.find(self.get_right_model("InternalFile", 22))

        for old_obj in old_objs:
            new_obj = self.get_right_model("InternalFile", 23)()
            for _, v in new_obj._storm_columns.iteritems():
                if v.name == 'processing_attempts':
                    new_obj.processing_attempts = 0
                    continue

                setattr(new_obj, v.name, getattr(old_obj, v.name))

            self.store_new.add(new_obj)

        self.store_new.commit()
Add migration script 22->23 (to be completed)
Add migration script 22->23 (to be completed)
Python
agpl-3.0
vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks
---
+++
@@ -0,0 +1,35 @@
+# -*- encoding: utf-8 -*-
+
+from storm.locals import Int, Bool, Unicode, DateTime, JSON, Reference, ReferenceSet
+from globaleaks.db.base_updater import TableReplacer
+from globaleaks.models import BaseModel, Model
+
+
+class InternalFile_v_22(Model):
+    __storm_table__ = 'internalfile'
+    internaltip_id = Unicode()
+    name = Unicode()
+    file_path = Unicode()
+    content_type = Unicode()
+    size = Int()
+    new = Int()
+
+
+class Replacer2223(TableReplacer):
+    def migrate_InternalFile(self):
+        print "%s InternalFile migration assistant" % self.std_fancy
+
+        old_objs = self.store_old.find(self.get_right_model("InternalFile", 22))
+
+        for old_obj in old_objs:
+            new_obj = self.get_right_model("InternalFile", 23)()
+            for _, v in new_obj._storm_columns.iteritems():
+                if v.name == 'processing_attempts':
+                    new_obj.processing_attempts = 0
+                    continue
+
+                setattr(new_obj, v.name, getattr(old_obj, v.name))
+
+            self.store_new.add(new_obj)
+
+        self.store_new.commit()
604ce43cd9a66ae52224d174bc5743bcbfc86546
test/test_url_parser.py
test/test_url_parser.py
import pytest

from lib.purl import Purl
from lib.purl_exc import *

class TestParserFunctions(object):

    def test_simple_url(self):
        str_url = 'http://blank'
        url = Purl(str_url)
        assert str(url) == str_url

        str_url = 'https://blank'
        url = Purl(str_url)
        assert str(url) == str_url

        str_url = 'http://blank.com'
        url = Purl(str_url)
        assert str(url) == str_url

    def test_invalid_url(self):
        with pytest.raises(InvalidUrlError):
            Purl('bad')
        with pytest.raises(InvalidUrlError):
            Purl('bad.com/abc/def')
        with pytest.raises(InvalidUrlError):
            Purl('http://bad:xwy/one/2/three')
        with pytest.raises(InvalidUrlError):
            Purl('http://bad://?hello')

    def test_url_mirrors_valid_inputs(self):
        str_url = 'http://blank:1234'
        url = Purl(str_url)
        assert str(url) == str_url

        str_url = 'file://blank/path/to/file'
        url = Purl(str_url)
        assert str(url) == str_url

        str_url = 'https://blank.com/resource/1'
        url = Purl(str_url)
        assert str(url) == str_url

        str_url = 'http://blank.org:1234/resouce/1/other'
        url = Purl(str_url)
        assert str(url) == str_url

        str_url = 'file://blank.org:1234/file.txt'
        url = Purl(str_url)
        assert str(url) == str_url

    def test_fields(self):
        url = Purl('sftp://secure-site:123')
        assert url.protocol == 'sftp://'
        assert url.hostname == 'secure-site'
        assert url.port == ':123'
        assert url.path == None

        url = Purl('http://nada.com')
        assert url.protocol == 'http://'
        assert url.hostname == 'nada.com'
        assert url.port == None
        assert url.path == None

        url = Purl('file://filesys/somefile.png')
        assert url.protocol == 'file://'
        assert url.hostname == 'filesys'
        assert url.port == None
        assert url.path == '/somefile.png'
Add url parsing unit tests
Add url parsing unit tests
Python
mit
ultrabluewolf/p.url
---
+++
@@ -0,0 +1,69 @@
+import pytest
+
+from lib.purl import Purl
+from lib.purl_exc import *
+
+class TestParserFunctions(object):
+
+    def test_simple_url(self):
+        str_url = 'http://blank'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+        str_url = 'https://blank'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+        str_url = 'http://blank.com'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+    def test_invalid_url(self):
+        with pytest.raises(InvalidUrlError):
+            Purl('bad')
+        with pytest.raises(InvalidUrlError):
+            Purl('bad.com/abc/def')
+        with pytest.raises(InvalidUrlError):
+            Purl('http://bad:xwy/one/2/three')
+        with pytest.raises(InvalidUrlError):
+            Purl('http://bad://?hello')
+
+    def test_url_mirrors_valid_inputs(self):
+        str_url = 'http://blank:1234'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+        str_url = 'file://blank/path/to/file'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+        str_url = 'https://blank.com/resource/1'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+        str_url = 'http://blank.org:1234/resouce/1/other'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+        str_url = 'file://blank.org:1234/file.txt'
+        url = Purl(str_url)
+        assert str(url) == str_url
+
+    def test_fields(self):
+        url = Purl('sftp://secure-site:123')
+        assert url.protocol == 'sftp://'
+        assert url.hostname == 'secure-site'
+        assert url.port == ':123'
+        assert url.path == None
+
+        url = Purl('http://nada.com')
+        assert url.protocol == 'http://'
+        assert url.hostname == 'nada.com'
+        assert url.port == None
+        assert url.path == None
+
+        url = Purl('file://filesys/somefile.png')
+        assert url.protocol == 'file://'
+        assert url.hostname == 'filesys'
+        assert url.port == None
+        assert url.path == '/somefile.png'
4a60fdf4896a41d52fc90e8a5f719976e605e8cc
lazy_helpers.py
lazy_helpers.py
# Lazy objects, for the serializer to find them we put them here

class LazyDriver(object):
    _driver = None

    def get(self):
        if self._driver is None:
            from selenium import webdriver
            self._driver = webdriver.Firefox()
        return self._driver


class LazyPool(object):
    _pool = None

    @classmethod
    def get(cls):
        if cls._pool is None:
            import urllib3
            cls._pool = urllib3.PoolManager()
        return cls._pool
Move the lazy classes out for the serializer to be able to find them
Move the lazy classes out for the serializer to be able to find them
Python
apache-2.0
holdenk/diversity-analytics,holdenk/diversity-analytics
---
+++
@@ -0,0 +1,21 @@
+# Lazy objects, for the serializer to find them we put them here
+
+class LazyDriver(object):
+    _driver = None
+
+    def get(self):
+        if self._driver is None:
+            from selenium import webdriver
+            self._driver = webdriver.Firefox()
+        return self._driver
+
+
+class LazyPool(object):
+    _pool = None
+
+    @classmethod
+    def get(cls):
+        if cls._pool is None:
+            import urllib3
+            cls._pool = urllib3.PoolManager()
+        return cls._pool
d247427d60944d529fa17865ac4e0556a9ccda3f
tools/telemetry/telemetry/page/actions/navigate.py
tools/telemetry/telemetry/page/actions/navigate.py
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.page.actions import page_action

class NavigateAction(page_action.PageAction):
  def __init__(self, attributes=None):
    super(NavigateAction, self).__init__(attributes)

  def RunAction(self, page, tab):
    if page.is_file:
      target_side_url = tab.browser.http_server.UrlOf(page.file_path_url)
    else:
      target_side_url = page.url

    tab.Navigate(target_side_url, page.script_to_evaluate_on_commit)
    tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from telemetry.page.actions import page_action

class NavigateAction(page_action.PageAction):
  def __init__(self, attributes=None):
    super(NavigateAction, self).__init__(attributes)

  def RunAction(self, page, tab):
    if page.is_file:
      target_side_url = tab.browser.http_server.UrlOf(page.file_path_url)
    else:
      target_side_url = page.url

    if hasattr(self, 'timeout_seconds') and self.timeout_seconds:
      tab.Navigate(target_side_url,
                   page.script_to_evaluate_on_commit,
                   self.timeout_seconds)
    else:
      tab.Navigate(target_side_url, page.script_to_evaluate_on_commit)
    tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
Add a timeout attr to NavigateAction.
Add a timeout attr to NavigateAction.

BUG=320748

Review URL: https://codereview.chromium.org/202483006

git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@257922 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
dushu1203/chromium.src,Just-D/chromium-1,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,ondra-novak/chromium.src,hgl888/chromium-crosswalk,markYoungH/chromium.src,crosswalk-project/chromium-crosswalk-efl,ltilve/chromium,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Jonekee/chromium.src,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,Just-D/chromium-1,Jonekee/chromium.src,dushu1203/chromium.src,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,M4sse/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,dednal/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,markYoungH/chromium.src,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,Just-D/chromium-1,chuan9/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,mohamed--abdel-maksoud/chromium.src,Jonekee/chromium.src,hgl888/chromium-crosswalk,krieger-od/nwjs_chromium.src,dednal/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,fujunwei/chromium-crosswalk,Jonekee/chromium.src,markYoungH/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,fujunwei/chromium-crosswalk,jaruba/chromium.src,dednal/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,jaruba/chromium.src,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,Jonekee/chromium.src,patrickm/chromium.src,Fireblend/chromium-crosswalk,markYoungH/chromium.src,Chilledheart/chromium,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,patrickm/chromium.src,ltilve/chromium,markYoungH/chromium.src,littlstar/chromium.src,axinging/chromium-crosswalk,dednal/chromium.src,axinging/chromium-crosswalk,Chilledheart/chromium,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,dednal/chromium.src,patrickm/chromium.src,Fireblend/chromium-crosswalk,ltilve/chromium,Just-D/chromium-1,littlstar/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,Chilledheart/chromium,krieger-od/nwjs_chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,ondra-novak/chromium.src,patrickm/chromium.src,chuan9/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,ltilve/chromium,mohamed--abdel-maksoud/chromium.src
---
+++
@@ -14,5 +14,10 @@
     else:
       target_side_url = page.url
 
-    tab.Navigate(target_side_url, page.script_to_evaluate_on_commit)
+    if hasattr(self, 'timeout_seconds') and self.timeout_seconds:
+      tab.Navigate(target_side_url,
+                   page.script_to_evaluate_on_commit,
+                   self.timeout_seconds)
+    else:
+      tab.Navigate(target_side_url, page.script_to_evaluate_on_commit)
     tab.WaitForDocumentReadyStateToBeInteractiveOrBetter()
ee66b13c952118f85e0ce14264da29807e8ab814
trunk/examples/gaussian_smoother.py
trunk/examples/gaussian_smoother.py
import matplotlib.pyplot as plt

x = np.linspace(-10, 10, 40)
y = np.linspace(-15, 15, 60)
Y,X = np.meshgrid(y,x)
noise = np.random.randn(*X.shape) * 10
data = X**2 + Y**2 + noise
data = np.ma.array(data, mask=((X**2 + Y**2) < 0.4))

data_filt = gaussian_filter(x, y, data, 4, 4)

plt.subplot(1, 2, 1)
plt.imshow(data.T, interpolation='nearest',
    extent=(x.min(), x.max(), y.min(), y.max()))

plt.subplot(1, 2, 2)
plt.imshow(data_filt.T, interpolation='nearest',
    extent=(x.min(), x.max(), y.min(), y.max()))

plt.show()
Add example using the gaussian smoother/filter.
Add example using the gaussian smoother/filter.

git-svn-id: acf0ef94bfce630b1a882387fc03ab8593ec6522@292 150532fb-1d5b-0410-a8ab-efec50f980d4
Python
bsd-3-clause
dopplershift/MetPy,dopplershift/MetPy,deeplycloudy/MetPy,jrleeman/MetPy,ahaberlie/MetPy,ahaberlie/MetPy,ahill818/MetPy,Unidata/MetPy,jrleeman/MetPy,Unidata/MetPy,ShawnMurd/MetPy
---
+++
@@ -0,0 +1,20 @@
+import matplotlib.pyplot as plt
+
+x = np.linspace(-10, 10, 40)
+y = np.linspace(-15, 15, 60)
+Y,X = np.meshgrid(y,x)
+noise = np.random.randn(*X.shape) * 10
+data = X**2 + Y**2 + noise
+data = np.ma.array(data, mask=((X**2 + Y**2) < 0.4))
+
+data_filt = gaussian_filter(x, y, data, 4, 4)
+
+plt.subplot(1, 2, 1)
+plt.imshow(data.T, interpolation='nearest',
+    extent=(x.min(), x.max(), y.min(), y.max()))
+
+plt.subplot(1, 2, 2)
+plt.imshow(data_filt.T, interpolation='nearest',
+    extent=(x.min(), x.max(), y.min(), y.max()))
+
+plt.show()
62955786e7ffd2e961849860dfd2146ef611890c
src/logfile_values.py
src/logfile_values.py
#!/usr/bin/env python
#
# logfile_values.py
#
# Copyright (c) 2017, InnoGames GmbH
#
"""
logfile_values.py -- a python script to find metrics values in log file

This script is using last line of log file to get metric value by column number

python logfile_values.py --metric="metric1:1" --metric="metric2:2" ...
"""

from argparse import ArgumentParser, ArgumentTypeError
from time import time


class Metric:
    def __init__(self, arg):
        if ':' not in arg:
            raise ArgumentTypeError('Argument must have ":"')
        self.name, column = arg.split(':', 1)
        if not column.isdecimal():
            raise ArgumentTypeError('Column must be a number')
        self.column = int(column)


def parse_args():
    parser = ArgumentParser()
    parser.add_argument('--prefix', default='logfile_values')
    parser.add_argument('--file', default='/var/log/messages')
    parser.add_argument('--metric', type=Metric, action='append')
    return parser.parse_args()


def main():
    args = parse_args()
    template = args.prefix + '.{} {} ' + str(int(time()))
    with open(args.file, 'r') as f:
        for line in f:
            pass
    last_line = line.split()
    for m in args.metric:
        print(template.format(m.name, last_line[m.column]))


if __name__ == '__main__':
    main()
Add plugin to parse values from log file
Add plugin to parse values from log file
Python
mit
innogames/igcollect
---
+++
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+#
+# logfile_values.py
+#
+# Copyright (c) 2017, InnoGames GmbH
+#
+"""
+logfile_values.py -- a python script to find metrics values in log file
+
+This script is using last line of log file to get metric value by column number
+
+python logfile_values.py --metric="metric1:1" --metric="metric2:2" ...
+"""
+
+from argparse import ArgumentParser, ArgumentTypeError
+from time import time
+
+
+class Metric:
+    def __init__(self, arg):
+        if ':' not in arg:
+            raise ArgumentTypeError('Argument must have ":"')
+        self.name, column = arg.split(':', 1)
+        if not column.isdecimal():
+            raise ArgumentTypeError('Column must be a number')
+        self.column = int(column)
+
+
+def parse_args():
+    parser = ArgumentParser()
+    parser.add_argument('--prefix', default='logfile_values')
+    parser.add_argument('--file', default='/var/log/messages')
+    parser.add_argument('--metric', type=Metric, action='append')
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+    template = args.prefix + '.{} {} ' + str(int(time()))
+    with open(args.file, 'r') as f:
+        for line in f:
+            pass
+    last_line = line.split()
+    for m in args.metric:
+        print(template.format(m.name, last_line[m.column]))
+
+
+if __name__ == '__main__':
+    main()
a3dc1ebac114d1591dd9cdb211e6d975a10b0da3
education/management/commands/reschedule_teacher_weekly_polls.py
education/management/commands/reschedule_teacher_weekly_polls.py
'''
Created on Feb 21, 2013

@author: raybesiga
'''

from django.core.management.base import BaseCommand
from education.models import reschedule_teacher_weekly_polls
from optparse import OptionParser, make_option

class Command(BaseCommand):

    option_list = BaseCommand.option_list + (
        make_option("-g", "--group", dest="group"),
    )
    def handle(self, **options):
        if not options['group']:
            group = raw_input('For which group? -- Teachers, SMC, Head Teachers, All:')
        else:
            group = options['group']
        group = None if options['group'] == 'All' else group
        reschedule_teacher_weekly_polls(grp=group)
        self.stdout.write('')
        self.stdout.write('Done')
Add new reschedule teacher weekly poll
Add new reschedule teacher weekly poll
Python
bsd-3-clause
unicefuganda/edtrac,unicefuganda/edtrac,unicefuganda/edtrac
--- +++ @@ -0,0 +1,24 @@ +''' +Created on Feb 21, 2013 + +@author: raybesiga +''' + +from django.core.management.base import BaseCommand +from education.models import reschedule_teacher_weekly_polls +from optparse import OptionParser, make_option + +class Command(BaseCommand): + + option_list = BaseCommand.option_list + ( + make_option("-g", "--group", dest="group"), + ) + def handle(self, **options): + if not options['group']: + group = raw_input('For which group? -- Teachers, SMC, Head Teachers, All:') + else: + group = options['group'] + group = None if options['group'] == 'All' else group + reschedule_teacher_weekly_polls(grp=group) + self.stdout.write('') + self.stdout.write('Done')
d64e576e74ddc68364259ed4ca941165a9038a56
tests/test_digitdestroyer.py
tests/test_digitdestroyer.py
from unittest import TestCase

from spicedham.digitdestroyer import DigitDestroyer

class TestDigitDestroyer(TestCase):

    def test_classify(self):
        dd = DigitDestroyer()
        dd.filter_match = 1
        dd.filter_miss = 0
        match_message = ['1', '2', '3', '1', '1']
        miss_message = ['a', '100']
        self.assertEqual(dd.classify('tag', match_message), 1)
        self.assertEqual(dd.classify('tag', miss_message), 0)
Add a test for the digitdestroyer filter
Add a test for the digitdestroyer filter
Python
mpl-2.0
mozilla/spicedham,mozilla/spicedham
--- +++ @@ -0,0 +1,15 @@ +from unittest import TestCase + +from spicedham.digitdestroyer import DigitDestroyer + +class TestDigitDestroyer(TestCase): + + def test_classify(self): + dd = DigitDestroyer() + dd.filter_match = 1 + dd.filter_miss = 0 + match_message = ['1', '2', '3', '1', '1'] + miss_message = ['a', '100'] + self.assertEqual(dd.classify('tag', match_message), 1) + self.assertEqual(dd.classify('tag', miss_message), 0) +
0fd33596d292f758a463f95dbbbcbbd729fd15cb
datatools/scripts/terms_from_marcframe.py
datatools/scripts/terms_from_marcframe.py
import json

def get_terms(marcframe):
    terms = set()
    for k, v in marcframe['entityTypeMap'].items():
        terms.add(k)
        terms.update(v.get('instanceTypes', []))
    dfn_keys = {'property', 'addProperty', 'link', 'addLink', 'domainEntity', 'rangeEntity'}
    def add_terms(dfn):
        for k, v in dfn.items():
            if k in dfn_keys:
                terms.add(v)
            elif isinstance(v, dict):
                add_terms(v)
            elif k == 'defaults':
                terms.update(v)
    for part in ['bib', 'auth', 'hold']:
        for field in marcframe[part].values():
            add_terms(field)
    return terms

if __name__ == '__main__':
    import sys
    source = sys.argv[1]
    with open(source) as fp:
        marcframe = json.load(fp)
    terms = get_terms(marcframe)
    for term in sorted(terms):
        print term
Make simple script for finding terms used in marcframe
Make simple script for finding terms used in marcframe
Python
apache-2.0
libris/librisxl,libris/librisxl,libris/librisxl
--- +++ @@ -0,0 +1,29 @@ +import json + +def get_terms(marcframe): + terms = set() + for k, v in marcframe['entityTypeMap'].items(): + terms.add(k) + terms.update(v.get('instanceTypes', [])) + dfn_keys = {'property', 'addProperty', 'link', 'addLink', 'domainEntity', 'rangeEntity'} + def add_terms(dfn): + for k, v in dfn.items(): + if k in dfn_keys: + terms.add(v) + elif isinstance(v, dict): + add_terms(v) + elif k == 'defaults': + terms.update(v) + for part in ['bib', 'auth', 'hold']: + for field in marcframe[part].values(): + add_terms(field) + return terms + +if __name__ == '__main__': + import sys + source = sys.argv[1] + with open(source) as fp: + marcframe = json.load(fp) + terms = get_terms(marcframe) + for term in sorted(terms): + print term
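The script collects every vocabulary term a marcframe JSON config refers to. A made-up, minimal fragment of the input shape (the field names here are illustrative, not taken from the real config), with the set get_terms would return for it:

# Hypothetical marcframe fragment matching the traversal above.
marcframe = {
    'entityTypeMap': {'Instance': {'instanceTypes': ['Print']}},
    'bib': {'245': {'link': 'hasTitle',
                    'addProperty': 'titleValue',
                    'defaults': ['marc:nonfiling']}},
    'auth': {},
    'hold': {},
}
# get_terms(marcframe) would return:
# {'Instance', 'Print', 'hasTitle', 'titleValue', 'marc:nonfiling'}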
64bb470bc58d6d467f4bd807f82cc56aa5e674bc
nova/tests/test_sqlalchemy.py
nova/tests/test_sqlalchemy.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Unit tests for SQLAlchemy specific code."""

from eventlet import db_pool
try:
    import MySQLdb
except ImportError:
    MySQLdb = None

from nova import context
from nova.db.sqlalchemy import session
from nova import test


class DbPoolTestCase(test.TestCase):
    def setUp(self):
        super(DbPoolTestCase, self).setUp()
        self.flags(sql_dbpool_enable=True)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        if not MySQLdb:
            self.skipTest("Unable to test due to lack of MySQLdb")

    def test_db_pool_option(self):
        self.flags(sql_idle_timeout=11, sql_min_pool_size=21,
                   sql_max_pool_size=42)

        info = {}

        class FakeConnectionPool(db_pool.ConnectionPool):
            def __init__(self, mod_name, **kwargs):
                info['module'] = mod_name
                info['kwargs'] = kwargs
                super(FakeConnectionPool, self).__init__(mod_name,
                                                         **kwargs)

            def connect(self, *args, **kwargs):
                raise test.TestingException()

        self.stubs.Set(db_pool, 'ConnectionPool',
                       FakeConnectionPool)

        sql_connection = 'mysql://user:pass@127.0.0.1/nova'
        self.assertRaises(test.TestingException, session.create_engine,
                          sql_connection)

        self.assertEqual(info['module'], MySQLdb)
        self.assertEqual(info['kwargs']['max_idle'], 11)
        self.assertEqual(info['kwargs']['min_size'], 21)
        self.assertEqual(info['kwargs']['max_size'], 42)
Add eventlet db_pool use for mysql
Add eventlet db_pool use for mysql This adds the use of eventlet's db_pool module so that we can make mysql calls without blocking the whole process. New config options are introduced: sql_dbpool_enable -- Enables the use of eventlet's db_pool sql_min_pool_size -- Set the minimum number of SQL connections The default for sql_dbpool_enable is False for now, so there is no forced behavior changes for those using mysql. sql_min_pool_size is defaulted to 1 to match behavior if not using db_pool. Adds a new test module for our sqlalchemy code, testing this new option as much as is possible without requiring mysql server to be running. DocImpact Change-Id: I99833f447df05c1beba5a3925b201dfccca72cae
Python
apache-2.0
n0ano/gantt,n0ano/gantt
--- +++ @@ -0,0 +1,66 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# Copyright (c) 2012 Rackspace Hosting +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for SQLAlchemy specific code.""" + +from eventlet import db_pool +try: + import MySQLdb +except ImportError: + MySQLdb = None + +from nova import context +from nova.db.sqlalchemy import session +from nova import test + + +class DbPoolTestCase(test.TestCase): + def setUp(self): + super(DbPoolTestCase, self).setUp() + self.flags(sql_dbpool_enable=True) + self.user_id = 'fake' + self.project_id = 'fake' + self.context = context.RequestContext(self.user_id, self.project_id) + if not MySQLdb: + self.skipTest("Unable to test due to lack of MySQLdb") + + def test_db_pool_option(self): + self.flags(sql_idle_timeout=11, sql_min_pool_size=21, + sql_max_pool_size=42) + + info = {} + + class FakeConnectionPool(db_pool.ConnectionPool): + def __init__(self, mod_name, **kwargs): + info['module'] = mod_name + info['kwargs'] = kwargs + super(FakeConnectionPool, self).__init__(mod_name, + **kwargs) + + def connect(self, *args, **kwargs): + raise test.TestingException() + + self.stubs.Set(db_pool, 'ConnectionPool', + FakeConnectionPool) + + sql_connection = 'mysql://user:pass@127.0.0.1/nova' + self.assertRaises(test.TestingException, session.create_engine, + sql_connection) + + self.assertEqual(info['module'], MySQLdb) + self.assertEqual(info['kwargs']['max_idle'], 11) + self.assertEqual(info['kwargs']['min_size'], 21) + self.assertEqual(info['kwargs']['max_size'], 42)
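The test pins how create_engine should hand the three flags to eventlet; nova's actual wiring is not part of this record. A minimal sketch of the pattern under test (it needs MySQLdb installed, and actually checking a connection out needs a reachable server):

# Sketch only: the eventlet pool construction the stubbed test asserts on.
import MySQLdb
from eventlet import db_pool

pool = db_pool.ConnectionPool(MySQLdb,
                              min_size=21,   # sql_min_pool_size
                              max_size=42,   # sql_max_pool_size
                              max_idle=11)   # sql_idle_timeout
conn = pool.get()   # green-thread-friendly checkout (connects on demand)
try:
    pass            # run queries here
finally:
    pool.put(conn)  # return the connection to the pool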
f10b89d8c2b847555223a4a025d78e1223f57696
scripts/fork_my_feedstocks.py
scripts/fork_my_feedstocks.py
#!/usr/bin/env conda-execute

"""
This script can be run to fork conda-forge feedstocks to which you are a maintainer.
This is super useful if you maintain many feedstocks and would like to cutdown maintenance on your next PR...

Requires a token stored in the environment variable `GH_TOKEN` with the permissions `public_repo` and `read:org`.
It also requires all the feedstocks be cloned somewhere like with the `feedstocks` repo.
"""
# conda execute
# env:
#  - git
#  - python
#  - conda-smithy
#  - pygithub
#  - gitpython
# channels:
#  - conda-forge
# run_with: python

import os
import argparse

import git
import github

from conda_build.metadata import MetaData

import conda_smithy.github
import conda_smithy.configure_feedstock
import conda_smithy
import conda_smithy.feedstocks as feedstocks


parser = argparse.ArgumentParser(description="Fork your maintained feedstocks.")
parser.add_argument("--feedstocks-dir", help="The location of the feedstocks.",
                    default="~/Developer/Conda/conda-forge/feedstocks/feedstocks")
args = parser.parse_args()

feedstocks_dir = os.path.abspath(os.path.expanduser(args.feedstocks_dir))

gh_token = os.environ['GH_TOKEN']
gh = github.Github(gh_token)
gh_me = gh.get_user()
gh_org = gh.get_organization("conda-forge")

if gh_me.login == 'conda-forge-admin':
    raise ValueError("Please don't run this script with the github "
                     "token for {}.".format(gh_me.login))

for each_feedstock in os.listdir(feedstocks_dir):
    each_feedstock_dir = os.path.join(feedstocks_dir, each_feedstock)
    meta = os.path.join(each_feedstock_dir, 'recipe')
    if not os.path.exists(meta):
        print('Found an empty repo... :(')
        continue
    meta = MetaData(meta)
    me_a_maintainer = gh_me.login in meta.meta.get('extra', {}).get('recipe-maintainers', [])
    print(' - {: <24}(maintainer: {})'.format(each_feedstock, me_a_maintainer))
    if me_a_maintainer:
        print('*** Forking `{}`.'.format(each_feedstock))
        each_feedstock_repo = each_feedstock
        if not each_feedstock_repo.endswith("-feedstock"):
            each_feedstock_repo += "-feedstock"
        repo = gh_org.get_repo(each_feedstock_repo)
        gh_me.create_fork(repo)
Add a script to fork all feedstocks one is a maintainer on.
scripts: Add a script to fork all feedstocks one is a maintainer on.
Python
bsd-3-clause
conda-forge/conda-forge.github.io,conda-forge/conda-forge.github.io,conda-forge/conda-forge.github.io,conda-forge/conda-forge.github.io
--- +++ @@ -0,0 +1,66 @@ +#!/usr/bin/env conda-execute + +""" +This script can be run to fork conda-forge feedstocks to which you are a maintainer. +This is super useful if you maintain many feedstocks and would like to cutdown maintenance on your next PR... + +Requires a token stored in the environment variable `GH_TOKEN` with the permissions `public_repo` and `read:org`. +It also requires all the feedstocks be cloned somewhere like with the `feedstocks` repo. +""" +# conda execute +# env: +# - git +# - python +# - conda-smithy +# - pygithub +# - gitpython +# channels: +# - conda-forge +# run_with: python + +import os +import argparse + +import git +import github + +from conda_build.metadata import MetaData + +import conda_smithy.github +import conda_smithy.configure_feedstock +import conda_smithy +import conda_smithy.feedstocks as feedstocks + + +parser = argparse.ArgumentParser(description="Fork your maintained feedstocks.") +parser.add_argument("--feedstocks-dir", help="The location of the feedstocks.", + default="~/Developer/Conda/conda-forge/feedstocks/feedstocks") +args = parser.parse_args() + +feedstocks_dir = os.path.abspath(os.path.expanduser(args.feedstocks_dir)) + +gh_token = os.environ['GH_TOKEN'] +gh = github.Github(gh_token) +gh_me = gh.get_user() +gh_org = gh.get_organization("conda-forge") + +if gh_me.login == 'conda-forge-admin': + raise ValueError("Please don't run this script with the github " + "token for {}.".format(gh_me.login)) + +for each_feedstock in os.listdir(feedstocks_dir): + each_feedstock_dir = os.path.join(feedstocks_dir, each_feedstock) + meta = os.path.join(each_feedstock_dir, 'recipe') + if not os.path.exists(meta): + print('Found an empty repo... :(') + continue + meta = MetaData(meta) + me_a_maintainer = gh_me.login in meta.meta.get('extra', {}).get('recipe-maintainers', []) + print(' - {: <24}(maintainer: {})'.format(each_feedstock, me_a_maintainer)) + if me_a_maintainer: + print('*** Forking `{}`.'.format(each_feedstock)) + each_feedstock_repo = each_feedstock + if not each_feedstock_repo.endswith("-feedstock"): + each_feedstock_repo += "-feedstock" + repo = gh_org.get_repo(each_feedstock_repo) + gh_me.create_fork(repo)
773a389f2ae69384f09cbada8b4b2615a7c430de
celery/tests/test_messaging.py
celery/tests/test_messaging.py
import unittest
from celery.messaging import MSG_OPTIONS, get_msg_options, extract_msg_options


class TestMsgOptions(unittest.TestCase):

    def test_MSG_OPTIONS(self):
        self.assertTrue(MSG_OPTIONS)

    def test_extract_msg_options(self):
        testing = {"mandatory": True, "routing_key": "foo.xuzzy"}
        result = extract_msg_options(testing)
        self.assertEquals(result["mandatory"], True)
        self.assertEquals(result["routing_key"], "foo.xuzzy")
Add regression test for the message options bug.
Add regression test for the message options bug.
Python
bsd-3-clause
frac/celery,cbrepo/celery,frac/celery,ask/celery,ask/celery,WoLpH/celery,mitsuhiko/celery,cbrepo/celery,mitsuhiko/celery,WoLpH/celery
--- +++ @@ -0,0 +1,16 @@ +import unittest +from celery.messaging import MSG_OPTIONS, get_msg_options, extract_msg_options + + +class TestMsgOptions(unittest.TestCase): + + def test_MSG_OPTIONS(self): + self.assertTrue(MSG_OPTIONS) + + def test_extract_msg_options(self): + testing = {"mandatory": True, "routing_key": "foo.xuzzy"} + result = extract_msg_options(testing) + self.assertEquals(result["mandatory"], True) + self.assertEquals(result["routing_key"], "foo.xuzzy") + +
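extract_msg_options itself is not included in this record. A minimal sketch that would satisfy the regression test; any option names beyond the two asserted ones are assumptions, not celery's actual list:

# Hypothetical reconstruction, not celery's actual implementation.
MSG_OPTIONS = ("mandatory", "routing_key", "priority", "immediate")

def extract_msg_options(options, keep=MSG_OPTIONS):
    """Copy only the known message options out of ``options``."""
    return dict((name, options.get(name)) for name in keep)

print(extract_msg_options({"mandatory": True, "routing_key": "foo.xuzzy"}))
# -> includes mandatory=True and routing_key='foo.xuzzy'; unset options are None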
4448f88734a3fb631a02aeb9b84675575226845d
examples/matplotlib/matplotlib_example.py
examples/matplotlib/matplotlib_example.py
# Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.

import sys
sys.path.insert(0, '../..')

# FIXME... :)
import gc
gc.disable()

import cairocffi
cairocffi.install_as_pycairo()

import pgi
pgi.install_as_gi()

import matplotlib
matplotlib.use('GTK3Cairo')
from matplotlib import pyplot

import math


pyplot.plot([math.sin(x / 100.0) for x in range(1000)])
pyplot.show()
Add a matplotlib example (needs cffi)
Add a matplotlib example (needs cffi)
Python
lgpl-2.1
lazka/pgi,lazka/pgi
--- +++ @@ -0,0 +1,29 @@ +# Copyright 2013 Christoph Reiter +# +# This library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. + +import sys +sys.path.insert(0, '../..') + +# FIXME... :) +import gc +gc.disable() + +import cairocffi +cairocffi.install_as_pycairo() + +import pgi +pgi.install_as_gi() + +import matplotlib +matplotlib.use('GTK3Cairo') +from matplotlib import pyplot + +import math + + +pyplot.plot([math.sin(x / 100.0) for x in range(1000)]) +pyplot.show()
8d9eae677ef81ba3dcb000e528985276a920ef05
test/test_loader.py
test/test_loader.py
from .helper import BJOTest
from bernard.actors import Locker, Notifier
from bernard.loader import YAMLLoader
from praw.models import Comment, Submission


class TestValidation(BJOTest):
    def setUp(self):
        super().setUp()
        self.loader = YAMLLoader(self.db, self.cur, self.subreddit)

    def test_bad_param_type(self):
        params = {'text': 3}
        with self.assertRaises(RuntimeError):
            self.loader.validate_subactor_config(Notifier, params, [])

    def test_good_param_type(self):
        params = {'text': "foobar"}
        self.loader.validate_subactor_config(Notifier, params, [])

    def test_bad_target_type(self):
        with self.assertRaises(RuntimeError):
            self.loader.validate_subactor_config(Locker, {}, [Comment])

    def test_good_target_type(self):
        self.loader.validate_subactor_config(Locker, {}, [Submission])
Add some tests for the new validation logic
Add some tests for the new validation logic
Python
mit
leviroth/bernard
--- +++ @@ -0,0 +1,26 @@ +from .helper import BJOTest +from bernard.actors import Locker, Notifier +from bernard.loader import YAMLLoader +from praw.models import Comment, Submission + + +class TestValidation(BJOTest): + def setUp(self): + super().setUp() + self.loader = YAMLLoader(self.db, self.cur, self.subreddit) + + def test_bad_param_type(self): + params = {'text': 3} + with self.assertRaises(RuntimeError): + self.loader.validate_subactor_config(Notifier, params, []) + + def test_good_param_type(self): + params = {'text': "foobar"} + self.loader.validate_subactor_config(Notifier, params, []) + + def test_bad_target_type(self): + with self.assertRaises(RuntimeError): + self.loader.validate_subactor_config(Locker, {}, [Comment]) + + def test_good_target_type(self): + self.loader.validate_subactor_config(Locker, {}, [Submission])
d727758e3db52327e7326b5f8546ecde06d409e7
test/test_logger.py
test/test_logger.py
# encoding: utf-8

"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""

from __future__ import print_function
from __future__ import unicode_literals

from dataproperty import (
    set_logger,
    set_log_level,
)
import logbook
import pytest


class Test_set_logger(object):

    @pytest.mark.parametrize(["value"], [
        [True],
        [False],
    ])
    def test_smoke(self, value):
        set_logger(value)


class Test_set_log_level(object):

    @pytest.mark.parametrize(["value"], [
        [logbook.CRITICAL],
        [logbook.ERROR],
        [logbook.WARNING],
        [logbook.NOTICE],
        [logbook.INFO],
        [logbook.DEBUG],
        [logbook.TRACE],
        [logbook.NOTSET],
    ])
    def test_smoke(self, value):
        set_log_level(value)

    @pytest.mark.parametrize(["value", "expected"], [
        [None, LookupError],
        ["unexpected", LookupError],
    ])
    def test_exception(self, value, expected):
        with pytest.raises(expected):
            set_log_level(value)
Add test cases for the logger
Add test cases for the logger
Python
mit
thombashi/DataProperty
--- +++ @@ -0,0 +1,49 @@ +# encoding: utf-8 + +""" +.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com> +""" + +from __future__ import print_function +from __future__ import unicode_literals + +from dataproperty import ( + set_logger, + set_log_level, +) +import logbook +import pytest + + +class Test_set_logger(object): + + @pytest.mark.parametrize(["value"], [ + [True], + [False], + ]) + def test_smoke(self, value): + set_logger(value) + + +class Test_set_log_level(object): + + @pytest.mark.parametrize(["value"], [ + [logbook.CRITICAL], + [logbook.ERROR], + [logbook.WARNING], + [logbook.NOTICE], + [logbook.INFO], + [logbook.DEBUG], + [logbook.TRACE], + [logbook.NOTSET], + ]) + def test_smoke(self, value): + set_log_level(value) + + @pytest.mark.parametrize(["value", "expected"], [ + [None, LookupError], + ["unexpected", LookupError], + ]) + def test_exception(self, value, expected): + with pytest.raises(expected): + set_log_level(value)
0200c03f8f6232965f924a765c5ebb0f9c439f4d
sample_app/forms.py
sample_app/forms.py
from flask_wtf import Form
from wtforms.fields import (TextField, SubmitField, BooleanField, DateField,
                            DateTimeField)
from wtforms.validators import Required


class SignupForm(Form):
    name = TextField(u'Your name', validators=[Required()])
    birthday = DateField(u'Your birthday')
    now = DateTimeField(u'Current time',
                        description='...for no particular reason')
    eula = BooleanField(u'I did not read the terms and conditions',
                        validators=[Required('You must agree to not agree!')])
    submit = SubmitField(u'Signup')
from flask_wtf import Form
from wtforms.fields import (TextField, SubmitField, BooleanField, DateField,
                            DateTimeField)
from wtforms.validators import Required, Email


class SignupForm(Form):
    name = TextField(u'Your name', validators=[Required()])
    email = TextField(u'Your email address', validators=[Email()])
    birthday = DateField(u'Your birthday')
    now = DateTimeField(u'Current time',
                        description='...for no particular reason')
    eula = BooleanField(u'I did not read the terms and conditions',
                        validators=[Required('You must agree to not agree!')])
    submit = SubmitField(u'Signup')
Add email field to sample app.
Add email field to sample app.
Python
apache-2.0
vishnugonela/flask-bootstrap,BeardedSteve/flask-bootstrap,livepy/flask-bootstrap,suvorom/flask-bootstrap,vishnugonela/flask-bootstrap,JingZhou0404/flask-bootstrap,vishnugonela/flask-bootstrap,BeardedSteve/flask-bootstrap,suvorom/flask-bootstrap,eshijia/flask-bootstrap,JingZhou0404/flask-bootstrap,moha24/flask-bootstrap,victorbjorklund/flask-bootstrap,scorpiovn/flask-bootstrap,JingZhou0404/flask-bootstrap,moha24/flask-bootstrap,BeardedSteve/flask-bootstrap,livepy/flask-bootstrap,victorbjorklund/flask-bootstrap,Coxious/flask-bootstrap,Coxious/flask-bootstrap,Coxious/flask-bootstrap,suvorom/flask-bootstrap,victorbjorklund/flask-bootstrap,scorpiovn/flask-bootstrap,eshijia/flask-bootstrap,moha24/flask-bootstrap,scorpiovn/flask-bootstrap,eshijia/flask-bootstrap,livepy/flask-bootstrap
--- +++ @@ -1,11 +1,12 @@ from flask_wtf import Form from wtforms.fields import (TextField, SubmitField, BooleanField, DateField, DateTimeField) -from wtforms.validators import Required +from wtforms.validators import Required, Email class SignupForm(Form): name = TextField(u'Your name', validators=[Required()]) + email = TextField(u'Your email address', validators=[Email()]) birthday = DateField(u'Your birthday') now = DateTimeField(u'Current time', description='...for no particular reason')
e97649a29a10ecc06eaa33b0898b2c22368e7102
tests/tests_list.py
tests/tests_list.py
#List of input files and reference databases
sim_files = [("./inputs/physor/1_Enrichment_2_Reactor.xml", "./benchmarks/physor_1_Enrichment_2_Reactor.h5"),
             ("./inputs/physor/2_Sources_3_Reactors.xml", "./benchmarks/physor_2_Sources_3_Reactors.h5")]
Add python file with a list of simulation files.
Add python file with a list of simulation files.
Python
bsd-3-clause
Baaaaam/cycamore,rwcarlsen/cycamore,Baaaaam/cyBaM,gonuke/cycamore,rwcarlsen/cycamore,gonuke/cycamore,cyclus/cycaless,Baaaaam/cyBaM,Baaaaam/cyCLASS,rwcarlsen/cycamore,Baaaaam/cyBaM,Baaaaam/cycamore,rwcarlsen/cycamore,gonuke/cycamore,Baaaaam/cyCLASS,jlittell/cycamore,Baaaaam/cyBaM,jlittell/cycamore,jlittell/cycamore,gonuke/cycamore,cyclus/cycaless,Baaaaam/cycamore,jlittell/cycamore
--- +++ @@ -0,0 +1,4 @@ + +#List of input files and reference databases +sim_files = [("./inputs/physor/1_Enrichment_2_Reactor.xml", "./benchmarks/physor_1_Enrichment_2_Reactor.h5"), + ("./inputs/physor/2_Sources_3_Reactors.xml", "./benchmarks/physor_2_Sources_3_Reactors.h5")]
8ce21d0d060fcaaea192f002d12c79101f4bc1a2
corehq/apps/commtrack/management/commands/fix_default_program.py
corehq/apps/commtrack/management/commands/fix_default_program.py
from django.core.management.base import BaseCommand
from corehq.apps.commtrack.models import Program
from corehq.apps.domain.models import Domain
from corehq.apps.commtrack.util import get_or_create_default_program


class Command(BaseCommand):
    help = 'Populate default program flag for domains'

    def handle(self, *args, **options):
        self.stdout.write("Fixing default programs...\n")

        for domain in Domain.get_all():
            if not domain.commtrack_enabled:
                continue

            if Program.default_for_domain(domain.name):
                continue

            programs = Program.by_domain(domain.name)

            # filter anything named 'default' or 'Default'
            current_default = [
                p for p in programs
                if p.name == 'Default' or p.name == 'default'
            ]

            # if they never changed their default programs
            # name, we don't want to add a confusing new one
            # so just flip this to the default
            if len(current_default) == 1:
                p.default = True
                p.save()
            else:
                get_or_create_default_program(domain.name)
Add management command to migrate programs
Add management command to migrate programs
Python
bsd-3-clause
dimagi/commcare-hq,puttarajubr/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq
--- +++ @@ -0,0 +1,35 @@ +from django.core.management.base import BaseCommand +from corehq.apps.commtrack.models import Program +from corehq.apps.domain.models import Domain +from corehq.apps.commtrack.util import get_or_create_default_program + + +class Command(BaseCommand): + help = 'Populate default program flag for domains' + + def handle(self, *args, **options): + self.stdout.write("Fixing default programs...\n") + + for domain in Domain.get_all(): + if not domain.commtrack_enabled: + continue + + if Program.default_for_domain(domain.name): + continue + + programs = Program.by_domain(domain.name) + + # filter anything named 'default' or 'Default' + current_default = [ + p for p in programs + if p.name == 'Default' or p.name == 'default' + ] + + # if they never changed their default programs + # name, we don't want to add a confusing new one + # so just flip this to the default + if len(current_default) == 1: + p.default = True + p.save() + else: + get_or_create_default_program(domain.name)
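One subtlety in the command above: after the list comprehension, p is (under Python 2 scoping) the last element of programs, not necessarily the matched default, so p.default = True can flip the wrong program. A self-contained sketch with stand-in objects showing the explicit form:

# Stand-in objects; the point is flipping current_default[0], not `p`.
class FakeProgram(object):
    def __init__(self, name):
        self.name, self.default = name, False

    def save(self):
        pass

programs = [FakeProgram('Default'), FakeProgram('Other')]
current_default = [p for p in programs if p.name in ('Default', 'default')]
if len(current_default) == 1:
    match = current_default[0]  # not the leaked loop variable
    match.default = True
    match.save()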
b4a7e92bb8f3876c12982ef5f63ed1ad56f30ac7
tests/parthole_test.py
tests/parthole_test.py
"""Tests on the particle-hole model.""" import pytest from drudge import PartHoleDrudge, CR, AN from drudge.wick import wick_expand @pytest.fixture(scope='module') def parthole(spark_ctx): """Initialize the environment for a free algebra.""" dr = PartHoleDrudge(spark_ctx) return dr def test_parthole_normal_order_on_term(parthole): """Test particle-hole normal ordering on a simple term. This test act on a tensor term directly without parallelization. It is supposed for the ease of debugging. """ dr = parthole p = dr.names c_ = dr.op i = p.i j = p.j t = dr.one_body term = dr.sum( (i, p.O), (j, p.O), t[i, j] * c_[CR, i] * c_[AN, j] ).local_terms[0] res = wick_expand( term, comparator=dr.comparator, contractor=dr.contractor, phase=dr.phase, symms=dr.symms.value ) # Bare minimum inspection. assert len(res) == 2 def test_parthole_drudge_has_good_ham(parthole): """Test the Hamiltonian of the particle-hole model.""" dr = parthole # Minimum inspection. # # TODO: Add inspection of the actual value. assert dr.orig_ham.n_terms == 2 ** 2 + 2 ** 4 assert dr.full_ham.n_terms == 2 + 8 + 9 assert dr.ham_energy.n_terms == 2 assert dr.one_body_ham.n_terms == 8 assert dr.ham.n_terms == 4 + 9
Add minimal tests for PartHoleDrudge
Add minimal tests for PartHoleDrudge Here only the number of terms in the different forms of the Hamiltonian is checked. It should later be replaced with actual value inspection.
Python
mit
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
--- +++ @@ -0,0 +1,57 @@ +"""Tests on the particle-hole model.""" + +import pytest + +from drudge import PartHoleDrudge, CR, AN +from drudge.wick import wick_expand + + +@pytest.fixture(scope='module') +def parthole(spark_ctx): + """Initialize the environment for a free algebra.""" + dr = PartHoleDrudge(spark_ctx) + return dr + + +def test_parthole_normal_order_on_term(parthole): + """Test particle-hole normal ordering on a simple term. + + This test act on a tensor term directly without parallelization. It is + supposed for the ease of debugging. + """ + + dr = parthole + p = dr.names + c_ = dr.op + i = p.i + j = p.j + + t = dr.one_body + term = dr.sum( + (i, p.O), (j, p.O), t[i, j] * c_[CR, i] * c_[AN, j] + ).local_terms[0] + + res = wick_expand( + term, comparator=dr.comparator, contractor=dr.contractor, + phase=dr.phase, symms=dr.symms.value + ) + + # Bare minimum inspection. + assert len(res) == 2 + + +def test_parthole_drudge_has_good_ham(parthole): + """Test the Hamiltonian of the particle-hole model.""" + + dr = parthole + + # Minimum inspection. + # + # TODO: Add inspection of the actual value. + + assert dr.orig_ham.n_terms == 2 ** 2 + 2 ** 4 + assert dr.full_ham.n_terms == 2 + 8 + 9 + + assert dr.ham_energy.n_terms == 2 + assert dr.one_body_ham.n_terms == 8 + assert dr.ham.n_terms == 4 + 9
2ff14d38266322d3e428c29a01a3de5015269166
package/src/get_rss_feeds.py
package/src/get_rss_feeds.py
# Chap07/blogs_rss_get_posts.py
import json
from argparse import ArgumentParser
import feedparser


def get_parser():
    parser = ArgumentParser()
    parser.add_argument('--rss-url')
    parser.add_argument('--json')
    return parser


if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()

    feed = feedparser.parse(args.rss_url)
    if feed.entries:
        with open(args.json, 'w') as f:
            for item in feed.entries:
                f.write(json.dumps(item)+"\n")
Add minimal feed sourcing example
Add minimal feed sourcing example
Python
mit
MrKriss/full-fact-rss-miner
--- +++ @@ -0,0 +1,22 @@ +# Chap07/blogs_rss_get_posts.py +import json +from argparse import ArgumentParser +import feedparser + + +def get_parser(): + parser = ArgumentParser() + parser.add_argument('--rss-url') + parser.add_argument('--json') + return parser + + +if __name__ == '__main__': + parser = get_parser() + args = parser.parse_args() + + feed = feedparser.parse(args.rss_url) + if feed.entries: + with open(args.json, 'w') as f: + for item in feed.entries: + f.write(json.dumps(item)+"\n")
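feedparser entries are dict-like, which is why json.dumps(item) above works directly. A quick sketch of inspecting the common fields before serializing (the feed URL is illustrative):

# Illustrative URL; entry keys vary by feed, hence .get() lookups.
import feedparser

feed = feedparser.parse('https://example.com/feed.xml')
for entry in feed.entries:
    print(entry.get('title'), entry.get('link'), entry.get('published'))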
bf3a32714e43fdb4abc226c5c353ccfc10448854
spark/wordcount.py
spark/wordcount.py
from pyspark import SparkConf, SparkContext
import sys


if __name__ == "__main__":
    if len(sys.argv) != 3:
        print "Incorrect number of arguments, correct usage: wordcount.py [inputfile] [outputfile]"
        sys.exit(-1)

    # set input and dictionary from args
    input = sys.argv[1]
    output = sys.argv[2]

    conf = SparkConf().setMaster("local").setAppName("Word Count")
    sc = SparkContext(conf=conf)

    sotu = sc.textFile(input)

    counts = sotu.flatMap(lambda line: line.split(" ")) \
                 .map(lambda word: (word, 1)) \
                 .reduceByKey(lambda a, b: a + b)

    counts.coalesce(1).saveAsTextFile(output)

    sc.stop()
    print "Done!"
Add Spark Python word count program
Add Spark Python word count program
Python
mit
bbengfort/hadoop-fundamentals,bbengfort/hadoop-fundamentals,bbengfort/hadoop-fundamentals,cycuq/hadoop-fundamentals-for-data-scientists,sssllliang/hadoop-fundamentals,nvoron23/hadoop-fundamentals
--- +++ @@ -0,0 +1,26 @@ +from pyspark import SparkConf, SparkContext +import sys + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print "Incorrect number of arguments, correct usage: wordcount.py [inputfile] [outputfile]" + sys.exit(-1) + + # set input and dictionary from args + input = sys.argv[1] + output = sys.argv[2] + + conf = SparkConf().setMaster("local").setAppName("Word Count") + sc = SparkContext(conf=conf) + + sotu = sc.textFile(input) + + counts = sotu.flatMap(lambda line: line.split(" ")) \ + .map(lambda word: (word, 1)) \ + .reduceByKey(lambda a, b: a + b) + + counts.coalesce(1).saveAsTextFile(output) + + sc.stop() + print "Done!"
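The three Spark stages form the classic word-count MapReduce. The same computation in plain Python on a tiny sample makes each stage concrete (no cluster needed):

# Local equivalent of flatMap / map / reduceByKey for a word count.
lines = ["to be or", "not to be"]
words = [w for line in lines for w in line.split(" ")]  # flatMap
pairs = [(w, 1) for w in words]                         # map
counts = {}
for w, n in pairs:                                      # reduceByKey (sum)
    counts[w] = counts.get(w, 0) + n
print(counts)  # {'to': 2, 'be': 2, 'or': 1, 'not': 1}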
244d95937c1fbae6a0f415cbdcbd4ed65cc6d8c4
CodeFights/prefSum.py
CodeFights/prefSum.py
#!/usr/local/bin/python
# Code Fights Pref Sum Problem

from itertools import accumulate


def prefSum(a):
    return list(accumulate(a))


def main():
    tests = [
        [[1, 2, 3], [1, 3, 6]],
        [[1, 2, 3, -6], [1, 3, 6, 0]],
        [[0, 0, 0], [0, 0, 0]]
    ]

    for t in tests:
        res = prefSum(t[0])
        ans = t[1]
        if ans == res:
            print("PASSED: prefSum({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: prefSum({}) returned {},"
                   "answer: {}").format(t[0], res, ans))


if __name__ == '__main__':
    main()
Solve Code Fights pref sum problem
Solve Code Fights pref sum problem
Python
mit
HKuz/Test_Code
--- +++ @@ -0,0 +1,30 @@ +#!/usr/local/bin/python +# Code Fights Pref Sum Problem + +from itertools import accumulate + + +def prefSum(a): + return list(accumulate(a)) + + +def main(): + tests = [ + [[1, 2, 3], [1, 3, 6]], + [[1, 2, 3, -6], [1, 3, 6, 0]], + [[0, 0, 0], [0, 0, 0]] + ] + + for t in tests: + res = prefSum(t[0]) + ans = t[1] + if ans == res: + print("PASSED: prefSum({}) returned {}" + .format(t[0], res)) + else: + print(("FAILED: prefSum({}) returned {}," + "answer: {}").format(t[0], res, ans)) + + +if __name__ == '__main__': + main()
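itertools.accumulate defaults to addition, which is why prefSum is a one-liner. The explicit-loop equivalent, plus accumulate with another operator for comparison:

# Running totals by hand, and accumulate with multiplication.
import operator
from itertools import accumulate

def pref_sum_loop(a):
    out, total = [], 0
    for x in a:
        total += x
        out.append(total)
    return out

print(pref_sum_loop([1, 2, 3, -6]))               # [1, 3, 6, 0]
print(list(accumulate([1, 2, 3], operator.mul)))  # [1, 2, 6]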
6c8cdc4460204cf4ffcb9b1a42da3ba7bb469031
py/g1/asyncs/kernels/tests/test_public.py
py/g1/asyncs/kernels/tests/test_public.py
import unittest

from g1.asyncs import kernels


class KernelsTest(unittest.TestCase):
    """Test ``g1.asyncs.kernels`` public interface."""

    def test_contexts(self):

        self.assertIsNone(kernels.get_kernel())
        self.assertEqual(kernels.get_all_tasks(), [])
        self.assertIsNone(kernels.get_current_task())

        def test_with_kernel():
            self.assertIsNotNone(kernels.get_kernel())

            task = kernels.spawn(noop)
            self.assertEqual(kernels.get_all_tasks(), [task])

            kernels.run(timeout=1)
            self.assertEqual(kernels.get_all_tasks(), [])

        kernels.call_with_kernel(test_with_kernel)

        self.assertIsNone(kernels.get_kernel())
        self.assertEqual(kernels.get_all_tasks(), [])
        self.assertIsNone(kernels.get_current_task())

    def test_timeout_after(self):

        with self.assertRaisesRegex(LookupError, r'ContextVar.*kernel'):
            kernels.timeout_after(0)

        @kernels.with_kernel
        def test_with_kernel():
            with self.assertRaisesRegex(LookupError, r'no current task'):
                kernels.timeout_after(0)

        test_with_kernel()


async def noop():
    pass


if __name__ == '__main__':
    unittest.main()
Add unit test of g1.asyncs.kernels public interface
Add unit test of g1.asyncs.kernels public interface
Python
mit
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
--- +++ @@ -0,0 +1,48 @@ +import unittest + +from g1.asyncs import kernels + + +class KernelsTest(unittest.TestCase): + """Test ``g1.asyncs.kernels`` public interface.""" + + def test_contexts(self): + + self.assertIsNone(kernels.get_kernel()) + self.assertEqual(kernels.get_all_tasks(), []) + self.assertIsNone(kernels.get_current_task()) + + def test_with_kernel(): + self.assertIsNotNone(kernels.get_kernel()) + + task = kernels.spawn(noop) + self.assertEqual(kernels.get_all_tasks(), [task]) + + kernels.run(timeout=1) + self.assertEqual(kernels.get_all_tasks(), []) + + kernels.call_with_kernel(test_with_kernel) + + self.assertIsNone(kernels.get_kernel()) + self.assertEqual(kernels.get_all_tasks(), []) + self.assertIsNone(kernels.get_current_task()) + + def test_timeout_after(self): + + with self.assertRaisesRegex(LookupError, r'ContextVar.*kernel'): + kernels.timeout_after(0) + + @kernels.with_kernel + def test_with_kernel(): + with self.assertRaisesRegex(LookupError, r'no current task'): + kernels.timeout_after(0) + + test_with_kernel() + + +async def noop(): + pass + + +if __name__ == '__main__': + unittest.main()
c0496d83049e02db718941b7cdd6fa0bacd28ce2
python-tools/mods/raven/transport/http.py
python-tools/mods/raven/transport/http.py
# See https://github.com/getsentry/raven-python/issues/1109

"""
raven.transport.http
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import requests

from raven.utils.compat import string_types, urllib2
from raven.conf import defaults
from raven.exceptions import APIError, RateLimited
from raven.transport.base import Transport
from raven.utils.http import urlopen


class HTTPTransport(Transport):
    scheme = ['sync+http', 'sync+https']

    def __init__(self, timeout=defaults.TIMEOUT, verify_ssl=True,
                 ca_certs=defaults.CA_BUNDLE):
        if isinstance(timeout, string_types):
            timeout = int(timeout)
        if isinstance(verify_ssl, string_types):
            verify_ssl = bool(int(verify_ssl))

        self.timeout = timeout
        self.verify_ssl = verify_ssl
        self.ca_certs = ca_certs

    def send(self, url, data, headers):
        """
        Sends a request to a remote webserver using HTTP POST.
        """
        # req = urllib2.Request(url, headers=headers)

        try:
            response = requests.post(
                url=url,
                data=data,
                timeout=self.timeout,
                verify=self.verify_ssl,
                headers=headers
            )
        except urllib2.HTTPError as exc:
            msg = exc.headers.get('x-sentry-error')
            code = exc.getcode()
            if code == 429:
                try:
                    retry_after = int(exc.headers.get('retry-after'))
                except (ValueError, TypeError):
                    retry_after = 0
                raise RateLimited(msg, retry_after)
            elif msg:
                raise APIError(msg, code)
            else:
                raise
        return response
Add raven SSL fix mod.
Add raven SSL fix mod. See https://github.com/getsentry/raven-python/issues/1109.
Python
mit
Mediamoose/python-tools
--- +++ @@ -0,0 +1,60 @@ +# See https://github.com/getsentry/raven-python/issues/1109 + +""" +raven.transport.http +~~~~~~~~~~~~~~~~~~~~ +:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details. +:license: BSD, see LICENSE for more details. +""" +from __future__ import absolute_import +import requests + +from raven.utils.compat import string_types, urllib2 +from raven.conf import defaults +from raven.exceptions import APIError, RateLimited +from raven.transport.base import Transport +from raven.utils.http import urlopen + + +class HTTPTransport(Transport): + scheme = ['sync+http', 'sync+https'] + + def __init__(self, timeout=defaults.TIMEOUT, verify_ssl=True, + ca_certs=defaults.CA_BUNDLE): + if isinstance(timeout, string_types): + timeout = int(timeout) + if isinstance(verify_ssl, string_types): + verify_ssl = bool(int(verify_ssl)) + + self.timeout = timeout + self.verify_ssl = verify_ssl + self.ca_certs = ca_certs + + def send(self, url, data, headers): + """ + Sends a request to a remote webserver using HTTP POST. + """ + # req = urllib2.Request(url, headers=headers) + + try: + response = requests.post( + url=url, + data=data, + timeout=self.timeout, + verify=self.verify_ssl, + headers=headers + ) + except urllib2.HTTPError as exc: + msg = exc.headers.get('x-sentry-error') + code = exc.getcode() + if code == 429: + try: + retry_after = int(exc.headers.get('retry-after')) + except (ValueError, TypeError): + retry_after = 0 + raise RateLimited(msg, retry_after) + elif msg: + raise APIError(msg, code) + else: + raise + return response
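Worth noting about the patch above: requests.post never raises urllib2.HTTPError (requests signals HTTP errors via status codes, or via raise_for_status), so the except branch looks unreachable for status errors. A hedged, requests-native sketch of the same 429 handling; raven would raise its RateLimited here, but a plain exception keeps the sketch self-contained:

# Sketch: status-code based rate-limit handling with requests.
import requests

def post_with_rate_limit(url, data, headers, timeout=5, verify=True):
    response = requests.post(url=url, data=data, timeout=timeout,
                             verify=verify, headers=headers)
    if response.status_code == 429:
        try:
            retry_after = int(response.headers.get('retry-after'))
        except (ValueError, TypeError):
            retry_after = 0
        raise RuntimeError('rate limited; retry after %ss' % retry_after)
    return response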
0ac7a79dda372763c88b237e269aa9f955b88fdd
Titanic_Survival_Exploration/Titanic_Surv_Expl.py
Titanic_Survival_Exploration/Titanic_Surv_Expl.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 16 15:53:32 2017

@author: Anani Assoutovi
"""

import numpy as np
import pandas as pd
Add a new folder and file @AnaniSkywalker
Add a new folder and file @AnaniSkywalker
Python
mit
AnaniSkywalker/UDACITY_Machine_Learning,AnaniSkywalker/UDACITY_Machine_Learning
--- +++ @@ -0,0 +1,10 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Sun Jul 16 15:53:32 2017 + +@author: Anani Assoutovi +""" + +import numpy as np +import pandas as pd
d73dfec24b2b77edcab5a1daf1acb35640320aa4
Lib/test/test_platform.py
Lib/test/test_platform.py
import unittest
from test import test_support
import platform

class PlatformTest(unittest.TestCase):
    def test_architecture(self):
        res = platform.architecture()

    def test_machine(self):
        res = platform.machine()

    def test_node(self):
        res = platform.node()

    def test_platform(self):
        for aliased in (False, True):
            for terse in (False, True):
                res = platform.platform(aliased, terse)

    def test_processor(self):
        res = platform.processor()

    def test_python_build(self):
        res = platform.python_build()

    def test_python_compiler(self):
        res = platform.python_compiler()

    def test_version(self):
        res1 = platform.version()
        res2 = platform.version_tuple()
        self.assertEqual(res1, ".".join(res2))

    def test_release(self):
        res = platform.release()

    def test_system(self):
        res = platform.system()

    def test_version(self):
        res = platform.version()

    def test_system_alias(self):
        res = platform.system_alias(
            platform.system(),
            platform.release(),
            platform.version(),
        )

    def test_uname(self):
        res = platform.uname()

    def test_java_ver(self):
        res = platform.java_ver()

    def test_win32_ver(self):
        res = platform.win32_ver()

    def test_mac_ver(self):
        res = platform.mac_ver()

    def test_dist(self):
        res = platform.dist()

    def test_libc_ver(self):
        res = platform.libc_ver()

def test_main():
    test_support.run_unittest(
        PlatformTest
    )

if __name__ == '__main__':
    test_main()
Add a rudimentary test for the platform module that at least calls each documented function once.
Add a rudimentary test for the platform module that at least calls each documented function once.
Python
mit
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
--- +++ @@ -0,0 +1,74 @@ +import unittest +from test import test_support +import platform + +class PlatformTest(unittest.TestCase): + def test_architecture(self): + res = platform.architecture() + + def test_machine(self): + res = platform.machine() + + def test_node(self): + res = platform.node() + + def test_platform(self): + for aliased in (False, True): + for terse in (False, True): + res = platform.platform(aliased, terse) + + def test_processor(self): + res = platform.processor() + + def test_python_build(self): + res = platform.python_build() + + def test_python_compiler(self): + res = platform.python_compiler() + + def test_version(self): + res1 = platform.version() + res2 = platform.version_tuple() + self.assertEqual(res1, ".".join(res2)) + + def test_release(self): + res = platform.release() + + def test_system(self): + res = platform.system() + + def test_version(self): + res = platform.version() + + def test_system_alias(self): + res = platform.system_alias( + platform.system(), + platform.release(), + platform.version(), + ) + + def test_uname(self): + res = platform.uname() + + def test_java_ver(self): + res = platform.java_ver() + + def test_win32_ver(self): + res = platform.win32_ver() + + def test_mac_ver(self): + res = platform.mac_ver() + + def test_dist(self): + res = platform.dist() + + def test_libc_ver(self): + res = platform.libc_ver() + +def test_main(): + test_support.run_unittest( + PlatformTest + ) + +if __name__ == '__main__': + test_main()
3a06a24c5ce0e5357dfc87eccfc198fd05e881e4
corehq/apps/es/tests/test_user_es.py
corehq/apps/es/tests/test_user_es.py
import uuid

from django.test import TestCase

from pillowtop.es_utils import initialize_index_and_mapping

from corehq.apps.domain.shortcuts import create_domain
from corehq.apps.es import UserES
from corehq.apps.es.tests.utils import es_test
from corehq.apps.users.dbaccessors.all_commcare_users import delete_all_users
from corehq.apps.users.models import CommCareUser
from corehq.elastic import get_es_new
from corehq.pillows.mappings.user_mapping import USER_INDEX, USER_INDEX_INFO
from corehq.util.elastic import ensure_index_deleted
from corehq.util.es.testing import sync_users_to_es


@es_test
class TestUserES(TestCase):

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        initialize_index_and_mapping(get_es_new(), USER_INDEX_INFO)
        cls.elasticsearch = get_es_new()
        cls.domain = 'test-user-es'
        cls.domain_obj = create_domain(cls.domain)

    @classmethod
    def tearDownClass(cls):
        delete_all_users()
        cls.domain_obj.delete()
        ensure_index_deleted(USER_INDEX)
        super().tearDownClass()

    def _create_mobile_worker(self, metadata):
        CommCareUser.create(
            domain=self.domain,
            username=uuid.uuid4().hex,
            password="*****",
            created_by=None,
            created_via=None,
            metadata=metadata,
        )

    def test_user_data_query(self):
        with sync_users_to_es():
            self._create_mobile_worker(metadata={'foo': 'bar'})
            self._create_mobile_worker(metadata={'foo': 'baz'})
            self._create_mobile_worker(metadata={'foo': 'womp', 'fu': 'bar'})
            get_es_new().indices.refresh(USER_INDEX)
            self.assertEqual(UserES().metadata('foo', 'bar').count(), 1)
Write a basic test for filtering by user data
Write a basic test for filtering by user data
Python
bsd-3-clause
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
--- +++ @@ -0,0 +1,52 @@ +import uuid + +from django.test import TestCase + +from pillowtop.es_utils import initialize_index_and_mapping + +from corehq.apps.domain.shortcuts import create_domain +from corehq.apps.es import UserES +from corehq.apps.es.tests.utils import es_test +from corehq.apps.users.dbaccessors.all_commcare_users import delete_all_users +from corehq.apps.users.models import CommCareUser +from corehq.elastic import get_es_new +from corehq.pillows.mappings.user_mapping import USER_INDEX, USER_INDEX_INFO +from corehq.util.elastic import ensure_index_deleted +from corehq.util.es.testing import sync_users_to_es + + +@es_test +class TestUserES(TestCase): + + @classmethod + def setUpClass(cls): + super().setUpClass() + initialize_index_and_mapping(get_es_new(), USER_INDEX_INFO) + cls.elasticsearch = get_es_new() + cls.domain = 'test-user-es' + cls.domain_obj = create_domain(cls.domain) + + @classmethod + def tearDownClass(cls): + delete_all_users() + cls.domain_obj.delete() + ensure_index_deleted(USER_INDEX) + super().tearDownClass() + + def _create_mobile_worker(self, metadata): + CommCareUser.create( + domain=self.domain, + username=uuid.uuid4().hex, + password="*****", + created_by=None, + created_via=None, + metadata=metadata, + ) + + def test_user_data_query(self): + with sync_users_to_es(): + self._create_mobile_worker(metadata={'foo': 'bar'}) + self._create_mobile_worker(metadata={'foo': 'baz'}) + self._create_mobile_worker(metadata={'foo': 'womp', 'fu': 'bar'}) + get_es_new().indices.refresh(USER_INDEX) + self.assertEqual(UserES().metadata('foo', 'bar').count(), 1)
5b333f9547908db05663afacc7487749dda168fc
dynd/tests/test_array_as_py.py
dynd/tests/test_array_as_py.py
import sys
import unittest
from dynd import nd, ndt

class TestArrayAsPy(unittest.TestCase):
    def test_struct_or_tuple(self):
        a = nd.array((3, "testing", 1.5), type='{x:int, y:string, z:real}')
        self.assertEqual(nd.as_py(a), {'x': 3, 'y': "testing", 'z': 1.5})
        self.assertEqual(nd.as_py(a, tuple=True), (3, "testing", 1.5))
        a = nd.array([(1, 1.5), (2, 3.5)], dtype='{x:int, y:real}')
        self.assertEqual(nd.as_py(a), [{'x': 1, 'y': 1.5}, {'x': 2, 'y': 3.5}])
        self.assertEqual(nd.as_py(a, tuple=True), [(1, 1.5), (2, 3.5)])

        # Slightly bigger example
        data = {
            "type": "ImageCollection",
            "images": [{
                "Width": 800,
                "Height": 600,
                "Title": "View from 15th Floor",
                "Thumbnail": {
                    "Url": "http://www.example.com/image/481989943",
                    "Height": 125,
                    "Width": 100
                },
                "IDs": [116, 943, 234, 38793]
            }]
        }
        ordered = (u'ImageCollection',
                   [(800, 600, u'View from 15th Floor',
                     (u'http://www.example.com/image/481989943', 125, 100),
                     [116, 943, 234, 38793]),])

        tp = ndt.type("""{
            type: string,
            images: var * {
                Width: int16,
                Height: int16,
                Title: string,
                Thumbnail: {
                    Url: string,
                    Height: int16,
                    Width: int16,
                },
                IDs: var * int32,
            }
        }
        """)
        a = nd.array(data, type=tp)
        self.assertEqual(nd.as_py(a), data)
        self.assertEqual(nd.as_py(a, tuple=True), ordered)
Add tests for tuple option to nd.as_py
Add tests for tuple option to nd.as_py
Python
bsd-2-clause
pombredanne/dynd-python,pombredanne/dynd-python,ContinuumIO/dynd-python,insertinterestingnamehere/dynd-python,izaid/dynd-python,pombredanne/dynd-python,izaid/dynd-python,insertinterestingnamehere/dynd-python,insertinterestingnamehere/dynd-python,mwiebe/dynd-python,michaelpacer/dynd-python,aterrel/dynd-python,cpcloud/dynd-python,mwiebe/dynd-python,pombredanne/dynd-python,michaelpacer/dynd-python,ContinuumIO/dynd-python,michaelpacer/dynd-python,insertinterestingnamehere/dynd-python,aterrel/dynd-python,cpcloud/dynd-python,ContinuumIO/dynd-python,cpcloud/dynd-python,mwiebe/dynd-python,mwiebe/dynd-python,aterrel/dynd-python,izaid/dynd-python,aterrel/dynd-python,izaid/dynd-python,michaelpacer/dynd-python,ContinuumIO/dynd-python,cpcloud/dynd-python
--- +++ @@ -0,0 +1,51 @@ +import sys +import unittest +from dynd import nd, ndt + +class TestArrayAsPy(unittest.TestCase): + def test_struct_or_tuple(self): + a = nd.array((3, "testing", 1.5), type='{x:int, y:string, z:real}') + self.assertEqual(nd.as_py(a), {'x': 3, 'y': "testing", 'z': 1.5}) + self.assertEqual(nd.as_py(a, tuple=True), (3, "testing", 1.5)) + a = nd.array([(1, 1.5), (2, 3.5)], dtype='{x:int, y:real}') + self.assertEqual(nd.as_py(a), [{'x': 1, 'y': 1.5}, {'x': 2, 'y': 3.5}]) + self.assertEqual(nd.as_py(a, tuple=True), [(1, 1.5), (2, 3.5)]) + + # Slightly bigger example + data = { + "type": "ImageCollection", + "images": [{ + "Width": 800, + "Height": 600, + "Title": "View from 15th Floor", + "Thumbnail": { + "Url": "http://www.example.com/image/481989943", + "Height": 125, + "Width": 100 + }, + "IDs": [116, 943, 234, 38793] + }] + } + ordered = (u'ImageCollection', + [(800, 600, u'View from 15th Floor', + (u'http://www.example.com/image/481989943', 125, 100), + [116, 943, 234, 38793]),]) + + tp = ndt.type("""{ + type: string, + images: var * { + Width: int16, + Height: int16, + Title: string, + Thumbnail: { + Url: string, + Height: int16, + Width: int16, + }, + IDs: var * int32, + } + } + """) + a = nd.array(data, type=tp) + self.assertEqual(nd.as_py(a), data) + self.assertEqual(nd.as_py(a, tuple=True), ordered)
55017eadf948fb951e6303cd4c914c968d6f60b2
cmsplugin_contact/migrations_django/0003_auto_20161107_1614.py
cmsplugin_contact/migrations_django/0003_auto_20161107_1614.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-11-07 15:14
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('cmsplugin_contact', '0002_auto_20160810_1130'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contact',
            name='cmsplugin_ptr',
            field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cmsplugin_contact_contact', serialize=False, to='cms.CMSPlugin'),
        ),
        migrations.AlterField(
            model_name='contact',
            name='form_layout',
            field=models.CharField(choices=[(b'cmsplugin_contact.forms.ContactForm', 'default')], help_text='Choice the layout of contact form', max_length=255, verbose_name='Form Layout'),
        ),
    ]
Add an auto-generated missing migration
Add an auto-generated missing migration
Python
bsd-2-clause
maccesch/cmsplugin-contact,maccesch/cmsplugin-contact
--- +++ @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9.9 on 2016-11-07 15:14 +from __future__ import unicode_literals + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('cmsplugin_contact', '0002_auto_20160810_1130'), + ] + + operations = [ + migrations.AlterField( + model_name='contact', + name='cmsplugin_ptr', + field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='cmsplugin_contact_contact', serialize=False, to='cms.CMSPlugin'), + ), + migrations.AlterField( + model_name='contact', + name='form_layout', + field=models.CharField(choices=[(b'cmsplugin_contact.forms.ContactForm', 'default')], help_text='Choice the layout of contact form', max_length=255, verbose_name='Form Layout'), + ), + ]
dccd8403a93a0c86054d61142198643d30b8d9af
migrations/versions/0c98b865104f_add_score_user_id_column.py
migrations/versions/0c98b865104f_add_score_user_id_column.py
"""Add score.user_id column Revision ID: 0c98b865104f Revises: 7b6a65c708b9 Create Date: 2016-10-27 19:03:44.901639 """ # revision identifiers, used by Alembic. revision = '0c98b865104f' down_revision = '7b6a65c708b9' from alembic import op import sqlalchemy as sa import server def upgrade(): op.add_column('score', sa.Column('user_id', sa.Integer(), nullable=True)) op.create_foreign_key(op.f('fk_score_user_id_user'), 'score', 'user', ['user_id'], ['id']) op.execute(''' UPDATE score AS s JOIN backup AS b ON b.id = s.backup_id SET s.user_id = b.submitter_id ''') op.alter_column('score', 'user_id', existing_type=sa.Integer(), nullable=False) def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_constraint(op.f('fk_score_user_id_user'), 'score', type_='foreignkey') op.drop_column('score', 'user_id') ### end Alembic commands ###
Add migration that sets score.user_id appropriately
Add migration that sets score.user_id appropriately
Python
apache-2.0
Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok,Cal-CS-61A-Staff/ok
--- +++ @@ -0,0 +1,34 @@ +"""Add score.user_id column + +Revision ID: 0c98b865104f +Revises: 7b6a65c708b9 +Create Date: 2016-10-27 19:03:44.901639 + +""" + +# revision identifiers, used by Alembic. +revision = '0c98b865104f' +down_revision = '7b6a65c708b9' + +from alembic import op +import sqlalchemy as sa +import server + + +def upgrade(): + op.add_column('score', sa.Column('user_id', sa.Integer(), nullable=True)) + op.create_foreign_key(op.f('fk_score_user_id_user'), 'score', 'user', ['user_id'], ['id']) + + op.execute(''' + UPDATE score AS s + JOIN backup AS b ON b.id = s.backup_id + SET s.user_id = b.submitter_id + ''') + + op.alter_column('score', 'user_id', existing_type=sa.Integer(), nullable=False) + +def downgrade(): + ### commands auto generated by Alembic - please adjust! ### + op.drop_constraint(op.f('fk_score_user_id_user'), 'score', type_='foreignkey') + op.drop_column('score', 'user_id') + ### end Alembic commands ###
b2bef05e0490d161ecec07b4403964c19875ee5d
numba/cuda/tests/nocuda/test_function_resolution.py
numba/cuda/tests/nocuda/test_function_resolution.py
from numba.cuda.testing import unittest, skip_on_cudasim
import operator
from numba.core import types, typing


@skip_on_cudasim("Skip on simulator due to use of cuda_target")
class TestFunctionResolutionNoCuda(unittest.TestCase):
    def test_fp16_binary_operators(self):
        from numba.cuda.descriptor import cuda_target
        ops = (operator.add, operator.iadd, operator.sub, operator.isub,
               operator.mul, operator.imul)
        for op in ops:
            fp16 = types.float16
            typingctx = cuda_target.typing_context
            typingctx.refresh()
            fnty = typingctx.resolve_value_type(op)
            out = typingctx.resolve_function_type(fnty, (fp16, fp16), {})
            if out != typing.signature(fp16, fp16, fp16):
                raise AssertionError(out)

    def test_fp16_unary_operators(self):
        from numba.cuda.descriptor import cuda_target
        ops = (operator.neg, abs)
        for op in ops:
            fp16 = types.float16
            typingctx = cuda_target.typing_context
            typingctx.refresh()
            fnty = typingctx.resolve_value_type(op)
            out = typingctx.resolve_function_type(fnty, (fp16,), {})
            if out != typing.signature(fp16, fp16):
                raise AssertionError(out)


if __name__ == '__main__':
    unittest.main()
Add new case for function resolution of fp16 unary and binary operators
Add new case for function resolution of fp16 unary and binary operators
Python
bsd-2-clause
numba/numba,cpcloud/numba,cpcloud/numba,cpcloud/numba,numba/numba,numba/numba,numba/numba,cpcloud/numba,numba/numba,cpcloud/numba
--- +++ @@ -0,0 +1,35 @@ +from numba.cuda.testing import unittest, skip_on_cudasim +import operator +from numba.core import types, typing + + +@skip_on_cudasim("Skip on simulator due to use of cuda_target") +class TestFunctionResolutionNoCuda(unittest.TestCase): + def test_fp16_binary_operators(self): + from numba.cuda.descriptor import cuda_target + ops = (operator.add, operator.iadd, operator.sub, operator.isub, + operator.mul, operator.imul) + for op in ops: + fp16 = types.float16 + typingctx = cuda_target.typing_context + typingctx.refresh() + fnty = typingctx.resolve_value_type(op) + out = typingctx.resolve_function_type(fnty, (fp16, fp16), {}) + if out != typing.signature(fp16, fp16, fp16): + raise AssertionError(out) + + def test_fp16_unary_operators(self): + from numba.cuda.descriptor import cuda_target + ops = (operator.neg, abs) + for op in ops: + fp16 = types.float16 + typingctx = cuda_target.typing_context + typingctx.refresh() + fnty = typingctx.resolve_value_type(op) + out = typingctx.resolve_function_type(fnty, (fp16,), {}) + if out != typing.signature(fp16, fp16): + raise AssertionError(out) + + +if __name__ == '__main__': + unittest.main()
c456ec0a5dd4c48b13d82930eab32c85bcc0e7be
migrations/versions/75f579d01f0d_.py
migrations/versions/75f579d01f0d_.py
"""empty message Revision ID: 75f579d01f0d Revises: 25f4f234760c Create Date: 2017-05-06 23:15:02.228272 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '75f579d01f0d' down_revision = '25f4f234760c' branch_labels = None depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('graphs', sa.Column('short_url', sa.String(), nullable=True)) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('graphs', 'short_url') # ### end Alembic commands ###
Add short_url column to Graph
Add short_url column to Graph

Make username a nullable field in Graph, so people don't have to register to share graphs.
Python
mit
stardust66/math3d,ChristopherChudzicki/math3d,ChristopherChudzicki/math3d,stardust66/math3d,ChristopherChudzicki/math3d,ChristopherChudzicki/math3d,stardust66/math3d,stardust66/math3d
---
+++
@@ -0,0 +1,28 @@
+"""empty message
+
+Revision ID: 75f579d01f0d
+Revises: 25f4f234760c
+Create Date: 2017-05-06 23:15:02.228272
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '75f579d01f0d'
+down_revision = '25f4f234760c'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.add_column('graphs', sa.Column('short_url', sa.String(), nullable=True))
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    op.drop_column('graphs', 'short_url')
+    # ### end Alembic commands ###
189ec6dabc25eb91335568a7e6547483f9ec2960
modules/tools/extractor/extractor.py
modules/tools/extractor/extractor.py
#!/usr/bin/env python

###############################################################################
# Copyright 2017 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################

import rospy
from std_msgs.msg import String
from modules.planning.proto.planning_pb2 import ADCTrajectory
from modules.routing.proto.routing_pb2 import RoutingResponse


class Extractor(object):

    def __init__(self):
        self.routing = rospy.Publisher(
            '/apollo/routing_response', RoutingResponse, queue_size=1)

    def callback_planning(self, data):
        self.routing.publish(data.debug.planning_data.routing)
        print "New Planning"


def main():
    """
    Main function
    """
    extract = Extractor()
    rospy.init_node('extract_routing', anonymous=True)
    planning_sub = rospy.Subscriber(
        '/apollo/planning',
        ADCTrajectory,
        extract.callback_planning,
        queue_size=1)
    rospy.spin()


if __name__ == '__main__':
    main()
Add tool to extract routing from planning debug
Add tool to extract routing from planning debug
Python
apache-2.0
ApolloAuto/apollo,ycool/apollo,ApolloAuto/apollo,xiaoxq/apollo,xiaoxq/apollo,ycool/apollo,ycool/apollo,ycool/apollo,jinghaomiao/apollo,wanglei828/apollo,wanglei828/apollo,ycool/apollo,ApolloAuto/apollo,jinghaomiao/apollo,jinghaomiao/apollo,jinghaomiao/apollo,xiaoxq/apollo,wanglei828/apollo,wanglei828/apollo,xiaoxq/apollo,jinghaomiao/apollo,xiaoxq/apollo,jinghaomiao/apollo,ApolloAuto/apollo,wanglei828/apollo,ApolloAuto/apollo,wanglei828/apollo,ycool/apollo,xiaoxq/apollo,ApolloAuto/apollo
---
+++
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+###############################################################################
+# Copyright 2017 The Apollo Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+###############################################################################
+
+import rospy
+from std_msgs.msg import String
+from modules.planning.proto.planning_pb2 import ADCTrajectory
+from modules.routing.proto.routing_pb2 import RoutingResponse
+
+
+class Extractor(object):
+
+    def __init__(self):
+        self.routing = rospy.Publisher(
+            '/apollo/routing_response', RoutingResponse, queue_size=1)
+
+    def callback_planning(self, data):
+        self.routing.publish(data.debug.planning_data.routing)
+        print "New Planning"
+
+
+def main():
+    """
+    Main function
+    """
+    extract = Extractor()
+    rospy.init_node('extract_routing', anonymous=True)
+    planning_sub = rospy.Subscriber(
+        '/apollo/planning',
+        ADCTrajectory,
+        extract.callback_planning,
+        queue_size=1)
+    rospy.spin()
+
+
+if __name__ == '__main__':
+    main()
0e98d0fae4a81deec57ae162b8db5bcf950b3ea3
cnxarchive/sql/migrations/20160128110515_mimetype_on_files_table.py
cnxarchive/sql/migrations/20160128110515_mimetype_on_files_table.py
# -*- coding: utf-8 -*-
"""\
- Add a ``media_type`` column to the ``files`` table.
- Move the mimetype value from ``module_files`` to ``files``.

"""
from __future__ import print_function
import sys


def up(cursor):
    # Add a ``media_type`` column to the ``files`` table.
    cursor.execute("ALTER TABLE files ADD COLUMN media_type TEXT")

    # Move the mimetype value from ``module_files`` to ``files``.
    cursor.execute("UPDATE files AS f SET media_type = mf.mimetype "
                   "FROM module_files AS mf "
                   "WHERE mf.fileid = f.fileid")

    # Warn about missing mimetype.
    cursor.execute("SELECT fileid, sha1 "
                   "FROM files AS f "
                   "WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
    rows = '\n'.join(['{}, {}'.format(fid, sha1)
                      for fid, sha1 in cursor.fetchall()])
    print("These files (fileid, sha1) do not have a corresponding "
          "module_files entry:\n{}\n".format(rows),
          file=sys.stderr)


def down(cursor):
    # Remove the ``mimetype`` column from the ``files`` table.
    cursor.execute("ALTER TABLE files DROP COLUMN media_type")
Move mimetype column from module_files to files
Move mimetype column from module_files to files
Python
agpl-3.0
Connexions/cnx-archive,Connexions/cnx-archive
---
+++
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+"""\
+- Add a ``media_type`` column to the ``files`` table.
+- Move the mimetype value from ``module_files`` to ``files``.
+
+"""
+from __future__ import print_function
+import sys
+
+
+def up(cursor):
+    # Add a ``media_type`` column to the ``files`` table.
+    cursor.execute("ALTER TABLE files ADD COLUMN media_type TEXT")
+
+    # Move the mimetype value from ``module_files`` to ``files``.
+    cursor.execute("UPDATE files AS f SET media_type = mf.mimetype "
+                   "FROM module_files AS mf "
+                   "WHERE mf.fileid = f.fileid")
+
+    # Warn about missing mimetype.
+    cursor.execute("SELECT fileid, sha1 "
+                   "FROM files AS f "
+                   "WHERE f.fileid NOT IN (SELECT fileid FROM module_files)")
+    rows = '\n'.join(['{}, {}'.format(fid, sha1)
+                      for fid, sha1 in cursor.fetchall()])
+    print("These files (fileid, sha1) do not have a corresponding "
+          "module_files entry:\n{}\n".format(rows),
+          file=sys.stderr)
+
+
+def down(cursor):
+    # Remove the ``mimetype`` column from the ``files`` table.
+    cursor.execute("ALTER TABLE files DROP COLUMN media_type")
ce9657eec421eb626f22405ab744f1554d8c376f
src/utils/clean_categories.py
src/utils/clean_categories.py
import re
def clean_categories(text):
    """Replace Wikipedia category links with the name of the category in the
    text of an article.

    Text like "[[Category:Foo]]" will be replaced with "Foo". Sorting hints are
    thrown away during this cleaning, so text like "[[Category:Bar|Sorting
    hint]]" will be replaced with "Bar".

    Args:
        text (str): The full text of a Wikipedia article in one string.

    Returns:
        str: The full text with Category links replaced.

    """
    # Since Regexes are unreadable, let me explain:
    #
    # "\[\[Category:([^\[\]|]*)[^\]]*\]\]" consists of several parts:
    #
    # \[\[ matches "[["
    #
    # Category: matches the text "Category:"
    #
    # (...) is a capture group meaning roughly "the expression inside this
    # group is a block that I want to extract"
    #
    # [^...] is a negated set which means "do not match any characters in
    # this set".
    #
    # \[, \], and | match "[", "]", and "|" in the text respectively
    #
    # * means "match zero or more of the preceding regex defined items"
    #
    # [^\]]* means match any character but a ']'
    #
    # \]\] matches "]]"
    #
    # So putting it all together, the regex does this:
    #
    # Finds "[[" followed by "Category:" and then matches any number
    # (including zero) characters after that that are not the excluded
    # characters "[", "]", or "|". These matched characters are saved. When
    # it hits an excluded character, it begins matching any characters
    # except "[". It throws these matched characters away. It terminates
    # when it finds "]]".
    #
    return re.sub(r'\[\[Category:([^\[\]|]*)[^\]]*\]\]', r'\1', text)
Add script to clean Wikipedia categories
Add script to clean Wikipedia categories
Python
apache-2.0
tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes,tiffanyj41/hermes
---
+++
@@ -0,0 +1,48 @@
+import re
+def clean_categories(text):
+    """Replace Wikipedia category links with the name of the category in the
+    text of an article.
+
+    Text like "[[Category:Foo]]" will be replaced with "Foo". Sorting hints are
+    thrown away during this cleaning, so text like "[[Category:Bar|Sorting
+    hint]]" will be replaced with "Bar".
+
+    Args:
+        text (str): The full text of a Wikipedia article in one string.
+
+    Returns:
+        str: The full text with Category links replaced.
+
+    """
+    # Since Regexes are unreadable, let me explain:
+    #
+    # "\[\[Category:([^\[\]|]*)[^\]]*\]\]" consists of several parts:
+    #
+    # \[\[ matches "[["
+    #
+    # Category: matches the text "Category:"
+    #
+    # (...) is a capture group meaning roughly "the expression inside this
+    # group is a block that I want to extract"
+    #
+    # [^...] is a negated set which means "do not match any characters in
+    # this set".
+    #
+    # \[, \], and | match "[", "]", and "|" in the text respectively
+    #
+    # * means "match zero or more of the preceding regex defined items"
+    #
+    # [^\]]* means match any character but a ']'
+    #
+    # \]\] matches "]]"
+    #
+    # So putting it all together, the regex does this:
+    #
+    # Finds "[[" followed by "Category:" and then matches any number
+    # (including zero) characters after that that are not the excluded
+    # characters "[", "]", or "|". These matched characters are saved. When
+    # it hits an excluded character, it begins matching any characters
+    # except "[". It throws these matched characters away. It terminates
+    # when it finds "]]".
+    #
+    return re.sub(r'\[\[Category:([^\[\]|]*)[^\]]*\]\]', r'\1', text)
190153d06864b64275fbd515c2f1a2b8c8a5cdba
tests/test_specs.py
tests/test_specs.py
from imagekit.cachefiles import ImageCacheFile
from nose.tools import assert_false
from .imagegenerators import TestSpec


def test_no_source():
    """
    Ensure sourceless specs are falsy.
    """
    spec = TestSpec(source=None)
    file = ImageCacheFile(spec)
    assert_false(bool(file))
Add test to ensure sourceless specs are falsy
Add test to ensure sourceless specs are falsy

Currently failing; related to #187
Python
bsd-3-clause
tawanda/django-imagekit,FundedByMe/django-imagekit,FundedByMe/django-imagekit,tawanda/django-imagekit
---
+++
@@ -0,0 +1,12 @@
+from imagekit.cachefiles import ImageCacheFile
+from nose.tools import assert_false
+from .imagegenerators import TestSpec
+
+
+def test_no_source():
+    """
+    Ensure sourceless specs are falsy.
+    """
+    spec = TestSpec(source=None)
+    file = ImageCacheFile(spec)
+    assert_false(bool(file))
c831bfb8e5e28fdcf0dff818dd08274fa2fdb5cd
scripts/consistency/fix_tag_guids.py
scripts/consistency/fix_tag_guids.py
"""Removes legacy Tag objects from the Guid namespace. Tags were once GuidStoredObjects, but are no longer. The Guid table was not cleaned of these references. This caused a specific issue where "project" was a Tag id, and therefore was resolveable to a Guid object, thereby breaking our routing system for URLs beginning with /project/. This script must be run from the OSF root directory for the imports to work. :: $ python -m scripts.consistency.fix_tag_guids dry $ python -m scripts.consistency.fix_tag_guids """ import sys from nose.tools import * # noqa from framework import Q from framework.guid.model import Guid from website.app import init_app from tests.base import OsfTestCase from tests.factories import TagFactory, NodeFactory QUERY = Q('referent.1', 'eq', "tag") def main(): # Set up storage backends init_app(routes=False) targets = get_targets() if 'dry' in sys.argv: print('{n} invalid GUID objects will be removed.'.format(n=targets.count())) sys.exit(0) else: do_migration() if get_targets().count() == 0: print('All invalid references removed.') else: print('Failed to remove all references.') sys.exit(1) def do_migration(): Guid.remove(QUERY) def get_targets(): return Guid.find(QUERY) class TestMigrateLegacyTagGUIDObjects(OsfTestCase): def setUp(self): OsfTestCase.setUp(self) tag = TagFactory() self.target_guid = Guid(referent=tag) self.target_guid.save() self.nontarget_guid = Guid(referent=NodeFactory()) def test_get_targets(self): result = list(get_targets()) assert_in(self.target_guid, result) assert_not_in(self.nontarget_guid, result) def test_do_migration(self): # sanity check assert_equal(len(list(get_targets())), 1) do_migration() assert_equal(len(list(get_targets())), 0) if __name__ == '__main__': main()
Refactor of migration script for migrating invalid Guid objects
Refactor of migration script for migrating invalid Guid objects

- Remove import side effects
- Add tests
Python
apache-2.0
lyndsysimon/osf.io,binoculars/osf.io,doublebits/osf.io,rdhyee/osf.io,arpitar/osf.io,brandonPurvis/osf.io,abought/osf.io,erinspace/osf.io,chennan47/osf.io,felliott/osf.io,aaxelb/osf.io,Ghalko/osf.io,leb2dg/osf.io,cldershem/osf.io,mattclark/osf.io,SSJohns/osf.io,baylee-d/osf.io,cslzchen/osf.io,zamattiac/osf.io,cwisecarver/osf.io,jinluyuan/osf.io,revanthkolli/osf.io,brandonPurvis/osf.io,abought/osf.io,erinspace/osf.io,chennan47/osf.io,felliott/osf.io,aaxelb/osf.io,Ghalko/osf.io,leb2dg/osf.io,cldershem/osf.io,mattclark/osf.io,SSJohns/osf.io,baylee-d/osf.io,cslzchen/osf.io,zamattiac/osf.io,cwisecarver/osf.io,jinluyuan/osf.io,revanthkolli/osf.io,RomanZWang/osf.io,felliott/osf.io,Nesiehr/osf.io,cosenal/osf.io,kushG/osf.io,mfraezz/osf.io,samanehsan/osf.io,GageGaskins/osf.io,rdhyee/osf.io,emetsger/osf.io,brianjgeiger/osf.io,fabianvf/osf.io,jnayak1/osf.io,zachjanicki/osf.io,doublebits/osf.io,cwisecarver/osf.io,DanielSBrown/osf.io,erinspace/osf.io,SSJohns/osf.io,baylee-d/osf.io,jinluyuan/osf.io,zamattiac/osf.io,ticklemepierce/osf.io,caneruguz/osf.io,njantrania/osf.io,DanielSBrown/osf.io,asanfilippo7/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,asanfilippo7/osf.io,jolene-esposito/osf.io,Nesiehr/osf.io,billyhunt/osf.io,lyndsysimon/osf.io,ckc6cz/osf.io,jnayak1/osf.io,cwisecarver/osf.io,jnayak1/osf.io,zachjanicki/osf.io,doublebits/osf.io,crcresearch/osf.io,pattisdr/osf.io,arpitar/osf.io,acshi/osf.io,ticklemepierce/osf.io,baylee-d/osf.io,chrisseto/osf.io,MerlinZhang/osf.io,petermalcolm/osf.io,kwierman/osf.io,revanthkolli/osf.io,jolene-esposito/osf.io,Nesiehr/osf.io,billyhunt/osf.io,lyndsysimon/osf.io,ckc6cz/osf.io,jnayak1/osf.io,cwisecarver/osf.io,jnayak1/osf.io,zachjanicki/osf.io,doublebits/osf.io,alexschiller/osf.io,mattclark/osf.io,baylee-d/osf.io,jeffreyliu3230/osf.io,cslzchen/osf.io,monikagrabowska/osf.io,aaxelb/osf.io,amyshi188/osf.io,AndrewSallans/osf.io,mattclark/osf.io,adlius/osf.io,acshi/osf.io,cslzchen/osf.io,kch8qx/osf.io,CenterForOpenScience/osf.io,kch8qx/osf.io,cldershem/osf.io,HarryRybacki/osf.io,cosenal/osf.io,zachjanicki/osf.io,HarryRybacki/osf.io,lamdnhan/osf.io,sloria/osf.io,HalcyonChimera/osf.io,abought/osf.io,himanshuo/osf.io,alexschiller/osf.io,lyndsysimon/osf.io,billyhunt/osf.io,njantrania/osf.io,bdyetton/prettychart,rdhyee/osf.io,caseyrygt/osf.io,abought/osf.io,jolene-esposito/osf.io,leb2dg/osf.io,adlius/osf.io,sloria/osf.io,ZobairAlijan/osf.io,zkraime/osf.io,leb2dg/osf.io,caneruguz/osf.io,dplorimer/osf,erinspace/osf.io,crcresearch/osf.io,danielneis/osf.io,GageGaskins/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,kch8qx/osf.io,mfraezz/osf.io,GageGaskins/osf.io,Johnetordoff/osf.io,KAsante95/osf.io,caneruguz/osf.io,sbt9uc/osf.io,billyhunt/osf.io,sbt9uc/osf.io,HarryRybacki/osf.io,monikagrabowska/osf.io,petermalcolm/osf.io,mluke93/osf.io,dplorimer/osf,HalcyonChimera/osf.io,billyhunt/osf.io,haoyuchen1992/osf.io,hmoco/osf.io,zamattiac/osf.io,felliott/osf.io,acshi/osf.io,mluo613/osf.io,caseyrollins/osf.io,himanshuo/osf.io,acshi/osf.io,GaryKriebel/osf.io,fabianvf/osf.io,dplorimer/osf,pattisdr/osf.io,icereval/osf.io,saradbowman/osf.io,danielneis/osf.io,Nesiehr/osf.io,mluo613/osf.io,icereval/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,ticklemepierce/osf.io,caseyrollins/osf.io,monikagrabowska/osf.io,zkraime/osf.io,samchrisinger/osf.io,brandonPurvis/osf.io,ZobairAlijan/osf.io,zkraime/osf.io,KAsante95/osf.io,jeffreyliu3230/osf.io,chennan47/osf.io,jmcarp/osf.io,himanshuo/osf.io,hmoco/osf.io,samanehsan/osf.io,barbour-em/osf.io,brandonPurvis/osf.io,reinaH/osf.io,mluke93/osf.io,monikagrabowska/osf.io,amyshi188/osf.io,ZobairAlijan/osf.io,wearpants/osf.io,laurenrevere/osf.io,pattisdr/osf.io,Ghalko/osf.io,himanshuo/osf.io,mluke93/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,wearpants/osf.io,mluo613/osf.io,mfraezz/osf.io,MerlinZhang/osf.io,samchrisinger/osf.io,billyhunt/osf.io,jmcarp/osf.io,TomBaxter/osf.io,jinluyuan/osf.io,fabianvf/osf.io,brandonPurvis/osf.io,emetsger/osf.io,jolene-esposito/osf.io,TomHeatwole/osf.io,RomanZWang/osf.io,RomanZWang/osf.io,chennan47/osf.io,abought/osf.io,cldershem/osf.io,KAsante95/osf.io,jeffreyliu3230/osf.io,cslzchen/osf.io,monikagrabowska/osf.io,aaxelb/osf.io,amyshi188/osf.io,AndrewSallans/osf.io,mattclark/osf.io,adlius/osf.io,acshi/osf.io,cslzchen/osf.io,kch8qx/osf.io,CenterForOpenScience/osf.io,kch8qx/osf.io,cldershem/osf.io,HarryRybacki/osf.io,cosenal/osf.io,zachjanicki/osf.io,HarryRybacki/osf.io,lamdnhan/osf.io,sloria/osf.io,HalcyonChimera/osf.io,abought/osf.io,himanshuo/osf.io,alexschiller/osf.io,lyndsysimon/osf.io,billyhunt/osf.io,njantrania/osf.io,bdyetton/prettychart,rdhyee/osf.io,caseyrygt/osf.io,abought/osf.io,jolene-esposito/osf.io,leb2dg/osf.io,adlius/osf.io,sloria/osf.io,ZobairAlijan/osf.io,zkraime/osf.io,leb2dg/osf.io,caneruguz/osf.io,dplorimer/osf,erinspace/osf.io,crcresearch/osf.io,danielneis/osf.io,GageGaskins/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,kch8qx/osf.io,mfraezz/osf.io,GageGaskins/osf.io,Johnetordoff/osf.io,KAsante95/osf.io,caneruguz/osf.io,sbt9uc/osf.io,billyhunt/osf.io,sbt9uc/osf.io,HarryRybacki/osf.io,monikagrabowska/osf.io,petermalcolm/osf.io,mluke93/osf.io,dplorimer/osf,HalcyonChimera/osf.io,billyhunt/osf.io,haoyuchen1992/osf.io,hmoco/osf.io,zamattiac/osf.io,felliott/osf.io,acshi/osf.io,mluo613/osf.io,caseyrollins/osf.io,himanshuo/osf.io,acshi/osf.io,GaryKriebel/osf.io,fabianvf/osf.io,dplorimer/osf,pattisdr/osf.io,icereval/osf.io,saradbowman/osf.io,danielneis/osf.io,Nesiehr/osf.io,mluo613/osf.io,icereval/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,ticklemepierce/osf.io,caseyrollins/osf.io,monikagrabowska/osf.io,zkraime/osf.io,samchrisinger/osf.io,brandonPurvis/osf.io,chrisseto/osf.io,caseyrygt/osf.io,cosenal/osf.io,hmoco/osf.io,zamattiac/osf.io,felliott/osf.io,acshi/osf.io,mluo613/osf.io,caseyrollins/osf.io,monikagrabowska/osf.io,zkraime/osf.io,KAsante95/osf.io,jeffreyliu3230/osf.io,chennan47/osf.io,jmcarp/osf.io
---
+++
@@ -0,0 +1,71 @@
+"""Removes legacy Tag objects from the Guid namespace.
+
+Tags were once GuidStoredObjects, but are no longer. The Guid table was not
+cleaned of these references.
+
+This caused a specific issue where "project" was a Tag id, and therefore was
+resolveable to a Guid object, thereby breaking our routing system for URLs
+beginning with /project/.
+
+This script must be run from the OSF root directory for the imports to work.
+::
+
+    $ python -m scripts.consistency.fix_tag_guids dry
+    $ python -m scripts.consistency.fix_tag_guids
+"""
+import sys
+
+from nose.tools import *  # noqa
+
+from framework import Q
+from framework.guid.model import Guid
+from website.app import init_app
+
+from tests.base import OsfTestCase
+from tests.factories import TagFactory, NodeFactory
+
+QUERY = Q('referent.1', 'eq', "tag")
+
+def main():
+    # Set up storage backends
+    init_app(routes=False)
+    targets = get_targets()
+    if 'dry' in sys.argv:
+        print('{n} invalid GUID objects will be removed.'.format(n=targets.count()))
+        sys.exit(0)
+    else:
+        do_migration()
+        if get_targets().count() == 0:
+            print('All invalid references removed.')
+        else:
+            print('Failed to remove all references.')
+            sys.exit(1)
+
+def do_migration():
+    Guid.remove(QUERY)
+
+def get_targets():
+    return Guid.find(QUERY)
+
+class TestMigrateLegacyTagGUIDObjects(OsfTestCase):
+
+    def setUp(self):
+        OsfTestCase.setUp(self)
+        tag = TagFactory()
+        self.target_guid = Guid(referent=tag)
+        self.target_guid.save()
+        self.nontarget_guid = Guid(referent=NodeFactory())
+
+    def test_get_targets(self):
+        result = list(get_targets())
+        assert_in(self.target_guid, result)
+        assert_not_in(self.nontarget_guid, result)
+
+    def test_do_migration(self):
+        # sanity check
+        assert_equal(len(list(get_targets())), 1)
+        do_migration()
+        assert_equal(len(list(get_targets())), 0)
+
+if __name__ == '__main__':
+    main()
efc21569590e90bddaf9d06ea3747f3dd3476253
aqt/utils/common.py
aqt/utils/common.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""General util functions commonly used across different models."""


def get_fp_spec(sig_bit, exp_bit):
  """Create fp spec which defines precision for floating-point quantization.

  Args:
    sig_bit: the number of bits assigned for significand.
    exp_bit: the number of bits assigned for exponent.

  Returns:
    fp spec
  """
  exp_bound = 2**(exp_bit - 1) - 1
  prec = {'exp_min': -exp_bound, 'exp_max': exp_bound, 'sig_bits': sig_bit}
  return prec
Create a new util function that computes precision for floating-point quantization.
Create a new util function that computes precision for floating-point quantization.

PiperOrigin-RevId: 395302655
Python
apache-2.0
google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research,google-research/google-research
---
+++
@@ -0,0 +1,31 @@
+# coding=utf-8
+# Copyright 2021 The Google Research Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""General util functions commonly used across different models."""
+
+
+def get_fp_spec(sig_bit, exp_bit):
+  """Create fp spec which defines precision for floating-point quantization.
+
+  Args:
+    sig_bit: the number of bits assigned for significand.
+    exp_bit: the number of bits assigned for exponent.
+
+  Returns:
+    fp spec
+  """
+  exp_bound = 2**(exp_bit - 1) - 1
+  prec = {'exp_min': -exp_bound, 'exp_max': exp_bound, 'sig_bits': sig_bit}
+  return prec
3209a38b795cb5519f92bbfc2651df5b69ba0f76
moderation_queue/migrations/0008_add_ignore_to_decision_choices.py
moderation_queue/migrations/0008_add_ignore_to_decision_choices.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('moderation_queue', '0007_auto_20150303_1420'),
    ]

    operations = [
        migrations.AlterField(
            model_name='queuedimage',
            name='decision',
            field=models.CharField(default=b'undecided', max_length=32, choices=[(b'approved', b'Approved'), (b'rejected', b'Rejected'), (b'undecided', b'Undecided'), (b'ignore', b'Ignore')]),
            preserve_default=True,
        ),
    ]
Add a forgotten migration (to add 'ignore' as a decision choice)
Add a forgotten migration (to add 'ignore' as a decision choice)

This should have gone into d5086f1d74d448
Python
agpl-3.0
mysociety/yournextmp-popit,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,DemocracyClub/yournextrepresentative,mysociety/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,openstate/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,openstate/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,DemocracyClub/yournextrepresentative,neavouli/yournextrepresentative,datamade/yournextmp-popit
---
+++
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import models, migrations
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('moderation_queue', '0007_auto_20150303_1420'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='queuedimage',
+            name='decision',
+            field=models.CharField(default=b'undecided', max_length=32, choices=[(b'approved', b'Approved'), (b'rejected', b'Rejected'), (b'undecided', b'Undecided'), (b'ignore', b'Ignore')]),
+            preserve_default=True,
+        ),
+    ]
883707309447fa4edd47459b4f2d8e7d449afd41
array/80.py
array/80.py
class Solution:
    def removeDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """

        if not nums:
            return 0
        length = len(nums)
        pre = 0
        cur = 1
        flag = False #False 1连续 True 2连续
        while cur < length:
            if nums[pre] == nums[cur]:
                if flag:
                    nums.pop(cur)
                    length -= 1
                else:
                    flag = True
                    pre += 1
                    cur += 1
            else:
                flag = False
                pre += 1
                cur += 1

        return len(nums)
Remove Duplicates from Sorted Array II
Remove Duplicates from Sorted Array II
Python
apache-2.0
MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode
---
+++
@@ -0,0 +1,33 @@
+class Solution:
+    def removeDuplicates(self, nums):
+        """
+        :type nums: List[int]
+        :rtype: int
+        """
+
+        if not nums:
+            return 0
+        length = len(nums)
+        pre = 0
+        cur = 1
+        flag = False #False 1连续 True 2连续
+        while cur < length:
+            if nums[pre] == nums[cur]:
+                if flag:
+                    nums.pop(cur)
+                    length -= 1
+                else:
+                    flag = True
+                    pre += 1
+                    cur += 1
+            else:
+                flag = False
+                pre += 1
+                cur += 1
+
+        return len(nums)
+
+
+
+
+
087ba4c6bb7f268eb11584e4dbcf449e08fcaf0b
analysis/10-extract-jacobian-chunks.py
analysis/10-extract-jacobian-chunks.py
import climate
import joblib
import numpy as np


def extract(trial, output, frames):
    dirname = os.path.join(output, trial.subject.key)
    pattern = '{}-{}-{{}}.npy'.format(trial.block.key, trial.key)
    if not os.path.isdir(dirname):
        os.makedirs(dirname)

    def save(key, arr):
        out = os.path.join(dirname, pattern.format(key))
        logging.info('%s: %s', out, arr.shape)
        np.save(out, arr.values)

    trial.load()
    for m in trial.marker_channel_columns:
        trial.df[m] = trial.df[m].interpolate()

    body = lmj.cubes.Trial(trial.parent, trial.basename)
    body.df = trial.df.copy()
    body.make_body_relative()
    body.add_velocities()
    body = body.df[body.marker_channel_columns]

    _, jac = trial.jacobian(frames)

    start = frames
    for i, end in enumerate(trial.df.target.diff(1).nonzero()[0][1:]):
        save('body-{:02d}'.format(i), body.iloc[start:end])
        save('jac-{:02d}'.format(i), jac.iloc[start:end])
        start = end + frames


def main(root, output, frames=10):
    trials = lmj.cubes.Experiment(root).trials_matching('*')
    work = joblib.delayed(extract)
    joblib.Parallel(-1)(work(t, output, frames) for t in trials)


if __name__ == '__main__':
    climate.call(main)
Add a script for saving paired posture/jacobian arrays.
Add a script for saving paired posture/jacobian arrays.
Python
mit
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
---
+++
@@ -0,0 +1,43 @@
+import climate
+import joblib
+import numpy as np
+
+
+def extract(trial, output, frames):
+    dirname = os.path.join(output, trial.subject.key)
+    pattern = '{}-{}-{{}}.npy'.format(trial.block.key, trial.key)
+    if not os.path.isdir(dirname):
+        os.makedirs(dirname)
+
+    def save(key, arr):
+        out = os.path.join(dirname, pattern.format(key))
+        logging.info('%s: %s', out, arr.shape)
+        np.save(out, arr.values)
+
+    trial.load()
+    for m in trial.marker_channel_columns:
+        trial.df[m] = trial.df[m].interpolate()
+
+    body = lmj.cubes.Trial(trial.parent, trial.basename)
+    body.df = trial.df.copy()
+    body.make_body_relative()
+    body.add_velocities()
+    body = body.df[body.marker_channel_columns]
+
+    _, jac = trial.jacobian(frames)
+
+    start = frames
+    for i, end in enumerate(trial.df.target.diff(1).nonzero()[0][1:]):
+        save('body-{:02d}'.format(i), body.iloc[start:end])
+        save('jac-{:02d}'.format(i), jac.iloc[start:end])
+        start = end + frames
+
+
+def main(root, output, frames=10):
+    trials = lmj.cubes.Experiment(root).trials_matching('*')
+    work = joblib.delayed(extract)
+    joblib.Parallel(-1)(work(t, output, frames) for t in trials)
+
+
+if __name__ == '__main__':
+    climate.call(main)
7b33ea38283c9e9f00a2aacaa17634e50e55e42b
stationspinner/accounting/migrations/0005_auto_20150919_2207.py
stationspinner/accounting/migrations/0005_auto_20150919_2207.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import django.core.validators
import django.contrib.auth.models


class Migration(migrations.Migration):

    dependencies = [
        ('accounting', '0004_apikey_brokeness'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='capsuler',
            managers=[
                ('objects', django.contrib.auth.models.UserManager()),
            ],
        ),
        migrations.AlterField(
            model_name='capsuler',
            name='email',
            field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
        ),
        migrations.AlterField(
            model_name='capsuler',
            name='groups',
            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
        ),
        migrations.AlterField(
            model_name='capsuler',
            name='last_login',
            field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
        ),
        migrations.AlterField(
            model_name='capsuler',
            name='username',
            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
        ),
    ]
Migrate auth for django 1.8
Migrate auth for django 1.8
Python
agpl-3.0
kriberg/stationspinner,kriberg/stationspinner
---
+++
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from django.db import models, migrations
+import django.core.validators
+import django.contrib.auth.models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('accounting', '0004_apikey_brokeness'),
+    ]
+
+    operations = [
+        migrations.AlterModelManagers(
+            name='capsuler',
+            managers=[
+                ('objects', django.contrib.auth.models.UserManager()),
+            ],
+        ),
+        migrations.AlterField(
+            model_name='capsuler',
+            name='email',
+            field=models.EmailField(max_length=254, verbose_name='email address', blank=True),
+        ),
+        migrations.AlterField(
+            model_name='capsuler',
+            name='groups',
+            field=models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', verbose_name='groups'),
+        ),
+        migrations.AlterField(
+            model_name='capsuler',
+            name='last_login',
+            field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
+        ),
+        migrations.AlterField(
+            model_name='capsuler',
+            name='username',
+            field=models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=30, validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username. This value may contain only letters, numbers and @/./+/-/_ characters.', 'invalid')], help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, verbose_name='username'),
+        ),
+    ]
23b4ad3b028e674307fdc6cc7a72953150fd0be3
zephyr/management/commands/check_redis.py
zephyr/management/commands/check_redis.py
from __future__ import absolute_import

from zephyr.models import UserProfile, get_user_profile_by_id
from zephyr.lib.rate_limiter import redis_key, client, max_api_calls, max_api_window

from django.core.management.base import BaseCommand
from django.conf import settings
from optparse import make_option

import os, time, logging

class Command(BaseCommand):
    help = """Checks redis to make sure our rate limiting system hasn't grown a bug and left redis with a bunch of data

    Usage: ./manage.py [--trim] check_redis"""

    option_list = BaseCommand.option_list + (
        make_option('-t', '--trim',
                    dest='trim',
                    default=False,
                    action='store_true',
                    help="Actually trim excess"),
    )

    def _check_within_range(self, key, count_func, trim_func):
        user_id = int(key.split(':')[1])
        try:
            user = get_user_profile_by_id(user_id)
        except:
            user = None
        max_calls = max_api_calls(user=user)

        count = count_func()
        if count > max_calls:
            logging.error("Redis health check found key with more elements \
than max_api_calls! (trying to trim) %s %s" % (key, count))
            client.expire(key, max_api_window(user=user))
            if self.trim:
                trim_func(key, max_calls)

    def handle(self, *args, **options):
        if not settings.RATE_LIMITING:
            print "This machine is not using redis or rate limiting, aborting"
            exit(1)

        # Find all keys, and make sure they're all within size constraints
        wildcard_list = "ratelimit:*:*:list"
        wildcard_zset = "ratelimit:*:*:zset"

        self.trim = options['trim']

        lists = client.keys(wildcard_list)
        for list_name in lists:
            self._check_within_range(list_name,
                                     lambda: client.llen(list_name),
                                     lambda key, max_calls: client.ltrim(key, 0, max_calls - 1))

        zsets = client.keys(wildcard_zset)
        for zset in zsets:
            now = time.time()
            # We can warn on our zset being too large, but we don't know what
            # elements to trim. We'd have to go through every list item and take
            # the intersection. The best we can do is expire it
            self._check_within_range(zset,
                                     lambda: client.zcount(zset, 0, now),
                                     lambda key, max_calls: None)
Add a redis_check management command
Add a redis_check management command

(imported from commit 04a272ca8d8288f7e3b1a54fd5d73629bde938a0)
Python
apache-2.0
bitemyapp/zulip,cosmicAsymmetry/zulip,mahim97/zulip,jessedhillon/zulip,esander91/zulip,Diptanshu8/zulip,JPJPJPOPOP/zulip,RobotCaleb/zulip,m1ssou/zulip,Galexrt/zulip,ApsOps/zulip,AZtheAsian/zulip,themass/zulip,vabs22/zulip,zhaoweigg/zulip,wangdeshui/zulip,rishig/zulip,ApsOps/zulip,jackrzhang/zulip,akuseru/zulip,RobotCaleb/zulip,Gabriel0402/zulip,zorojean/zulip,Galexrt/zulip,ApsOps/zulip,j831/zulip,zhaoweigg/zulip,paxapy/zulip,he15his/zulip,wdaher/zulip,kaiyuanheshang/zulip,shubhamdhama/zulip,vabs22/zulip,ryansnowboarder/zulip,KJin99/zulip,stamhe/zulip,hafeez3000/zulip,dnmfarrell/zulip,dnmfarrell/zulip,pradiptad/zulip,PhilSk/zulip,Cheppers/zulip,peguin40/zulip,avastu/zulip,hengqujushi/zulip,kaiyuanheshang/zulip,shubhamdhama/zulip,zwily/zulip,qq1012803704/zulip,voidException/zulip,joshisa/zulip,seapasulli/zulip,sonali0901/zulip,stamhe/zulip,hafeez3000/zulip,Gabriel0402/zulip,zorojean/zulip,willingc/zulip,wweiradio/zulip,babbage/zulip,souravbadami/zulip,jonesgithub/zulip,ashwinirudrappa/zulip,zofuthan/zulip,aps-sids/zulip,suxinde2009/zulip,mdavid/zulip,bitemyapp/zulip,bastianh/zulip,hengqujushi/zulip,littledogboy/zulip,jrowan/zulip,tiansiyuan/zulip,hafeez3000/zulip,Qgap/zulip,susansls/zulip,wangdeshui/zulip,brockwhittaker/zulip,jimmy54/zulip,nicholasbs/zulip,jessedhillon/zulip,jessedhillon/zulip,krtkmj/zulip,zofuthan/zulip,aps-sids/zulip,suxinde2009/zulip,mdavid/zulip,deer-hope/zulip,eastlhu/zulip,tommyip/zulip,LAndreas/zulip,littledogboy/zulip,jonesgithub/zulip,hustlzp/zulip,brainwane/zulip,codeKonami/zulip,xuanhan863/zulip,jainayush975/zulip,ryanbackman/zulip,JanzTam/zulip,blaze225/zulip,jerryge/zulip,wangdeshui/zulip,proliming/zulip,EasonYi/zulip,shrikrishnaholla/zulip,samatdav/zulip,LAndreas/zulip,zulip/zulip,developerfm/zulip,schatt/zulip,glovebx/zulip,bastianh/zulip,tbutter/zulip,qq1012803704/zulip,Juanvulcano/zulip,zachallaun/zulip,wdaher/zulip,tommyip/zulip,vikas-parashar/zulip,samatdav/zulip,technicalpickles/zulip,udxxabp/zulip,amyliu345/zulip,zhaoweigg/zulip,tiansiyuan/zulip,zofuthan/zulip,aps-sids/zulip,suxinde2009/zulip,mdavid/zulip,bitemyapp/zulip,bastianh/zulip,rishig/zulip,eastlhu/zulip,tommyip/zulip,vaidap/zulip,pradiptad/zulip,showell/zulip,Drooids/zulip,dhcrzf/zulip,dhcrzf/zulip,Jianchun1/zulip,moria/zulip,nicholasbs/zulip,arpith/zulip,proliming/zulip,ikasumiwt/zulip,deer-hope/zulip,sonali0901/zulip,xuxiao/zulip,arpitpanwar/zulip,easyfmxu/zulip,voidException/zulip,technicalpickles/zulip,firstblade/zulip,deer-hope/zulip,reyha/zulip,hafeez3000/zulip,akuseru/zulip,stamhe/zulip,JPJPJPOPOP/zulip,xuxiao/zulip,timabbott/zulip,dattatreya303/zulip,xuanhan863/zulip,hayderimran7/zulip,tbutter/zulip,alliejones/zulip,zhaoweigg/zulip,Frouk/zulip,Suninus/zulip,Frouk/zulip,moria/zulip,gigawhitlocks/zulip,JPJPJPOPOP/zulip,wangdeshui/zulip,vaidap/zulip,pradiptad/zulip,xuanhan863/zulip,hustlzp/zulip,brainwane/zulip,codeKonami/zulip,xuanhan863/zulip,jainayush975/zulip,ryanbackman/zulip,JanzTam/zulip,blaze225/zulip,jerryge/zulip,wangdeshui/zulip,proliming/zulip,EasonYi/zulip,shrikrishnaholla/zulip,samatdav/zulip,LAndreas/zulip,zulip/zulip,developerfm/zulip,schatt/zulip,glovebx/zulip,bastianh/zulip,tbutter/zulip,qq1012803704/zulip,Juanvulcano/zulip,zachallaun/zulip,wdaher/zulip,tommyip/zulip,vikas-parashar/zulip,samatdav/zulip,technicalpickles/zulip,udxxabp/zulip,amyliu345/zulip,zhaoweigg/zulip,tiansiyuan/zulip,hafeez3000/zulip,dnmfarrell/zulip,jeffcao/zulip,hackerkid/zulip,zwily/zulip,jeffcao/zulip,sup95/zulip,atomic-labs/zulip,kaiyuanheshang/zulip,Jianchun1/zulip,hustlzp/zulip,brainwane/zulip,codeKonami/zulip,xuanhan863/zulip,jainayush975/zulip,ryanbackman/zulip,JanzTam/zulip,blaze225/zulip,jerryge/zulip,wangdeshui/zulip,proliming/zulip,EasonYi/zulip,shrikrishnaholla/zulip,samatdav/zulip,LAndreas/zulip,zulip/zulip,developerfm/zulip,schatt/zulip,glovebx/zulip,bastianh/zulip,tbutter/zulip,qq1012803704/zulip,Juanvulcano/zulip,zachallaun/zulip,wdaher/zulip,tommyip/zulip,vikas-parashar/zulip,samatdav/zulip,technicalpickles/zulip,udxxabp/zulip,amyliu345/zulip,zhaoweigg/zulip,tiansiyuan/zulip,eastlhu/zulip,souravbadami/zulip,wangdeshui/zulip,vakila/zulip,saitodisse/zulip,christi3k/zulip,luyifan/zulip,arpith/zulip,bowlofstew/zulip,verma-varsha/zulip,ahmadassaf/zulip,ipernet/zulip,proliming/zulip,kou/zulip,arpith/zulip,zofuthan/zulip,yuvipanda/zulip,mahim97/zulip,andersk/zulip,timabbott/zulip,Galexrt/zulip,tdr130/zulip,noroot/zulip,udxxabp/zulip,thomasboyt/zulip,samatdav/zulip,atomic-labs/zulip,johnnygaddarr/zulip,peguin40/zulip,LAndreas/zulip,luyifan/zulip,wweiradio/zulip,karamcnair/zulip,avastu/zulip,JPJPJPOPOP/zulip,esander91/zulip,gigawhitlocks/zulip,littledogboy/zulip,mdavid/zulip,vaidap/zulip,xuxiao/zulip,luyifan/zulip,shubhamdhama/zulip,DazWorrall/zulip,mahim97/zulip,andersk/zulip,timabbott/zulip,Galexrt/zulip,tdr130/zulip,noroot/zulip,udxxabp/zulip,thomasboyt/zulip,samatdav/zulip,atomic-labs/zulip,johnnygaddarr/zulip,peguin40/zulip,LAndreas/zulip,luyifan/zulip,wweiradio/zulip,karamcnair/zulip,avastu/zulip,JPJPJPOPOP/zulip,esander91/zulip,gigawhitlocks/zulip,littledogboy/zulip,mdavid/zulip,vaidap/zulip,xuxiao/zulip,luyifan/zulip,shubhamdhama/zulip,DazWorrall/zulip,mahim97/zulip,andersk/zulip,ashwinirudrappa/zulip,levixie/zulip,krtkmj/zulip,johnnygaddarr/zulip,willingc/zulip,thomasboyt/zulip,dawran6/zulip,dwrpayne/zulip,aliceriot/zulip,mansilladev/zulip,adnanh/zulip,JanzTam/zulip,KingxBanana/zulip,amanharitsh123/zulip,bowlofstew/zulip,AZtheAsian/zulip,punchagan/zulip,hayderimran7/zulip,udxxabp/zulip,eeshangarg/zulip,PaulPetring/zulip,bitemyapp/zulip,joyhchen/zulip,jphilipsen05/zulip,wdaher/zulip,huangkebo/zulip,alliejones/zulip,punchagan/zulip,punchagan/zulip,hafeez3000/zulip,wavelets/zulip,dxq-git/zulip,Diptanshu8/zulip,easyfmxu/zulip,dotcool/zulip,j831/zulip,jonesgithub/zulip,pradiptad/zulip,xuanhan863/zulip,dnmfarrell/zulip,sup95/zulip,fw1121/zulip,technicalpickles/zulip,moria/zulip,RobotCaleb/zulip,gkotian/zulip,peiwei/zulip,kou/zulip,easyfmxu/zulip,verma-varsha/zulip,eeshangarg/zulip,vikas-parashar/zulip,tbutter/zulip,mohsenSy/zulip,he15his/zulip,zofuthan/zulip,dhcrzf/zulip,schatt/zulip,m1ssou/zulip,rht/zulip,eeshangarg/zulip,SmartPeople/zulip,Diptanshu8/zulip,synicalsyntax/zulip,hackerkid/zulip,ahmadassaf/zulip,punchagan/zulip,dhcrzf/zulip,ashwinirudrappa/zulip,zulip/zulip,LeeRisk/zulip,sonali0901/zulip,zachallaun/zulip,LeeRisk/zulip,zacps/zulip,johnny9/zulip,gkotian/zulip,qq1012803704/zulip,rht/zulip,technicalpickles/zulip,Qgap/zulip,ryansnowboarder/zulip,umkay/zulip,dawran6/zulip,hj3938/zulip,wavelets/zulip,technicalpickles/zulip,KingxBanana/zulip,zulip/zulip,reyha/zulip,j831/zulip,suxinde2009/zulip,PaulPetring/zulip,kokoar/zulip,aliceriot/zulip,akuseru/zulip,verma-varsha/zulip,peiwei/zulip,huangkebo/zulip,gkotian/zulip,Cheppers/zulip,dnmfarrell/zulip,bastianh/zulip,zorojean/zulip,joshisa/zulip,nicholasbs/zulip,vabs22/zulip,ashwinirudrappa/zulip,cosmicAsymmetry/zulip,ericzhou2008/zulip,pradiptad/zulip,swinghu/zulip,dotcool/zulip,aps-sids/zulip,udxxabp/zulip,ahmadassaf/zulip,MariaFaBella85/zulip,Batterfii/zulip,hackerkid/zulip,ryansnowboarder/zulip,shubhamdhama/zulip,aps-sids/zulip,peguin40/zulip,gkotian/zulip,DazWorrall/zulip,nicholasbs/zulip,ahmadassaf/zulip,ikasumiwt/zulip,schatt/zulip,Galexrt/zulip,levixie/zulip,wangdeshui/zulip,natanovia/zulip,synicalsyntax/zulip,synicalsyntax/zulip,alliejones/zulip,andersk/zulip,eastlhu/zulip,kokoar/zulip,gigawhitlocks/zulip,calvinleenyc/zulip,saitodisse/zulip,TigorC/zulip,KJin99/zulip,rht/zulip,suxinde2009/zulip,akuseru/zulip,stamhe/zulip,hj3938/zulip,udxxabp/zulip,kaiyuanheshang/zulip,saitodisse/zulip,tbutter/zulip,themass/zulip,jeffcao/zulip,pradiptad/zulip,dawran6/zulip,atomic-labs/zulip,JPJPJPOPOP/zulip,Gabriel0402/zulip,sharmaeklavya2/zulip,kou/zulip,tiansiyuan/zulip,susansls/zulip,christi3k/zulip,jerryge/zulip,ahmadassaf/zulip,TigorC/zulip,wdaher/zulip,tommyip/zulip,proliming/zulip,zofuthan/zulip,susansls/zulip,zacps/zulip,yocome/zulip,luyifan/zulip,joyhchen/zulip,huangkebo/zulip,swinghu/zulip,wdaher/zulip,DazWorrall/zulip,amallia/zulip,glovebx/zulip,synicalsyntax/zulip,jackrzhang/zulip,xuanhan863/zulip,dxq-git/zulip,bluesea/zulip,EasonYi/zulip,DazWorrall/zulip,jrowan/zulip,ApsOps/zulip,zachallaun/zulip,stamhe/zulip,arpitpanwar/zulip,glovebx/zulip,tdr130/zulip,huangkebo/zulip,karamcnair/zulip,mdavid/zulip,huangkebo/zulip,ryanbackman/zulip,showell/zulip,hustlzp/zulip,m1ssou/zulip,blaze225/zulip,he15his/zulip,developerfm/zulip,Qgap/zulip,suxinde2009/zulip,jessedhillon/zulip,ikasumiwt/zulip,zorojean/zulip,souravbadami/zulip,lfranchi/zulip,zwily/zulip,jainayush975/zulip,Gabriel0402/zulip,willingc/zulip,hj3938/zulip,wweiradio/zulip,bssrdf/zulip,johnnygaddarr/zulip,bowlofstew/zulip,jonesgithub/zulip,yuvipanda/zulip,niftynei/zulip,hayderimran7/zulip,isht3/zulip,Drooids/zulip,m1ssou/zulip,he15his/zulip,eeshangarg/zulip,AZtheAsian/zulip,ipernet/zulip,shrikrishnaholla/zulip,sharmaeklavya2/zulip,yuvipanda/zulip,yuvipanda/zulip,zhaoweigg/zulip,noroot/zulip,ericzhou2008/zulip,wweiradio/zulip,cosmicAsymmetry/zulip,stamhe/zulip,firstblade/zulip,adnanh/zulip,so0k/zulip,amallia/zulip,Galexrt/zulip,aakash-cr7/zulip,ipernet/zulip,johnnygaddarr/zulip,calvinleenyc/zulip,ahmadassaf/zulip,Jianchun1/zulip,moria/zulip,grave-w-grave/zulip,dattatreya303/zulip,bitemyapp/zulip,wdaher/zulip,bitemyapp/zulip,fw1121/zulip,isht3/zulip,PaulPetring/zulip,bowlofstew/zulip,jonesgithub/zulip,niftynei/zulip,willingc/zulip,udxxabp/zulip,aakash-cr7/zulip,PaulPetring/zulip,Cheppers/zulip,calvinleenyc/zulip,zachallaun/zulip,Qgap/zulip,hayderimran7/zulip,Jianchun1/zulip,ikasumiwt/zulip,gigawhitlocks/zulip,zacps/zulip,aps-sids/zulip,MariaFaBella85/zulip,praveenaki/zulip,joshisa/zulip,bastianh/zulip,aliceriot/zulip,MayB/zulip,arpitpanwar/zulip,joyhchen/zulip,sharmaeklavya2/zulip,Galexrt/zulip,fw1121/zulip,mdavid/zulip,armooo/zulip,amanharitsh123/zulip,firstblade/zulip,hengqujushi/zulip,saitodisse/zulip,atomic-labs/zulip,xuxiao/zulip,wavelets/zulip,schatt/zulip,bitemyapp/zulip,sup95/zulip,Gabriel0402/zulip,lfranchi/zulip,Suninus/zulip,avastu/zulip,johnnygaddarr/zulip,calvinleenyc/zulip,christi3k/zulip,m1ssou/zulip,EasonYi/zulip,krtkmj/zulip,Suninus/zulip,dnmfarrell/zulip,bluesea/zulip,voidException/zulip,Vallher/zulip,rht/zulip,lfranchi/zulip,Frouk/zulip,deer-hope/zulip,zulip/zulip,guiquanz/zulip,hengqujushi/zulip,vakila/zulip,so0k/zulip,brainwane/zulip,zacps/zulip,amallia/zulip,bluesea/zulip,mahim97/zulip,ericzhou2008/zulip,fw1121/zulip,zorojean/zulip,Frouk/zulip,KJin99/zulip,punchagan/zulip,ashwinirudrappa/zulip,timabbott/zulip,developerfm/zulip,ikasumiwt/zulip,vakila/zulip,Drooids/zulip,shubhamdhama/zulip,moria/zulip,itnihao/zulip,hj3938/zulip,jonesgithub/zulip,ericzhou2008/zulip,voidException/zulip,peiwei/zulip,firstblade/zulip,zachallaun/zulip,Frouk/zulip,yocome/zulip,codeKonami/zulip,LAndreas/zulip,wweiradio/zulip,bssrdf/zulip,johnnygaddarr/zulip,bowlofstew/zulip,jonesgithub/zulip,yuvipanda/zulip,swinghu/zulip,esander91/zulip,AZtheAsian/zulip,saitodisse/zulip,stamhe/zulip,yocome/zulip,Vallher/zulip,showell/zulip,Cheppers/zulip,Juanvulcano/zulip,ryanbackman/zulip,hustlzp/zulip,akuseru/zulip,swinghu/zulip,arpitpanwar/zulip,hustlzp/zulip,mohsenSy/zulip,grave-w-grave/zulip,wavelets/zulip,KingxBanana/zulip,jainayush975/zulip,saitodisse/zulip,LAndreas/zulip,so0k/zulip,jphilipsen05/zulip,MayB/zulip,luyifan/zulip,dattatreya303/zulip,vaidap/zulip,yuvipanda/zulip,jimmy54/zulip,jphilipsen05/zulip,avastu/zulip,huangkebo/zulip,bastianh/zulip,karamcnair/zulip,vakila/zulip,umkay/zulip,mohsenSy/zulip,dwrpayne/zulip,qq1012803704/zulip,luyifan/zulip,brainwane/zulip,so0k/zulip,atomic-labs/zulip,dwrpayne/zulip,sonali0901/zulip,shrikrishnaholla/zulip,thomasboyt/zulip,dhcrzf/zulip,amyliu345/zulip,avastu/zulip,ufosky-server/zulip,wangdeshui/zulip,bluesea/zulip,tdr130/zulip,wdaher/zulip,zwily/zulip,wweiradio/zulip,bastianh/zulip,reyha/zulip,isht3/zulip,esander91/zulip,hustlzp/zulip,vakila/zulip,themass/zulip,tiansiyuan/zulip,shaunstanislaus/zulip,Jianchun1/zulip,timabbott/zulip,kou/zulip,Qgap/zulip,codeKonami/zulip,kou/zulip,ApsOps/zulip,tiansiyuan/zulip,reyha/zulip,jonesgithub/zulip,arpith/zulip,jessedhillon/zulip,armooo/zulip,verma-varsha/zulip,PhilSk/zulip,bssrdf/zulip,bowlofstew/zulip,xuxiao/zulip,amyliu345/zulip,jackrzhang/zulip,krtkmj/zulip,jessedhillon/zulip,reyha/zulip,yocome/zulip,Cheppers/zulip,punchagan/zulip,joshisa/zulip,yocome/zulip,dattatreya303/zulip,noroot/zulip,babbage/zulip,EasonYi/zulip,jrowan/zulip,tommyip/zulip,KingxBanana/zulip,babbage/zulip,mahim97/zulip,zulip/zulip,reyha/zulip,littledogboy/zulip,RobotCaleb/zulip,proliming/zulip,timabbott/zulip,natanovia/zulip,dawran6/zulip,hafeez3000/zulip,xuxiao/zulip,shubhamdhama/zulip,dxq-git/zulip,niftynei/zulip,jimmy54/zulip,krtkmj/zulip,suxinde2009/zulip,themass/zulip,Suninus/zulip,dotcool/zulip,amyliu345/zulip,jackrzhang/zulip,littledogboy/zulip,tdr130/zulip,bluesea/zulip,jrowan/zulip,jainayush975/zulip,jonesgithub/zulip,atomic-labs/zulip,mansilladev/zulip,blaze225/zulip,jerryge/zulip,bssrdf/zulip,ashwinirudrappa/zulip,jimmy54/zulip,praveenaki/zulip,shrikrishnaholla/zulip,eastlhu/zulip,bssrdf/zulip,susansls/zulip,dhcrzf/zulip,jerryge/zulip,hengqujushi/zulip,fw1121/zulip,tdr130/zulip,xuanhan863/zulip,technicalpickles/zulip,tiansiyuan/zulip,schatt/zulip,PhilSk/zulip,paxapy/zulip,andersk/zulip,dawran6/zulip,amallia/zulip,nicholasbs/zulip,blaze225/zulip,xuanhan863/zulip,alliejones/zulip,lfranchi/zulip,sharmaeklavya2/zulip,showell/zulip,brockwhittaker/zulip,sharmaeklavya2/zulip,hj3938/zulip,jeffcao/zulip,vakila/zulip,j831/zulip,brockwhittaker/zulip,dxq-git/zulip,showell/zulip,armooo/zulip,amanharitsh123/zulip,esander91/zulip,brainwane/zulip,seapasulli/zulip,hackerkid/zulip,fw1121/zulip,seapasulli/zulip,rishig/zulip,glovebx/zulip,pradiptad/zulip,huangkebo/zulip,Vallher/zulip,bssrdf/zulip,JanzTam/zulip,SmartPeople/zulip,zachallaun/zulip,mohsenSy/zulip,tbutter/zulip,peiwei/zulip,praveenaki/zulip,praveenaki/zulip,adnanh/zulip,lfranchi/zulip,Suninus/zulip,hustlzp/zulip,Vallher/zulip,bluesea/zulip,SmartPeople/zulip,itnihao/zulip,rishig/zulip,rht/zulip,karamcnair/zulip,karamcnair/zulip,bowlofstew/zulip,lfranchi/zulip,isht3/zulip,zwily/zulip,LeeRisk/zulip,armooo/zulip,LeeRisk/zulip,synicalsyntax/zulip,eastlhu/zulip,SmartPeople/zulip,he15his/zulip,zachallaun/zulip,dattatreya303/zulip,esander91/zulip,ipernet/zulip,AZtheAsian/zulip,LAndreas/zulip,mansilladev/zulip,hengqujushi/zulip,mansilladev/zulip,grave-w-grave/zulip,voidException/zulip,showell/zulip,Cheppers/zulip,vabs22/zulip,akuseru/zulip,blaze225/zulip,bluesea/zulip,DazWorrall/zulip,gigawhitlocks/zulip,PaulPetring/zulip,ufosky-server/zulip,guiquanz/zulip,willingc/zulip,joshisa/zulip,samatdav/zulip,eeshangarg/zulip,Gabriel0402/zulip,timabbott/zulip,kou/zulip,tommyip/zulip,jrowan/zulip,cosmicAsymmetry/zulip,LeeRisk/zulip,shrikrishnaholla/zulip,hackerkid/zulip,MayB/zulip,ufosky-server/zulip,EasonYi/zulip,DazWorrall/zulip,hustlzp/zulip,sup95/zulip,armooo/zulip,synicalsyntax/zulip,itnihao/zulip,jimmy54/zulip,johnnygaddarr/zulip,peguin40/zulip,ashwinirudrappa/zulip,Batterfii/zulip,ryansnowboarder/zulip,kaiyuanheshang/zulip,vaidap/zulip,aakash-cr7/zulip,ryansnowboarder/zulip,m1ssou/zulip,ryansnowboarder/zulip,shaunstanislaus/zulip,babbage/zulip,jerryge/zulip,itnihao/zulip,MayB/zulip,kokoar/zulip,vaidap/zulip,eastlhu/zulip,itnihao/zulip,arpith/zulip,RobotCaleb/zulip,timabbott/zulip,ericzhou2008/zulip,johnny9/zulip,susansls/zulip,Vallher/zulip,adnanh/zulip,he15his/zulip,PaulPetring/zulip,hayderimran7/zulip,zorojean/zulip,Diptanshu8/zulip,adnanh/zulip,MariaFaBella85/zulip,levixie/zulip,RobotCaleb/zulip,niftynei/zulip,nicholasbs/zulip,wavelets/zulip,developerfm/zulip,guiquanz/zulip,firstblade/zulip,itnihao/zulip,sonali0901/zulip,hengqujushi/zulip,Drooids/zulip,firstblade/zulip,amallia/zulip,peguin40/zulip,zorojean/zulip,kaiyuanheshang/zulip,dotcool/zulip,Qgap/zulip,armooo/zulip,natanovia/zulip,ipernet/zulip,bitemyapp/zulip,mansilladev/zulip,Gabriel0402/zulip,Frouk/zulip,ApsOps/zulip,ryanbackman/zulip,vabs22/zulip,jerryge/zulip,niftynei/zulip,ikasumiwt/zulip,jphilipsen05/zulip,wavelets/zulip,jeffcao/zulip,wavelets/zulip,he15his/zulip,aps-sids/zulip,xuanhan863/zulip,calvinleenyc/zulip,ApsOps/zulip,PhilSk/zulip,paxapy/zulip,andersk/zulip,dotcool/zulip,karamcnair/zulip,yuvipanda/zulip,shaunstanislaus/zulip,kokoar/zulip,joyhchen/zulip,littledogboy/zulip,swinghu/zulip,noroot/zulip,AZtheAsian/zulip,verma-varsha/zulip,natanovia/zulip,jeffcao/zulip,levixie/zulip,joyhchen/zulip,glovebx/zulip,alliejones/zulip,ipernet/zulip,zhaoweigg/zulip,bssrdf/zulip,LAndreas/zulip,aps-sids/zulip,paxapy/zulip,brainwane/zulip,Batterfii/zulip,m1ssou/zulip,KJin99/zulip,Jianchun1/zulip,hackerkid/zulip,ahmadassaf/zulip,seapasulli/zulip,noroot/zulip,umkay/zulip,codeKonami/zulip,karamcnair/zulip,aliceriot/zulip,mohsenSy/zulip,shaunstanislaus/zulip,umkay/zulip,johnny9/zulip,aliceriot/zulip,jessedhillon/zulip,arpitpanwar/zulip,christi3k/zulip,luyifan/zulip,thomasboyt/zulip,krtkmj/zulip,sonali0901/zulip,ufosky-server/zulip,aakash-cr7/zulip,ipernet/zulip
---
+++
@@ -0,0 +1,66 @@
+from __future__ import absolute_import
+
+from zephyr.models import UserProfile, get_user_profile_by_id
+from zephyr.lib.rate_limiter import redis_key, client, max_api_calls, max_api_window
+
+from django.core.management.base import BaseCommand
+from django.conf import settings
+from optparse import make_option
+
+import os, time, logging
+
+class Command(BaseCommand):
+    help = """Checks redis to make sure our rate limiting system hasn't grown a bug and left redis with a bunch of data
+
+    Usage: ./manage.py [--trim] check_redis"""
+
+    option_list = BaseCommand.option_list + (
+        make_option('-t', '--trim',
+                    dest='trim',
+                    default=False,
+                    action='store_true',
+                    help="Actually trim excess"),
+    )
+
+    def _check_within_range(self, key, count_func, trim_func):
+        user_id = int(key.split(':')[1])
+        try:
+            user = get_user_profile_by_id(user_id)
+        except:
+            user = None
+        max_calls = max_api_calls(user=user)
+
+        count = count_func()
+        if count > max_calls:
+            logging.error("Redis health check found key with more elements \
+than max_api_calls! (trying to trim) %s %s" % (key, count))
+            client.expire(key, max_api_window(user=user))
+            if self.trim:
+                trim_func(key, max_calls)
+
+    def handle(self, *args, **options):
+        if not settings.RATE_LIMITING:
+            print "This machine is not using redis or rate limiting, aborting"
+            exit(1)
+
+        # Find all keys, and make sure they're all within size constraints
+        wildcard_list = "ratelimit:*:*:list"
+        wildcard_zset = "ratelimit:*:*:zset"
+
+        self.trim = options['trim']
+
+        lists = client.keys(wildcard_list)
+        for list_name in lists:
+            self._check_within_range(list_name,
+                                     lambda: client.llen(list_name),
+                                     lambda key, max_calls: client.ltrim(key, 0, max_calls - 1))
+
+        zsets = client.keys(wildcard_zset)
+        for zset in zsets:
+            now = time.time()
+            # We can warn on our zset being too large, but we don't know what
+            # elements to trim. We'd have to go through every list item and take
+            # the intersection. The best we can do is expire it
+            self._check_within_range(zset,
+                                     lambda: client.zcount(zset, 0, now),
+                                     lambda key, max_calls: None)
29ed484c77ab1c68c5e81f06a527da49713ee427
euler020.py
euler020.py
#!/usr/bin/python

from math import factorial

fact = str(factorial(100))

result = 0
for i in range(len(fact)):
    result += int(fact[i])

print(result)
Add solution for problem 20
Add solution for problem 20
Python
mit
cifvts/PyEuler
--- +++ @@ -0,0 +1,11 @@ +#!/usr/bin/python + +from math import factorial + +fact = str(factorial(100)) + +result = 0 +for i in range(len(fact)): + result += int(fact[i]) + +print(result)
073bcb1f6f495305c9d02300646e269fcd2b920e
hc/api/migrations/0059_auto_20190314_1744.py
hc/api/migrations/0059_auto_20190314_1744.py
# Generated by Django 2.1.7 on 2019-03-14 17:44

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('api', '0058_auto_20190312_1716'),
    ]

    operations = [
        migrations.AlterField(
            model_name='channel',
            name='kind',
            field=models.CharField(choices=[('email', 'Email'), ('webhook', 'Webhook'), ('hipchat', 'HipChat'), ('slack', 'Slack'), ('pd', 'PagerDuty'), ('pagertree', 'PagerTree'), ('pagerteam', 'PagerTeam'), ('po', 'Pushover'), ('pushbullet', 'Pushbullet'), ('opsgenie', 'OpsGenie'), ('victorops', 'VictorOps'), ('discord', 'Discord'), ('telegram', 'Telegram'), ('sms', 'SMS'), ('zendesk', 'Zendesk'), ('trello', 'Trello'), ('matrix', 'Matrix')], max_length=20),
        ),
    ]
Add migration (autogenerated via `manage.py makemigrations`)
Add migration (autogenerated via `manage.py makemigrations`)
Python
bsd-3-clause
healthchecks/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,iphoting/healthchecks,healthchecks/healthchecks,iphoting/healthchecks
--- +++ @@ -0,0 +1,18 @@ +# Generated by Django 2.1.7 on 2019-03-14 17:44 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('api', '0058_auto_20190312_1716'), + ] + + operations = [ + migrations.AlterField( + model_name='channel', + name='kind', + field=models.CharField(choices=[('email', 'Email'), ('webhook', 'Webhook'), ('hipchat', 'HipChat'), ('slack', 'Slack'), ('pd', 'PagerDuty'), ('pagertree', 'PagerTree'), ('pagerteam', 'PagerTeam'), ('po', 'Pushover'), ('pushbullet', 'Pushbullet'), ('opsgenie', 'OpsGenie'), ('victorops', 'VictorOps'), ('discord', 'Discord'), ('telegram', 'Telegram'), ('sms', 'SMS'), ('zendesk', 'Zendesk'), ('trello', 'Trello'), ('matrix', 'Matrix')], max_length=20), + ), + ]
75f69d02100e4f804fd6e742841c0e5ecb1731d2
algorithms/graph-theory/prims-mst-special-subtree/prims-mst.py
algorithms/graph-theory/prims-mst-special-subtree/prims-mst.py
#!/usr/bin/env python

import sys
from queue import PriorityQueue


class Graph(object):
    """
    Represents a graph using an adjacency list.
    """
    def __init__(self, N):
        self.nodes = [None] * N

    def add_undir_edge(self, x, y, r):
        self.add_dir_edge(x, y, r)
        self.add_dir_edge(y, x, r)

    def add_dir_edge(self, x, y, r):
        if self.nodes[x] is None:
            self.nodes[x] = dict()
        self.nodes[x][y] = r


def load_graph(input):
    """
    Populates a graph using the input stream.
    """
    (N, M) = [int(i) for i in input.readline().split(' ')]
    graph = Graph(N)
    for i in range(0, M):
        (x, y, r) = [int(i) for i in input.readline().split(' ')]
        graph.add_undir_edge(x - 1, y - 1, r)
    return graph


def prims_weight(graph, S):
    """
    Runs Prim's algorithm on the graph and returns the weight of the MST.
    """
    weight = 0
    queue = PriorityQueue()
    queue.put((0, S))
    visited = [False] * len(graph.nodes)
    while queue.qsize() > 0:
        (cost, node) = queue.get()
        if visited[node]:
            continue
        visited[node] = True  # Mark node as visited
        weight += cost  # Increment MST weight
        for neighbor in graph.nodes[node]:  # Enqueue neighbors
            if visited[neighbor]:
                continue
            cost = graph.nodes[node][neighbor]
            queue.put((cost, neighbor))
    return weight


def main():
    graph = load_graph(sys.stdin)
    S = int(sys.stdin.readline())
    weight = prims_weight(graph, S - 1)
    print(weight)


if __name__ == '__main__':
    main()
Implement Prim's MST in Python
Implement Prim's MST in Python
Python
mit
andreimaximov/algorithms,andreimaximov/algorithms,andreimaximov/algorithms,andreimaximov/algorithms
--- +++ @@ -0,0 +1,66 @@ +#!/usr/bin/env python + +import sys +from queue import PriorityQueue + + +class Graph(object): + """ + Represents a graph using an adjacency list. + """ + def __init__(self, N): + self.nodes = [None] * N + + def add_undir_edge(self, x, y, r): + self.add_dir_edge(x, y, r) + self.add_dir_edge(y, x, r) + + def add_dir_edge(self, x, y, r): + if self.nodes[x] is None: + self.nodes[x] = dict() + self.nodes[x][y] = r + + +def load_graph(input): + """ + Populates a graph using the input stream. + """ + (N, M) = [int(i) for i in input.readline().split(' ')] + graph = Graph(N) + for i in range(0, M): + (x, y, r) = [int(i) for i in input.readline().split(' ')] + graph.add_undir_edge(x - 1, y - 1, r) + return graph + + +def prims_weight(graph, S): + """ + Runs Prim's algorithm on the graph and returns the weight of the MST. + """ + weight = 0 + queue = PriorityQueue() + queue.put((0, S)) + visited = [False] * len(graph.nodes) + while queue.qsize() > 0: + (cost, node) = queue.get() + if visited[node]: + continue + visited[node] = True # Mark node as visited + weight += cost # Increment MST weight + for neighbor in graph.nodes[node]: # Enqueue neighbors + if visited[neighbor]: + continue + cost = graph.nodes[node][neighbor] + queue.put((cost, neighbor)) + return weight + + +def main(): + graph = load_graph(sys.stdin) + S = int(sys.stdin.readline()) + weight = prims_weight(graph, S - 1) + print(weight) + + +if __name__ == '__main__': + main()
5b29eaacb363501c9596061a1bd197c49bb00db3
qa/manage_crypto_listings.py
qa/manage_crypto_listings.py
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure


class ManageCryptoListingsTest(OpenBazaarTestFramework):

    def __init__(self):
        super().__init__()
        self.num_nodes = 1

    def run_test(self):
        vendor = self.nodes[0]

        # post profile for vendor
        with open('testdata/profile.json') as profile_file:
            profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
        api_url = vendor["gateway_url"] + "ob/profile"
        requests.post(api_url, data=json.dumps(profile_json, indent=4))

        # check index
        r = requests.get(vendor["gateway_url"] + "ob/listings")
        resp = json.loads(r.text)
        if len(resp) != 0:
            raise TestFailure("ManageCryptoListingsTest - FAIL: Incorrect listing count: %d", len(resp))

        # post listing to vendor
        with open('testdata/listing_crypto.json') as listing_file:
            listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)

        api_url = vendor["gateway_url"] + "ob/listing"
        r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
        if r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("ManageCryptoListingsTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
        slug = json.loads(r.text)["slug"]

        # check index
        r = requests.get(vendor["gateway_url"] + "ob/listings")
        resp = json.loads(r.text)
        if len(resp) != 1:
            raise TestFailure("ManageCryptoListingsTest - FAIL: Incorrect listing count: %d", len(resp))
        for listing in resp:
            if listing['contractType'] == 'CRYPTOCURRENCY':
                if listing["coinType"] != "ETH":
                    raise TestFailure("ManageCryptoListingsTest - FAIL: coinType incorrect: %s", listing["coinType"])

        # delete listing
        api_url = vendor["gateway_url"] + "ob/listing/"+slug
        r = requests.delete(api_url)
        if r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("ManageCryptoListingsTest - FAIL: Listing DELETE failed. Reason: %s", resp["reason"])

        # check index
        r = requests.get(vendor["gateway_url"] + "ob/listings")
        resp = json.loads(r.text)
        if len(resp) != 0:
            raise TestFailure("ManageCryptoListingsTest - FAIL: Incorrect listing count: %d", len(resp))

        print("ManageCryptoListingsTest - PASS")

if __name__ == '__main__':
    print("Running ManageCryptoListingsTest")
    ManageCryptoListingsTest().main(["--regtest", "--disableexchangerates"])
Add crypto listing management qa test and test listings index.
TESTS: Add crypto listing management qa test and test listings index.
Python
mit
OpenBazaar/openbazaar-go,gubatron/openbazaar-go,hoffmabc/openbazaar-go,hoffmabc/openbazaar-go,OpenBazaar/openbazaar-go,hoffmabc/openbazaar-go,gubatron/openbazaar-go,OpenBazaar/openbazaar-go,gubatron/openbazaar-go
--- +++ @@ -0,0 +1,67 @@ +import requests +import json +import time +from collections import OrderedDict +from test_framework.test_framework import OpenBazaarTestFramework, TestFailure + + +class ManageCryptoListingsTest(OpenBazaarTestFramework): + + def __init__(self): + super().__init__() + self.num_nodes = 1 + + def run_test(self): + vendor = self.nodes[0] + + # post profile for vendor + with open('testdata/profile.json') as profile_file: + profile_json = json.load(profile_file, object_pairs_hook=OrderedDict) + api_url = vendor["gateway_url"] + "ob/profile" + requests.post(api_url, data=json.dumps(profile_json, indent=4)) + + # check index + r = requests.get(vendor["gateway_url"] + "ob/listings") + resp = json.loads(r.text) + if len(resp) != 0: + raise TestFailure("ManageCryptoListingsTest - FAIL: Incorrect listing count: %d", len(resp)) + + # post listing to vendor + with open('testdata/listing_crypto.json') as listing_file: + listing_json = json.load(listing_file, object_pairs_hook=OrderedDict) + + api_url = vendor["gateway_url"] + "ob/listing" + r = requests.post(api_url, data=json.dumps(listing_json, indent=4)) + if r.status_code != 200: + resp = json.loads(r.text) + raise TestFailure("ManageCryptoListingsTest - FAIL: Listing POST failed. Reason: %s", resp["reason"]) + slug = json.loads(r.text)["slug"] + + # check index + r = requests.get(vendor["gateway_url"] + "ob/listings") + resp = json.loads(r.text) + if len(resp) != 1: + raise TestFailure("ManageCryptoListingsTest - FAIL: Incorrect listing count: %d", len(resp)) + for listing in resp: + if listing['contractType'] == 'CRYPTOCURRENCY': + if listing["coinType"] != "ETH": + raise TestFailure("ManageCryptoListingsTest - FAIL: coinType incorrect: %s", listing["coinType"]) + + # delete listing + api_url = vendor["gateway_url"] + "ob/listing/"+slug + r = requests.delete(api_url) + if r.status_code != 200: + resp = json.loads(r.text) + raise TestFailure("ManageCryptoListingsTest - FAIL: Listing DELETE failed. Reason: %s", resp["reason"]) + + # check index + r = requests.get(vendor["gateway_url"] + "ob/listings") + resp = json.loads(r.text) + if len(resp) != 0: + raise TestFailure("ManageCryptoListingsTest - FAIL: Incorrect listing count: %d", len(resp)) + + print("ManageCryptoListingsTest - PASS") + +if __name__ == '__main__': + print("Running ManageCryptoListingsTest") + ManageCryptoListingsTest().main(["--regtest", "--disableexchangerates"])
a29540ea36ab4e73ba3d89fc8ed47022af28b482
readthedocs/rtd_tests/tests/test_build_storage.py
readthedocs/rtd_tests/tests/test_build_storage.py
import os
import shutil
import tempfile

from django.test import TestCase

from readthedocs.builds.storage import BuildMediaFileSystemStorage


files_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'files')


class TestBuildMediaStorage(TestCase):
    def setUp(self):
        self.test_media_dir = tempfile.mkdtemp()
        self.storage = BuildMediaFileSystemStorage(location=self.test_media_dir)

    def tearDown(self):
        shutil.rmtree(self.test_media_dir, ignore_errors=True)

    def test_copy_directory(self):
        self.assertFalse(self.storage.exists('files/test.html'))

        self.storage.copy_directory(files_dir, 'files')
        self.assertTrue(self.storage.exists('files/test.html'))
        self.assertTrue(self.storage.exists('files/conf.py'))
        self.assertTrue(self.storage.exists('files/api.fjson'))

    def test_delete_directory(self):
        self.storage.copy_directory(files_dir, 'files')
        dirs, files = self.storage.listdir('files')
        self.assertEqual(dirs, [])
        self.assertEqual(files, ['api.fjson', 'conf.py', 'test.html'])

        self.storage.delete_directory('files/')
        dirs, files = self.storage.listdir('files')
        self.assertEqual(dirs, [])
        self.assertEqual(files, [])

    def test_walk(self):
        self.storage.copy_directory(files_dir, 'files')
        self.storage.copy_directory(files_dir, 'files/subfiles')

        output = list(self.storage.walk('files'))
        self.assertEqual(len(output), 2)
        self.assertEqual(
            output,
            [
                ('files', ['subfiles'], ['api.fjson', 'conf.py', 'test.html']),
                ('files/subfiles', [], ['api.fjson', 'conf.py', 'test.html']),
            ],
        )
Add tests for the build media storage
Add tests for the build media storage
Python
mit
rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org,rtfd/readthedocs.org
--- +++ @@ -0,0 +1,52 @@ +import os +import shutil +import tempfile + +from django.test import TestCase + +from readthedocs.builds.storage import BuildMediaFileSystemStorage + + +files_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'files') + + +class TestBuildMediaStorage(TestCase): + def setUp(self): + self.test_media_dir = tempfile.mkdtemp() + self.storage = BuildMediaFileSystemStorage(location=self.test_media_dir) + + def tearDown(self): + shutil.rmtree(self.test_media_dir, ignore_errors=True) + + def test_copy_directory(self): + self.assertFalse(self.storage.exists('files/test.html')) + + self.storage.copy_directory(files_dir, 'files') + self.assertTrue(self.storage.exists('files/test.html')) + self.assertTrue(self.storage.exists('files/conf.py')) + self.assertTrue(self.storage.exists('files/api.fjson')) + + def test_delete_directory(self): + self.storage.copy_directory(files_dir, 'files') + dirs, files = self.storage.listdir('files') + self.assertEqual(dirs, []) + self.assertEqual(files, ['api.fjson', 'conf.py', 'test.html']) + + self.storage.delete_directory('files/') + dirs, files = self.storage.listdir('files') + self.assertEqual(dirs, []) + self.assertEqual(files, []) + + def test_walk(self): + self.storage.copy_directory(files_dir, 'files') + self.storage.copy_directory(files_dir, 'files/subfiles') + + output = list(self.storage.walk('files')) + self.assertEqual(len(output), 2) + self.assertEqual( + output, + [ + ('files', ['subfiles'], ['api.fjson', 'conf.py', 'test.html']), + ('files/subfiles', [], ['api.fjson', 'conf.py', 'test.html']), + ], + )
7ed7cab1cc41fea7665d9e9c05cbb2eb097486a3
appointment/migrations/0002_vaccineappointment_20181031_1852.py
appointment/migrations/0002_vaccineappointment_20181031_1852.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2018-10-31 23:52
from __future__ import unicode_literals

import datetime
from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('appointment', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appointment',
            name='appointment_type',
            field=models.CharField(choices=[(b'PSYCH_NIGHT', b'Psych Night'), (b'ACUTE_FOLLOWUP', b'Acute Followup'), (b'CHRONIC_CARE', b'Chronic Care'), (b'VACCINE', b'Vaccine Followup')], default=b'CHRONIC_CARE', max_length=15, verbose_name=b'Appointment Type'),
        ),
        migrations.AlterField(
            model_name='appointment',
            name='clintime',
            field=models.TimeField(default=datetime.datetime(2018, 10, 31, 9, 0, tzinfo=utc), verbose_name=b'Time of Appointment'),
        ),
        migrations.AlterField(
            model_name='historicalappointment',
            name='appointment_type',
            field=models.CharField(choices=[(b'PSYCH_NIGHT', b'Psych Night'), (b'ACUTE_FOLLOWUP', b'Acute Followup'), (b'CHRONIC_CARE', b'Chronic Care'), (b'VACCINE', b'Vaccine Followup')], default=b'CHRONIC_CARE', max_length=15, verbose_name=b'Appointment Type'),
        ),
        migrations.AlterField(
            model_name='historicalappointment',
            name='clintime',
            field=models.TimeField(default=datetime.datetime(2018, 10, 31, 9, 0, tzinfo=utc), verbose_name=b'Time of Appointment'),
        ),
    ]
Add migration for vaccine appointment update.
Add migration for vaccine appointment update.
Python
mit
SaturdayNeighborhoodHealthClinic/clintools,SaturdayNeighborhoodHealthClinic/clintools,SaturdayNeighborhoodHealthClinic/clintools
--- +++ @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.9.1 on 2018-10-31 23:52 +from __future__ import unicode_literals + +import datetime +from django.db import migrations, models +from django.utils.timezone import utc + + +class Migration(migrations.Migration): + + dependencies = [ + ('appointment', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='appointment', + name='appointment_type', + field=models.CharField(choices=[(b'PSYCH_NIGHT', b'Psych Night'), (b'ACUTE_FOLLOWUP', b'Acute Followup'), (b'CHRONIC_CARE', b'Chronic Care'), (b'VACCINE', b'Vaccine Followup')], default=b'CHRONIC_CARE', max_length=15, verbose_name=b'Appointment Type'), + ), + migrations.AlterField( + model_name='appointment', + name='clintime', + field=models.TimeField(default=datetime.datetime(2018, 10, 31, 9, 0, tzinfo=utc), verbose_name=b'Time of Appointment'), + ), + migrations.AlterField( + model_name='historicalappointment', + name='appointment_type', + field=models.CharField(choices=[(b'PSYCH_NIGHT', b'Psych Night'), (b'ACUTE_FOLLOWUP', b'Acute Followup'), (b'CHRONIC_CARE', b'Chronic Care'), (b'VACCINE', b'Vaccine Followup')], default=b'CHRONIC_CARE', max_length=15, verbose_name=b'Appointment Type'), + ), + migrations.AlterField( + model_name='historicalappointment', + name='clintime', + field=models.TimeField(default=datetime.datetime(2018, 10, 31, 9, 0, tzinfo=utc), verbose_name=b'Time of Appointment'), + ), + ]
c46731098c6a8f26e4de899d2d2e083734f6772f
numpy/typing/tests/test_typing_extensions.py
numpy/typing/tests/test_typing_extensions.py
"""Tests for the optional typing-extensions dependency.""" import sys import types import inspect import importlib import typing_extensions import numpy.typing as npt def _is_sub_module(obj: object) -> bool: """Check if `obj` is a `numpy.typing` submodule.""" return inspect.ismodule(obj) and obj.__name__.startswith("numpy.typing") def _is_dunder(name: str) -> bool: """Check whether `name` is a dunder.""" return name.startswith("__") and name.endswith("__") def _clear_attr(module: types.ModuleType) -> None: """Clear all (non-dunder) module-level attributes.""" del_names = [name for name in vars(module) if not _is_dunder(name)] for name in del_names: delattr(module, name) MODULES = {"numpy.typing": npt} MODULES.update({ f"numpy.typing.{k}": v for k, v in vars(npt).items() if _is_sub_module(v) }) def test_no_typing_extensions() -> None: """Import `numpy.typing` in the absence of typing-extensions. Notes ----- Ideally, we'd just run the normal typing tests in an environment where typing-extensions is not installed, but unfortunatelly this is currently impossible as it is an indirect hard dependency of pytest. """ assert "typing_extensions" in sys.modules try: sys.modules["typing_extensions"] = None for name, module in MODULES.items(): _clear_attr(module) assert importlib.reload(module), name finally: sys.modules["typing_extensions"] = typing_extensions for module in MODULES.values(): _clear_attr(module) importlib.reload(module)
Test that `numpy.typing` can be imported in the absence of typing-extensions
TST: Test that `numpy.typing` can be imported in the absence of typing-extensions
Python
bsd-3-clause
pdebuyl/numpy,rgommers/numpy,charris/numpy,anntzer/numpy,numpy/numpy,rgommers/numpy,mhvk/numpy,seberg/numpy,pdebuyl/numpy,endolith/numpy,jakirkham/numpy,mattip/numpy,mhvk/numpy,simongibbons/numpy,numpy/numpy,endolith/numpy,simongibbons/numpy,pdebuyl/numpy,simongibbons/numpy,endolith/numpy,anntzer/numpy,seberg/numpy,numpy/numpy,mattip/numpy,endolith/numpy,anntzer/numpy,simongibbons/numpy,charris/numpy,charris/numpy,mattip/numpy,mhvk/numpy,charris/numpy,rgommers/numpy,pdebuyl/numpy,numpy/numpy,mattip/numpy,mhvk/numpy,rgommers/numpy,seberg/numpy,jakirkham/numpy,jakirkham/numpy,jakirkham/numpy,seberg/numpy,jakirkham/numpy,anntzer/numpy,mhvk/numpy,simongibbons/numpy
--- +++ @@ -0,0 +1,56 @@ +"""Tests for the optional typing-extensions dependency.""" + +import sys +import types +import inspect +import importlib + +import typing_extensions +import numpy.typing as npt + + +def _is_sub_module(obj: object) -> bool: + """Check if `obj` is a `numpy.typing` submodule.""" + return inspect.ismodule(obj) and obj.__name__.startswith("numpy.typing") + + +def _is_dunder(name: str) -> bool: + """Check whether `name` is a dunder.""" + return name.startswith("__") and name.endswith("__") + + +def _clear_attr(module: types.ModuleType) -> None: + """Clear all (non-dunder) module-level attributes.""" + del_names = [name for name in vars(module) if not _is_dunder(name)] + for name in del_names: + delattr(module, name) + + +MODULES = {"numpy.typing": npt} +MODULES.update({ + f"numpy.typing.{k}": v for k, v in vars(npt).items() if _is_sub_module(v) +}) + + +def test_no_typing_extensions() -> None: + """Import `numpy.typing` in the absence of typing-extensions. + + Notes + ----- + Ideally, we'd just run the normal typing tests in an environment where + typing-extensions is not installed, but unfortunatelly this is currently + impossible as it is an indirect hard dependency of pytest. + + """ + assert "typing_extensions" in sys.modules + + try: + sys.modules["typing_extensions"] = None + for name, module in MODULES.items(): + _clear_attr(module) + assert importlib.reload(module), name + finally: + sys.modules["typing_extensions"] = typing_extensions + for module in MODULES.values(): + _clear_attr(module) + importlib.reload(module)
0a6f6db77dd888b810089659100158ed4e8e3cee
test/test_object_factory.py
test/test_object_factory.py
import unittest
import groundstation.objects.object_factory as object_factory
from groundstation.objects.root_object import RootObject
from groundstation.objects.update_object import UpdateObject


class TestRootObject(unittest.TestCase):
    def test_hydrate_root_object(self):
        root = RootObject(
            "test_object",
            "richo@psych0tik.net:groundstation/tests",
            "richo@psych0tik.net:groundstation/testcase"
        )
        hydrated_root = object_factory.hydrate_object(root.as_object())
        self.assertTrue(isinstance(hydrated_root, RootObject))

class TestUpdateObject(unittest.TestCase):
    def test_hydate_update_with_1_parent(self):
        update = UpdateObject(
            ["d41e2dadaf624319518a9dfa8ef4cb0dde055b5c"],
            "Lol I r update data"
        )
        hydrated_update = object_factory.hydrate_object(update.as_object())
        self.assertTrue(isinstance(hydrated_update, UpdateObject))

    def test_hydate_update_with_2_parent(self):
        update = UpdateObject(
            ["d41e2dadaf624319518a9dfa8ef4cb0dde055b5c",
             "d41e2dadaf624319518a9dfa8ef4cb0dde055bff"],
            "Lol I r update data"
        )
        hydrated_update = object_factory.hydrate_object(update.as_object())
        self.assertTrue(isinstance(hydrated_update, UpdateObject))
Add tests for the object factory with various permutations
Add tests for the object factory with various permutations
Python
mit
richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation
--- +++ @@ -0,0 +1,33 @@ +import unittest +import groundstation.objects.object_factory as object_factory +from groundstation.objects.root_object import RootObject +from groundstation.objects.update_object import UpdateObject + + +class TestRootObject(unittest.TestCase): + def test_hydrate_root_object(self): + root = RootObject( + "test_object", + "richo@psych0tik.net:groundstation/tests", + "richo@psych0tik.net:groundstation/testcase" + ) + hydrated_root = object_factory.hydrate_object(root.as_object()) + self.assertTrue(isinstance(hydrated_root, RootObject)) + +class TestUpdateObject(unittest.TestCase): + def test_hydate_update_with_1_parent(self): + update = UpdateObject( + ["d41e2dadaf624319518a9dfa8ef4cb0dde055b5c"], + "Lol I r update data" + ) + hydrated_update = object_factory.hydrate_object(update.as_object()) + self.assertTrue(isinstance(hydrated_update, UpdateObject)) + + def test_hydate_update_with_2_parent(self): + update = UpdateObject( + ["d41e2dadaf624319518a9dfa8ef4cb0dde055b5c", + "d41e2dadaf624319518a9dfa8ef4cb0dde055bff"], + "Lol I r update data" + ) + hydrated_update = object_factory.hydrate_object(update.as_object()) + self.assertTrue(isinstance(hydrated_update, UpdateObject))
93bc13af093186b3a74570882135b81ddeeb6719
drudge/term.py
drudge/term.py
"""Tensor term definition and utility.""" from sympy import sympify class Range: """A symbolic range that can be summed over. This class is for symbolic ranges that is going to be summed over in tensors. Each range should have a label, and optionally lower and upper bounds, which should be both given or absent. The bounds will not be directly used for symbolic computation, but rather designed for printers and conversion to SymPy summation. Note that ranges are assumed to be atomic and disjoint. Even in the presence of lower and upper bounds, unequal ranges are assumed to be disjoint. .. warning:: Unequal ranges are always assumed to be disjoint. """ __slots__ = [ '_label', '_lower', '_upper' ] def __init__(self, label, lower=None, upper=None): """Initialize the symbolic range.""" self._label = label self._lower = sympify(lower) if lower is not None else lower if self._lower is None: if upper is not None: raise ValueError('lower range has not been given.') else: self._upper = None else: if upper is None: raise ValueError('upper range has not been given.') else: self._upper = sympify(upper) @property def label(self): """Get the label of the range.""" return self._label @property def lower(self): """Get the lower bound of the range.""" return self._lower @property def upper(self): """Get the upper bound of the range.""" return self._upper @property def args(self): """Get the arguments for range creation. When the bounds are present, we have a triple, or we have a singleton tuple of only the label. """ if self._lower is not None: return (self._label, self._lower, self._upper) else: return (self._label,) def __hash__(self): """Hash the symbolic range.""" return hash(self.args) def __eq__(self, other): """Compare equality of two ranges.""" return isinstance(other, type(self)) and ( self.args == other.args ) def __repr__(self): """Form the representative string.""" return ''.join([ 'Range(', ', '.join(repr(i) for i in self.args), ')' ]) def __str__(self): """Form readable string representation.""" return str(self._label)
Add class for symbolic ranges
Add class for symbolic ranges

Compared with PySLATA, this definition is a lot more simplified. All the ranges are assumed to be atomic and disjoint. No need to implement the range arithmetic.
Python
mit
tschijnmo/drudge,tschijnmo/drudge,tschijnmo/drudge
--- +++ @@ -0,0 +1,91 @@ +"""Tensor term definition and utility.""" + +from sympy import sympify + + +class Range: + """A symbolic range that can be summed over. + + This class is for symbolic ranges that is going to be summed over in + tensors. Each range should have a label, and optionally lower and upper + bounds, which should be both given or absent. The bounds will not be + directly used for symbolic computation, but rather designed for printers + and conversion to SymPy summation. Note that ranges are assumed to be + atomic and disjoint. Even in the presence of lower and upper bounds, + unequal ranges are assumed to be disjoint. + + .. warning:: + + Unequal ranges are always assumed to be disjoint. + + """ + + __slots__ = [ + '_label', + '_lower', + '_upper' + ] + + def __init__(self, label, lower=None, upper=None): + """Initialize the symbolic range.""" + self._label = label + self._lower = sympify(lower) if lower is not None else lower + + if self._lower is None: + if upper is not None: + raise ValueError('lower range has not been given.') + else: + self._upper = None + else: + if upper is None: + raise ValueError('upper range has not been given.') + else: + self._upper = sympify(upper) + + @property + def label(self): + """Get the label of the range.""" + return self._label + + @property + def lower(self): + """Get the lower bound of the range.""" + return self._lower + + @property + def upper(self): + """Get the upper bound of the range.""" + return self._upper + + @property + def args(self): + """Get the arguments for range creation. + + When the bounds are present, we have a triple, or we have a singleton + tuple of only the label. + """ + + if self._lower is not None: + return (self._label, self._lower, self._upper) + else: + return (self._label,) + + def __hash__(self): + """Hash the symbolic range.""" + return hash(self.args) + + def __eq__(self, other): + """Compare equality of two ranges.""" + return isinstance(other, type(self)) and ( + self.args == other.args + ) + + def __repr__(self): + """Form the representative string.""" + return ''.join([ + 'Range(', ', '.join(repr(i) for i in self.args), ')' + ]) + + def __str__(self): + """Form readable string representation.""" + return str(self._label)
0f72c4bf32986aae7a59b2380c5a314038c7ed61
aids/stack/queue_two_stacks.py
aids/stack/queue_two_stacks.py
'''
Implement Queue data structure using two stacks

'''

from stack import Stack

class QueueUsingTwoStacks(object):
    def __init__(self):
        '''
        Initialize Queue

        '''
        self.stack1 = Stack()
        self.stack2 = Stack()

    def __len__(self):
        '''
        Return number of items in Queue

        '''
        return len(self.stack1) + len(self.stack2)

    def enqueue(self,value):
        '''
        Enqueue item to queue

        '''
        self.stack1.push(value)

    def dequeue(self):
        '''
        Dequeue item from queue

        '''
        if not self.stack2.is_empty():
            return self.stack2.pop()
        while not self.stack1.is_empty():
            self.stack2.push(self.stack1.pop())
        return self.stack2.pop()
Add Queue implementation using two stacks
Add Queue implementation using two stacks
Python
mit
ueg1990/aids
--- +++ @@ -0,0 +1,40 @@ +''' +Implement Queue data structure using two stacks + +''' + +from stack import Stack + +class QueueUsingTwoStacks(object): + def __init__(self): + ''' + Initialize Queue + + ''' + self.stack1 = Stack() + self.stack2 = Stack() + + def __len__(self): + ''' + Return number of items in Queue + + ''' + return len(self.stack1) + len(self.stack2) + + def enqueue(self,value): + ''' + Enqueue item to queue + + ''' + self.stack1.push(value) + + def dequeue(self): + ''' + Dequeue item from queue + + ''' + if not self.stack2.is_empty(): + return self.stack2.pop() + while not self.stack1.is_empty(): + self.stack2.push(self.stack1.pop()) + return self.stack2.pop()
1cc99c8e7c020457034d8ff1a4b85033bbe64353
tools/generator/raw-data-extractor/extract-sam.py
tools/generator/raw-data-extractor/extract-sam.py
import urllib.request
import zipfile
import re, io, os
import shutil

from pathlib import Path
from multiprocessing import Pool
from collections import defaultdict
from distutils.version import StrictVersion


packurl = "http://packs.download.atmel.com/"

shutil.rmtree("../raw-device-data/sam-devices", ignore_errors=True)
Path("../raw-device-data/sam-devices").mkdir(exist_ok=True, parents=True)

with urllib.request.urlopen(packurl) as response:
    html = response.read().decode("utf-8")
    family_links = defaultdict(list)
    for link, family, version in re.findall(r'data-link="(Atmel\.(SAM.*?)_DFP\.(.*?)\.atpack)"', html):
        family_links[family].append((link, StrictVersion(version),))
    # Choose only the latest version of the atpack
    family_links = [(family, sorted(data, key=lambda d: d[1])[-1][0])
                    for family, data in family_links.items()]

def dl(family_link):
    family, link, = family_link
    dest = "../raw-device-data/sam-devices/{}".format(family.lower())
    print("Downloading '{}'...".format(link))

    with urllib.request.urlopen(packurl + link) as content:
        z = zipfile.ZipFile(io.BytesIO(content.read()))
        print("Extracting '{}'...".format(link))
        # remove subfolders, some packs have several chips per pack
        for zi in z.infolist():
            if zi.filename.endswith(".atdf"):
                zi.filename = os.path.basename(zi.filename)
            z.extract(zi, dest)

if __name__ == "__main__":
    with Pool(len(family_links)) as p:
        p.map(dl, family_links)

# shutil.copy("patches/sam.patch", "../raw-device-data")
# os.system("(cd ../raw-device-data; patch -p1 -f --input=sam.patch)")
# os.remove("../raw-device-data/sam.patch")
Add SAM device data extractor
[dfg] Add SAM device data extractor
Python
mpl-2.0
modm-io/modm-devices
--- +++ @@ -0,0 +1,47 @@ + +import urllib.request +import zipfile +import re, io, os +import shutil + +from pathlib import Path +from multiprocessing import Pool +from collections import defaultdict +from distutils.version import StrictVersion + + +packurl = "http://packs.download.atmel.com/" + +shutil.rmtree("../raw-device-data/sam-devices", ignore_errors=True) +Path("../raw-device-data/sam-devices").mkdir(exist_ok=True, parents=True) + +with urllib.request.urlopen(packurl) as response: + html = response.read().decode("utf-8") + family_links = defaultdict(list) + for link, family, version in re.findall(r'data-link="(Atmel\.(SAM.*?)_DFP\.(.*?)\.atpack)"', html): + family_links[family].append((link, StrictVersion(version),)) + # Choose only the latest version of the atpack + family_links = [(family, sorted(data, key=lambda d: d[1])[-1][0]) + for family, data in family_links.items()] + +def dl(family_link): + family, link, = family_link + dest = "../raw-device-data/sam-devices/{}".format(family.lower()) + print("Downloading '{}'...".format(link)) + + with urllib.request.urlopen(packurl + link) as content: + z = zipfile.ZipFile(io.BytesIO(content.read())) + print("Extracting '{}'...".format(link)) + # remove subfolders, some packs have several chips per pack + for zi in z.infolist(): + if zi.filename.endswith(".atdf"): + zi.filename = os.path.basename(zi.filename) + z.extract(zi, dest) + +if __name__ == "__main__": + with Pool(len(family_links)) as p: + p.map(dl, family_links) + +# shutil.copy("patches/sam.patch", "../raw-device-data") +# os.system("(cd ../raw-device-data; patch -p1 -f --input=sam.patch)") +# os.remove("../raw-device-data/sam.patch")
3b519b6ce6319797ef0544ea2567e918fa4df1b3
hangul_test.py
hangul_test.py
import hangul

s = 'ㅎㅏㄴㅅㅓㅁㄱㅣ'
print(hangul.conjoin(s))

s = '한섬기'
print(hangul.conjoin(s))

print(ord('ㅎ'))
print(ord(u'\u1112'))
print(chr(12622))
print(chr(4370))
print(hex(12622))
print(hex(4370))
Add test module of hangul.
Add test module of hangul.
Python
mit
iandmyhand/python-utils
--- +++ @@ -0,0 +1,14 @@ +import hangul + +s = 'ㅎㅏㄴㅅㅓㅁㄱㅣ' +print(hangul.conjoin(s)) + +s = '한섬기' +print(hangul.conjoin(s)) + +print(ord('ㅎ')) +print(ord(u'\u1112')) +print(chr(12622)) +print(chr(4370)) +print(hex(12622)) +print(hex(4370))
fe760be64eac3290a358e4a488af9946ea6f2a1d
corehq/form_processor/management/commands/run_sql.py
corehq/form_processor/management/commands/run_sql.py
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
import traceback

import attr
import gevent
from django.core.management.base import BaseCommand
from django.db import connections
from six.moves import input

from corehq.sql_db.util import get_db_aliases_for_partitioned_query


@attr.s
class Statement(object):
    sql = attr.ib()
    help = attr.ib(default="")


BLOBMETA_KEY = Statement("""
CREATE INDEX CONCURRENTLY IF NOT EXISTS form_processor_xformattachmentsql_blobmeta_key
ON public.form_processor_xformattachmentsql (((
    CASE
        WHEN blob_bucket = '' THEN '' -- empty bucket -> blob_id is the key
        ELSE COALESCE(blob_bucket, 'form/' || attachment_id) || '/'
    END || blob_id
)::varchar(255)))
""", help="See https://github.com/dimagi/commcare-hq/pull/21631")

STATEMENTS = {
    "blobmeta_key": BLOBMETA_KEY,
}

MULTI_DB = 'Execute on ALL (%s) databases in parallel. Continue?'


class Command(BaseCommand):
    help = """Run SQL concurrently on partition databases."""

    def add_arguments(self, parser):
        parser.add_argument('name', choices=list(STATEMENTS), help="SQL statement name.")
        parser.add_argument('-d', '--db_name', help='Django DB alias to run on')

    def handle(self, name, db_name, **options):
        sql = STATEMENTS[name].sql
        db_names = get_db_aliases_for_partitioned_query()
        if db_name or len(db_names) == 1:
            run_sql(db_name or db_names[0], sql)
        elif not confirm(MULTI_DB % len(db_names)):
            sys.exit('abort')
        else:
            greenlets = []
            for db_name in db_names:
                g = gevent.spawn(run_sql, db_name, sql)
                greenlets.append(g)

            gevent.joinall(greenlets)
            try:
                for job in greenlets:
                    job.get()
            except Exception:
                traceback.print_exc()


def run_sql(db_name, sql):
    print("running on %s database" % db_name)
    with connections[db_name].cursor() as cursor:
        cursor.execute(sql)


def confirm(msg):
    return input(msg + "\n(y/N) ").lower() == 'y'
Add management command to run SQL on form dbs
Add management command to run SQL on form dbs

This is specifically to create an index concurrently, but could be useful in the future to run any SQL on multiple databases.

[ci skip] tested locally
Python
bsd-3-clause
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
--- +++ @@ -0,0 +1,74 @@ +from __future__ import absolute_import +from __future__ import print_function +from __future__ import unicode_literals +import sys +import traceback + +import attr +import gevent +from django.core.management.base import BaseCommand +from django.db import connections +from six.moves import input + +from corehq.sql_db.util import get_db_aliases_for_partitioned_query + + +@attr.s +class Statement(object): + sql = attr.ib() + help = attr.ib(default="") + + +BLOBMETA_KEY = Statement(""" +CREATE INDEX CONCURRENTLY IF NOT EXISTS form_processor_xformattachmentsql_blobmeta_key +ON public.form_processor_xformattachmentsql ((( + CASE + WHEN blob_bucket = '' THEN '' -- empty bucket -> blob_id is the key + ELSE COALESCE(blob_bucket, 'form/' || attachment_id) || '/' + END || blob_id +)::varchar(255))) +""", help="See https://github.com/dimagi/commcare-hq/pull/21631") + +STATEMENTS = { + "blobmeta_key": BLOBMETA_KEY, +} + +MULTI_DB = 'Execute on ALL (%s) databases in parallel. Continue?' + + +class Command(BaseCommand): + help = """Run SQL concurrently on partition databases.""" + + def add_arguments(self, parser): + parser.add_argument('name', choices=list(STATEMENTS), help="SQL statement name.") + parser.add_argument('-d', '--db_name', help='Django DB alias to run on') + + def handle(self, name, db_name, **options): + sql = STATEMENTS[name].sql + db_names = get_db_aliases_for_partitioned_query() + if db_name or len(db_names) == 1: + run_sql(db_name or db_names[0], sql) + elif not confirm(MULTI_DB % len(db_names)): + sys.exit('abort') + else: + greenlets = [] + for db_name in db_names: + g = gevent.spawn(run_sql, db_name, sql) + greenlets.append(g) + + gevent.joinall(greenlets) + try: + for job in greenlets: + job.get() + except Exception: + traceback.print_exc() + + +def run_sql(db_name, sql): + print("running on %s database" % db_name) + with connections[db_name].cursor() as cursor: + cursor.execute(sql) + + +def confirm(msg): + return input(msg + "\n(y/N) ").lower() == 'y'
1040e17e006fef93d990a6212e8be06e0a818c2f
middleware/python/test_middleware.py
middleware/python/test_middleware.py
from tyk.decorators import *

@Pre
def AddSomeHeader(request, session):
    # request['Body'] = 'tyk=python'
    request['SetHeaders']['SomeHeader'] = 'python2'
    return request, session

def NotARealHandler():
    pass
from tyk.decorators import *
from tyk.gateway import TykGateway as tyk

@Pre
def AddSomeHeader(request, session):
    request['SetHeaders']['SomeHeader'] = 'python'
    tyk.store_data( "cool_key", "cool_value", 300 )
    return request, session

def NotARealHandler():
    pass
Update middleware syntax with "TykGateway"
Update middleware syntax with "TykGateway"
Python
mpl-2.0
mvdan/tyk,nebolsin/tyk,nebolsin/tyk,lonelycode/tyk,mvdan/tyk,mvdan/tyk,lonelycode/tyk,mvdan/tyk,nebolsin/tyk,lonelycode/tyk,mvdan/tyk,mvdan/tyk,nebolsin/tyk,mvdan/tyk,nebolsin/tyk,nebolsin/tyk,mvdan/tyk,nebolsin/tyk,nebolsin/tyk
--- +++ @@ -1,9 +1,10 @@ from tyk.decorators import * +from tyk.gateway import TykGateway as tyk @Pre def AddSomeHeader(request, session): - # request['Body'] = 'tyk=python' - request['SetHeaders']['SomeHeader'] = 'python2' + request['SetHeaders']['SomeHeader'] = 'python' + tyk.store_data( "cool_key", "cool_value", 300 ) return request, session def NotARealHandler():
88d87f4051629c6a32e458077c543275eed0243e
setup.py
setup.py
from setuptools import setup, find_packages

setup(
    name='jupyterhub-kubespawner',
    version='0.5.1',
    install_requires=[
        'jupyterhub',
        'pyyaml',
        'kubernetes==2.*',
        'escapism',
        'jupyter',
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    description='JupyterHub Spawner targeting Kubernetes',
    url='http://github.com/jupyterhub/kubespawner',
    author='Yuvi Panda',
    author_email='yuvipanda@gmail.com',
    license='BSD',
    packages=find_packages(),
)
from setuptools import setup, find_packages

setup(
    name='jupyterhub-kubespawner',
    version='0.5.1',
    install_requires=[
        'jupyterhub',
        'pyYAML',
        'kubernetes==2.*',
        'escapism',
        'jupyter',
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    description='JupyterHub Spawner targeting Kubernetes',
    url='http://github.com/jupyterhub/kubespawner',
    author='Yuvi Panda',
    author_email='yuvipanda@gmail.com',
    license='BSD',
    packages=find_packages(),
)
Fix install of pyYAML on Travis
Fix install of pyYAML on Travis
Python
bsd-3-clause
yuvipanda/jupyterhub-kubernetes-spawner,jupyterhub/kubespawner,ktong/kubespawner
--- +++ @@ -5,7 +5,7 @@ version='0.5.1', install_requires=[ 'jupyterhub', - 'pyyaml', + 'pyYAML', 'kubernetes==2.*', 'escapism', 'jupyter',
6534d06c450be73044d8130cfa6a534f7bff885f
iypm_domain.py
iypm_domain.py
import sys

try:
    from troposphere import Join, Sub, Output, Export
    from troposphere import Parameter, Ref, Template
    from troposphere.route53 import HostedZone
    from troposphere.certificatemanager import Certificate
except ImportError:
    sys.exit('Unable to import troposphere. '
             'Try "pip install troposphere[policy]".')


t = Template()


t.add_description(
    'Template for creating a DNS Zone and SSL Certificate. '
    'Note: Stack creation will block until domain ownership is verified.')


zone_name = t.add_parameter(Parameter(
    'ZoneName',
    Description='The name of the DNS Zone to create (example.com).',
    Type='String'
))


hosted_zone = t.add_resource(HostedZone('DNSZone', Name=Ref(zone_name)))


acm_certificate = t.add_resource(Certificate(
    'Certificate',
    DomainName=Ref(zone_name),
    SubjectAlternativeNames=[Sub('*.${ZoneName}')]
))


t.add_output([
    Output(
        'ZoneId',
        Description='Route53 Zone ID',
        Value=Ref(hosted_zone),
        Export=Export(Sub('${AWS::StackName}-${ZoneName}-R53Zone'))
    ),
    Output(
        'CertificateId',
        Description='ACM Certificate ARN',
        Value=Ref(acm_certificate),
        Export=Export(Sub('${AWS::StackName}-${ZoneName}-CertARN'))
    )
])


print(t.to_json())
Add troposphere cloudformation domain and ssl script
Add troposphere cloudformation domain and ssl script
Python
mpl-2.0
MinnSoe/ifyoupayme,MinnSoe/ifyoupayme,MinnSoe/ifyoupayme
--- +++ @@ -0,0 +1,54 @@ +import sys + +try: + from troposphere import Join, Sub, Output, Export + from troposphere import Parameter, Ref, Template + from troposphere.route53 import HostedZone + from troposphere.certificatemanager import Certificate +except ImportError: + sys.exit('Unable to import troposphere. ' + 'Try "pip install troposphere[policy]".') + + +t = Template() + + +t.add_description( + 'Template for creating a DNS Zone and SSL Certificate. ' + 'Note: Stack creation will block until domain ownership is verified.') + + +zone_name = t.add_parameter(Parameter( + 'ZoneName', + Description='The name of the DNS Zone to create (example.com).', + Type='String' +)) + + +hosted_zone = t.add_resource(HostedZone('DNSZone', Name=Ref(zone_name))) + + +acm_certificate = t.add_resource(Certificate( + 'Certificate', + DomainName=Ref(zone_name), + SubjectAlternativeNames=[Sub('*.${ZoneName}')] +)) + + +t.add_output([ + Output( + 'ZoneId', + Description='Route53 Zone ID', + Value=Ref(hosted_zone), + Export=Export(Sub('${AWS::StackName}-${ZoneName}-R53Zone')) + ), + Output( + 'CertificateId', + Description='ACM Certificate ARN', + Value=Ref(acm_certificate), + Export=Export(Sub('${AWS::StackName}-${ZoneName}-CertARN')) + ) +]) + + +print(t.to_json())
9f48522c385c81200f04a027e0299ddf7c81ef84
runtime_stats/combine_graphs.py
runtime_stats/combine_graphs.py
#!/usr/bin/env python2.7
#
# Copyright 2011-2013 Colin Scott
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from generate_graph import write_data_file, load_json, DataInfo, write_gpi_template, invoke_gnuplot
import string
import argparse
import json
import os

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="generate a plot")
    # TODO(cs): generalize to indefinite input files
    parser.add_argument('input1', metavar="INPUT1",
                        help='''The first input json file''')
    parser.add_argument('title1', metavar="TITLE1",
                        help='''The title for input1's line''')
    parser.add_argument('input2', metavar="INPUT2",
                        help='''The second input json file''')
    parser.add_argument('title2', metavar="TITLE2",
                        help='''The title for input2's line''')
    args = parser.parse_args()

    basename = os.path.basename(args.input1)
    dirname = os.path.dirname(args.input1)
    gpi_filename = string.replace(dirname + "combined_" + basename, ".json", ".gpi")
    output_filename = string.replace(dirname + "combined_" + basename, ".json", ".pdf")

    graph_title = ""

    data_info_list = []
    for input_json, line_title in [(args.input1, args.title1), (args.input2, args.title2)]:
        dat_filename = string.replace(input_json, ".json", ".dat")
        stats = load_json(input_json)
        write_data_file(dat_filename, stats)
        data_info_list.append(DataInfo(title=line_title, filename=dat_filename))
        if 'prune_duration_seconds' in stats:
            graph_title += "%s runtime=%.1fs" % (line_title, stats['prune_duration_seconds'])

    write_gpi_template(gpi_filename, output_filename, data_info_list, graph_title)
    invoke_gnuplot(gpi_filename)

    print "Output placed in %s" % output_filename
Add a script for combining graphs
Add a script for combining graphs
Python
apache-2.0
jmiserez/sts,ucb-sts/sts,ucb-sts/sts,jmiserez/sts
--- +++ @@ -0,0 +1,55 @@ +#!/usr/bin/env python2.7 +# +# Copyright 2011-2013 Colin Scott +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at: +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from generate_graph import write_data_file, load_json, DataInfo, write_gpi_template, invoke_gnuplot +import string +import argparse +import json +import os + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description="generate a plot") + # TODO(cs): generalize to indefinite input files + parser.add_argument('input1', metavar="INPUT1", + help='''The first input json file''') + parser.add_argument('title1', metavar="TITLE1", + help='''The title for input1's line''') + parser.add_argument('input2', metavar="INPUT2", + help='''The second input json file''') + parser.add_argument('title2', metavar="TITLE2", + help='''The title for input2's line''') + args = parser.parse_args() + + basename = os.path.basename(args.input1) + dirname = os.path.dirname(args.input1) + gpi_filename = string.replace(dirname + "combined_" + basename, ".json", ".gpi") + output_filename = string.replace(dirname + "combined_" + basename, ".json", ".pdf") + + graph_title = "" + + data_info_list = [] + for input_json, line_title in [(args.input1, args.title1), (args.input2, args.title2)]: + dat_filename = string.replace(input_json, ".json", ".dat") + stats = load_json(input_json) + write_data_file(dat_filename, stats) + data_info_list.append(DataInfo(title=line_title, filename=dat_filename)) + if 'prune_duration_seconds' in stats: + graph_title += "%s runtime=%.1fs" % (line_title, stats['prune_duration_seconds']) + + write_gpi_template(gpi_filename, output_filename, data_info_list, graph_title) + invoke_gnuplot(gpi_filename) + + print "Output placed in %s" % output_filename
c689a03dcc84315ff5c3796615a80146f8e74d1f
scripts/get_bank_registry_lv.py
scripts/get_bank_registry_lv.py
#!/usr/bin/env python
import json
import xlrd
import requests

URL = "https://www.bank.lv/images/stories/pielikumi/makssist/bic_saraksts_22.01.2020_eng.xls"


def process():
    registry = []

    book = xlrd.open_workbook(file_contents=requests.get(URL).content)
    sheet = book.sheet_by_index(0)

    for row in list(sheet.get_rows())[2:]:
        name, bank_code, bic = row[1:]
        registry.append(
            {
                "country_code": "LV",
                "primary": True,
                "bic": bic.value.upper(),
                "bank_code": bank_code.value[4:8],
                "name": name.value,
                "short_name": name.value,
            }
        )
    return registry


if __name__ == "__main__":
    with open("schwifty/bank_registry/generated_lv.json", "w") as fp:
        json.dump(process(), fp, indent=2)
Add generate Latvia bank registry script
Add generate Latvia bank registry script
Python
mit
figo-connect/schwifty
--- +++ @@ -0,0 +1,32 @@ +#!/usr/bin/env python +import json +import xlrd +import requests + +URL = "https://www.bank.lv/images/stories/pielikumi/makssist/bic_saraksts_22.01.2020_eng.xls" + + +def process(): + registry = [] + + book = xlrd.open_workbook(file_contents=requests.get(URL).content) + sheet = book.sheet_by_index(0) + + for row in list(sheet.get_rows())[2:]: + name, bank_code, bic = row[1:] + registry.append( + { + "country_code": "LV", + "primary": True, + "bic": bic.value.upper(), + "bank_code": bank_code.value[4:8], + "name": name.value, + "short_name": name.value, + } + ) + return registry + + +if __name__ == "__main__": + with open("schwifty/bank_registry/generated_lv.json", "w") as fp: + json.dump(process(), fp, indent=2)
90fc1ce356ca1bc367f6a5234d2267600a0a789a
contrib/create_smimea.py
contrib/create_smimea.py
#!/usr/bin/env python

import os
import sys
import base64 as b64
import argparse

def smimea(usage, der):
    cert = []

    c = 0
    with open(der, "rb") as f:
        while True:
            l = f.read(1024)
            if l == "": break

            c += len(l)
            cert.append(l)

    data = b64.b16encode("".join(cert))
    print "\# %i 0%i000000%s" % (c + 4, usage, data)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(epilog="Generate SMIMEA RDATA for DNS")
    parser.add_argument("--usage-field",
                        "-u",
                        default=1,
                        type=int,
                        choices=(1, 3),
                        help="Certificate usage field")
    parser.add_argument("--email",
                        "-e",
                        default="",
                        type=str,
                        help="Create SHA224 LHS for an email address")
    parser.add_argument("--cert",
                        "-c",
                        default="",
                        required=True,
                        help="x509 certificate in DER format")

    args = parser.parse_args()

    if not os.path.exists(args.cert):
        print("File not found: '%s'" % args.cert)
        sys.exit(os.EX_USAGE)

    if args.email is not "":
        import hashlib
        local, domain = args.email.split("@")
        print hashlib.sha224(local).hexdigest() + \
              "._encr._smimecert.%s. IN TYPE65514" % domain,

    smimea(args.usage_field, args.cert)
    sys.exit(os.EX_OK)
Add little helper script to create DNS records
Add little helper script to create DNS records
Python
agpl-3.0
sys4/smilla
--- +++ @@ -0,0 +1,55 @@ +#!/usr/bin/env python + +import os +import sys +import base64 as b64 +import argparse + +def smimea(usage, der): + cert = [] + + c = 0 + with open(der, "rb") as f: + while True: + l = f.read(1024) + if l == "": break + + c += len(l) + cert.append(l) + + data = b64.b16encode("".join(cert)) + print "\# %i 0%i000000%s" % (c + 4, usage, data) + +if __name__ == "__main__": + parser = argparse.ArgumentParser(epilog="Generate SMIMEA RDATA for DNS") + parser.add_argument("--usage-field", + "-u", + default=1, + type=int, + choices=(1, 3), + help="Certificate usage field") + parser.add_argument("--email", + "-e", + default="", + type=str, + help="Create SHA224 LHS for an email address") + parser.add_argument("--cert", + "-c", + default="", + required=True, + help="x509 certificate in DER format") + + args = parser.parse_args() + + if not os.path.exists(args.cert): + print("File not found: '%s'" % args.cert) + sys.exit(os.EX_USAGE) + + if args.email is not "": + import hashlib + local, domain = args.email.split("@") + print hashlib.sha224(local).hexdigest() + \ + "._encr._smimecert.%s. IN TYPE65514" % domain, + + smimea(args.usage_field, args.cert) + sys.exit(os.EX_OK)
1f80b634596bfaa8c4b538a5e8011399bcad6253
test/selenium/src/lib/element/widget_info.py
test/selenium/src/lib/element/widget_info.py
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: jernej@reciprocitylabs.com
# Maintained By: jernej@reciprocitylabs.com

from lib import base
from lib.constants import locator
from lib.page import widget_modal
from lib.page import lhn_modal


class _DropdownSettings(base.Component):
    _locator = locator.Widget

    def __init__(self, driver):
        super(_DropdownSettings, self).__init__(driver)
        self.edit = base.Button(driver, self._locator.DROPDOWN_SETTINGS_EDIT)
        self.permalink = base.Button(driver,
                                     self._locator.DROPDOWN_SETTINGS_PERMALINK)
        self.delete = base.Button(driver, self._locator.DROPDOWN_DELETE)

    def select_edit(self):
        raise NotImplementedError

    def select_get_permalink(self):
        self.permalink.click()

    def select_delete(self):
        """
        Returns:
            DeleteObjectModal
        """
        self.delete.click()
        return widget_modal.DeleteObjectModal(self._driver)


class DropdownSettingsPrograms(_DropdownSettings):
    def select_edit(self):
        """
        Returns:
            lhn_modal.new_program.NewProgramModal
        """
        self.edit.click()
        return lhn_modal.new_program.EditProgramModal(self._driver)
Add missing widget info element
Add missing widget info element
Python
apache-2.0
josthkko/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,prasannav7/ggrc-core,selahssea/ggrc-core,prasannav7/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core
--- +++ @@ -0,0 +1,44 @@ +# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file> +# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file> +# Created By: jernej@reciprocitylabs.com +# Maintained By: jernej@reciprocitylabs.com + +from lib import base +from lib.constants import locator +from lib.page import widget_modal +from lib.page import lhn_modal + + +class _DropdownSettings(base.Component): + _locator = locator.Widget + + def __init__(self, driver): + super(_DropdownSettings, self).__init__(driver) + self.edit = base.Button(driver, self._locator.DROPDOWN_SETTINGS_EDIT) + self.permalink = base.Button(driver, + self._locator.DROPDOWN_SETTINGS_PERMALINK) + self.delete = base.Button(driver, self._locator.DROPDOWN_DELETE) + + def select_edit(self): + raise NotImplementedError + + def select_get_permalink(self): + self.permalink.click() + + def select_delete(self): + """ + Returns: + DeleteObjectModal + """ + self.delete.click() + return widget_modal.DeleteObjectModal(self._driver) + + +class DropdownSettingsPrograms(_DropdownSettings): + def select_edit(self): + """ + Returns: + lhn_modal.new_program.NewProgramModal + """ + self.edit.click() + return lhn_modal.new_program.EditProgramModal(self._driver)