commit
stringlengths
40
40
old_file
stringlengths
4
150
new_file
stringlengths
4
150
old_contents
stringlengths
0
3.26k
new_contents
stringlengths
1
4.43k
subject
stringlengths
15
501
message
stringlengths
15
4.06k
lang
stringclasses
4 values
license
stringclasses
13 values
repos
stringlengths
5
91.5k
diff
stringlengths
0
4.35k
426f90ba500aa5d213a8b130e1841806e2dae388
solver/operators.py
solver/operators.py
import random import bisect def select(population): """Roulette wheel selection. Each individual is selected to reproduce, with probability directly proportional to its fitness score. :params population: Collection of the individuals for selecting. Usage:: >>> from operators import select >>> population = [ 'create population' ] >>> parent = select(population) """ fitnesses = [individual.fitness for individual in population] probability_intervals = [sum(fitnesses[:i+1]) for i in range(len(fitnesses))] random_select = random.uniform(0, probability_intervals[-1]) selected_index = bisect.bisect_left(probability_intervals, random_select) return population[selected_index]
Implement roulette wheel selection algorithm
Implement roulette wheel selection algorithm
Python
mit
nemanja-m/gaps,nemanja-m/genetic-jigsaw-solver
--- +++ @@ -0,0 +1,28 @@ +import random +import bisect + +def select(population): + """Roulette wheel selection. + + Each individual is selected to reproduce, with probability directly + proportional to its fitness score. + + :params population: Collection of the individuals for selecting. + + Usage:: + + >>> from operators import select + >>> population = [ 'create population' ] + >>> parent = select(population) + + """ + + fitnesses = [individual.fitness for individual in population] + probability_intervals = [sum(fitnesses[:i+1]) for i in range(len(fitnesses))] + + random_select = random.uniform(0, probability_intervals[-1]) + selected_index = bisect.bisect_left(probability_intervals, random_select) + + return population[selected_index] + +
31fe72931d29d81088f23c7609aa612d4735814b
python3-py/examples/new_features.py
python3-py/examples/new_features.py
def ls(self, msg, match): """ A sample function to test the parsing of ** resolution """ langs = list(map(lambda x: x.lower(), match.group(1).split())) bears = client.list.bears.get().json() bears = [{**{'name': bear}, **content} for bear, content in bears.items()] # Asyncio example from https://stackabuse.com/python-async-await-tutorial/ import asyncio async def ping_server(ip): pass @asyncio.coroutine def load_file(path): pass
Add new sample files for Python3-py grammar
Add new sample files for Python3-py grammar
Python
mit
antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4,antlr/grammars-v4
--- +++ @@ -0,0 +1,20 @@ +def ls(self, msg, match): + """ + A sample function to test the parsing of ** resolution + """ + langs = list(map(lambda x: x.lower(), match.group(1).split())) + + bears = client.list.bears.get().json() + bears = [{**{'name': bear}, **content} + for bear, content in bears.items()] + +# Asyncio example from https://stackabuse.com/python-async-await-tutorial/ + +import asyncio + +async def ping_server(ip): + pass + +@asyncio.coroutine +def load_file(path): + pass
fac97130396057802f1ebf21928667a971395ba9
examples/ex_tabler.py
examples/ex_tabler.py
from tabler import Tabler table = """<table> <thead> <tr> <th>Number</th> <th>First Name</th> <th>Last Name</th> <th>Phone Number</th> </tr> <tr> <td>1</td> <td>Bob</td> <td>Evans</td> <td>(847) 332-0461</td> </tr> <tr> <td>2</td> <td>Mary</td> <td>Newell</td> <td>(414) 617-9516</td> </tr> </thead> </table>""" parser = Tabler(table) print "There are", len(parser.rows), "rows." print "First names:" for row in parser.rows: print row["first_name"]
Add a basic example of the Tabler API.
Add a basic example of the Tabler API.
Python
bsd-3-clause
bschmeck/tabler
--- +++ @@ -0,0 +1,30 @@ +from tabler import Tabler + +table = """<table> +<thead> + <tr> + <th>Number</th> + <th>First Name</th> + <th>Last Name</th> + <th>Phone Number</th> + </tr> + <tr> + <td>1</td> + <td>Bob</td> + <td>Evans</td> + <td>(847) 332-0461</td> + </tr> + <tr> + <td>2</td> + <td>Mary</td> + <td>Newell</td> + <td>(414) 617-9516</td> + </tr> +</thead> +</table>""" + +parser = Tabler(table) +print "There are", len(parser.rows), "rows." +print "First names:" +for row in parser.rows: + print row["first_name"]
e279f8a046d2d9b985df2b01abe23dbe154da188
cinspect/tests/test_version.py
cinspect/tests/test_version.py
from __future__ import absolute_import, print_function # Standard library import unittest # Local library from cinspect.index.serialize import _get_most_similar class TestVersions(unittest.TestCase): def test_most_similar(self): # Given names = ['index-2.7.3.json', 'index-3.4.json'] version = '2.7.8' # When name = _get_most_similar(version, names) # Then self.assertEqual('index-2.7.3.json', name)
Add a simple test for version finder.
Add a simple test for version finder.
Python
bsd-3-clause
punchagan/cinspect,punchagan/cinspect
--- +++ @@ -0,0 +1,21 @@ +from __future__ import absolute_import, print_function + +# Standard library +import unittest + +# Local library +from cinspect.index.serialize import _get_most_similar + + +class TestVersions(unittest.TestCase): + + def test_most_similar(self): + # Given + names = ['index-2.7.3.json', 'index-3.4.json'] + version = '2.7.8' + + # When + name = _get_most_similar(version, names) + + # Then + self.assertEqual('index-2.7.3.json', name)
fcbfaded67747984899dbbabb2cdcdefe00002df
examples/download1.py
examples/download1.py
import subprocess from simpleflow.download import with_binaries @with_binaries({ "how-is-simpleflow": "s3://botify-labs-simpleflow/binaries/latest/how_is_simpleflow", }) def a_task(): print "command: which how-is-simpleflow" print subprocess.check_output(["which", "how-is-simpleflow"]) print "command: how-is-simpleflow" print subprocess.check_output(["how-is-simpleflow"]) a_task()
Add example script for 'simpleflow.download.with_binaries' decorator
Add example script for 'simpleflow.download.with_binaries' decorator
Python
mit
botify-labs/simpleflow,botify-labs/simpleflow
--- +++ @@ -0,0 +1,17 @@ +import subprocess + +from simpleflow.download import with_binaries + + +@with_binaries({ + "how-is-simpleflow": "s3://botify-labs-simpleflow/binaries/latest/how_is_simpleflow", +}) +def a_task(): + print "command: which how-is-simpleflow" + print subprocess.check_output(["which", "how-is-simpleflow"]) + + print "command: how-is-simpleflow" + print subprocess.check_output(["how-is-simpleflow"]) + + +a_task()
639143e3145682d776251f39f0a791f0c77e5169
apps/domain/tests/test_routes/test_association_requests.py
apps/domain/tests/test_routes/test_association_requests.py
def test_send_association_request(client): result = client.post("/association-requests/request", data={"id": "54623156", "address": "159.15.223.162"}) assert result.status_code == 200 assert result.get_json() == {"msg": "Association request sent!"} def test_receive_association_request(client): result = client.post("/association-requests/receive", data={"id": "54623156", "address": "159.15.223.162"}) assert result.status_code == 200 assert result.get_json() == {"msg": "Association request received!"} def test_reply_association_request(client): result = client.post("/association-requests/respond", data={"id": "54623156", "address": "159.15.223.162"}) assert result.status_code == 200 assert result.get_json() == {"msg": "Association request was replied!"} def get_all_association_requests(client): result = client.get("/association-requests/") assert result.status_code == 200 assert result.get_json() == {"association-requests": ["Network A", "Network B", "Network C"]} def get_specific_association_requests(client): result = client.get("/association-requests/51613546") assert result.status_code == 200 assert result.get_json() == { "association-request": { "ID": "51613546", "address": "156.89.33.200", } } def delete_association_requests(client): result = client.get("/association-requests/51661659") assert result.status_code == 200 assert result.get_json() == {"msg": "Association request deleted!"}
ADD Domain association_requests unit tests
ADD Domain association_requests unit tests
Python
apache-2.0
OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft
--- +++ @@ -0,0 +1,35 @@ + +def test_send_association_request(client): + result = client.post("/association-requests/request", data={"id": "54623156", "address": "159.15.223.162"}) + assert result.status_code == 200 + assert result.get_json() == {"msg": "Association request sent!"} + +def test_receive_association_request(client): + result = client.post("/association-requests/receive", data={"id": "54623156", "address": "159.15.223.162"}) + assert result.status_code == 200 + assert result.get_json() == {"msg": "Association request received!"} + +def test_reply_association_request(client): + result = client.post("/association-requests/respond", data={"id": "54623156", "address": "159.15.223.162"}) + assert result.status_code == 200 + assert result.get_json() == {"msg": "Association request was replied!"} + +def get_all_association_requests(client): + result = client.get("/association-requests/") + assert result.status_code == 200 + assert result.get_json() == {"association-requests": ["Network A", "Network B", "Network C"]} + +def get_specific_association_requests(client): + result = client.get("/association-requests/51613546") + assert result.status_code == 200 + assert result.get_json() == { + "association-request": { + "ID": "51613546", + "address": "156.89.33.200", + } + } + +def delete_association_requests(client): + result = client.get("/association-requests/51661659") + assert result.status_code == 200 + assert result.get_json() == {"msg": "Association request deleted!"}
1d7d86ba12fd00d388b939206abf305a6db569db
reid/train_siamese.py
reid/train_siamese.py
from __future__ import print_function import time from torch.autograd import Variable from .evaluation import accuracy from .utils.meters import AverageMeter class Trainer(object): def __init__(self, model, criterion, args): super(Trainer, self).__init__() self.model = model self.criterion = criterion self.args = args def train(self, epoch, data_loader, optimizer): batch_time = AverageMeter() data_time = AverageMeter() losses = AverageMeter() top1 = AverageMeter() self.model.train() end = time.time() for i, pair in enumerate(data_loader): data_time.update(time.time() - end) inputs1, inputs2, targets = self._parse_data(pair) outputs = self.model(inputs1, inputs2) loss = self.criterion(outputs, targets) prec1, = accuracy(outputs.data, targets.data) losses.update(loss.data[0], inputs1.size(0)) top1.update(prec1[0], inputs1.size(0)) optimizer.zero_grad() loss.backward() optimizer.step() batch_time.update(time.time() - end) end = time.time() if (i + 1) % self.args.print_freq == 0: print('Epoch: [{}][{}/{}]\t' 'Time {:.3f} ({:.3f})\t' 'Data {:.3f} ({:.3f})\t' 'Loss {:.3f} ({:.3f})\t' 'Top1 {:.2%} ({:.2%})\t'.format( epoch, i + 1, len(data_loader), batch_time.val, batch_time.avg, data_time.val, data_time.avg, losses.val, losses.avg, top1.val, top1.avg)) def _parse_data(self, pair): (imgs1, _, pids1, _), (imgs2, _, pids2, _) = pair inputs1, inputs2 = Variable(imgs1), Variable(imgs2) targets = (pids1 == pids2).long().cuda() return inputs1, inputs2, targets
Add trainer for siamese models
Add trainer for siamese models
Python
mit
Flowerfan524/TriClustering,Flowerfan524/TriClustering,zydou/open-reid,Cysu/open-reid
--- +++ @@ -0,0 +1,60 @@ +from __future__ import print_function +import time + +from torch.autograd import Variable + +from .evaluation import accuracy +from .utils.meters import AverageMeter + + +class Trainer(object): + def __init__(self, model, criterion, args): + super(Trainer, self).__init__() + self.model = model + self.criterion = criterion + self.args = args + + def train(self, epoch, data_loader, optimizer): + batch_time = AverageMeter() + data_time = AverageMeter() + losses = AverageMeter() + top1 = AverageMeter() + + self.model.train() + + end = time.time() + for i, pair in enumerate(data_loader): + data_time.update(time.time() - end) + + inputs1, inputs2, targets = self._parse_data(pair) + + outputs = self.model(inputs1, inputs2) + loss = self.criterion(outputs, targets) + + prec1, = accuracy(outputs.data, targets.data) + losses.update(loss.data[0], inputs1.size(0)) + top1.update(prec1[0], inputs1.size(0)) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + batch_time.update(time.time() - end) + end = time.time() + + if (i + 1) % self.args.print_freq == 0: + print('Epoch: [{}][{}/{}]\t' + 'Time {:.3f} ({:.3f})\t' + 'Data {:.3f} ({:.3f})\t' + 'Loss {:.3f} ({:.3f})\t' + 'Top1 {:.2%} ({:.2%})\t'.format( + epoch, i + 1, len(data_loader), + batch_time.val, batch_time.avg, + data_time.val, data_time.avg, + losses.val, losses.avg, top1.val, top1.avg)) + + def _parse_data(self, pair): + (imgs1, _, pids1, _), (imgs2, _, pids2, _) = pair + inputs1, inputs2 = Variable(imgs1), Variable(imgs2) + targets = (pids1 == pids2).long().cuda() + return inputs1, inputs2, targets
528afdc0f00b958f6920bd6e66c3bac841b3a8b8
server/kcaa/kcsapi/mission_test.py
server/kcaa/kcsapi/mission_test.py
#!/usr/bin/env python import pytest import mission class TestMissionList(object): def pytest_funcarg__mission_list(self): mission_list = mission.MissionList() mission_list.missions.extend([ mission.Mission( id=1, name=u'Mission1', maparea=mission.Mission.MAPAREA_BASE), mission.Mission( id=2, name=u'Mission2', maparea=mission.Mission.MAPAREA_BASE), mission.Mission( id=3, name=u'Mission3', maparea=mission.Mission.MAPAREA_SOUTHWESTERN_ISLANDS), mission.Mission( id=4, name=u'Mission4', maparea=mission.Mission.MAPAREA_SOUTHWESTERN_ISLANDS), mission.Mission( id=5, name=u'Mission5', maparea=mission.Mission.MAPAREA_SOUTHWESTERN_ISLANDS)]) return mission_list def test_get_mission(self, mission_list): assert mission_list.get_mission(0) is None assert mission_list.get_mission(1) == mission_list.missions[0] assert mission_list.get_mission(2) == mission_list.missions[1] assert mission_list.get_mission(3) == mission_list.missions[2] assert mission_list.get_mission(4) == mission_list.missions[3] assert mission_list.get_mission(5) == mission_list.missions[4] assert mission_list.get_mission(6) is None def test_get_index_in_mapaea(self, mission_list): assert mission_list.get_index_in_maparea(mission_list.missions[0]) == 0 assert mission_list.get_index_in_maparea(mission_list.missions[1]) == 1 assert mission_list.get_index_in_maparea(mission_list.missions[2]) == 0 assert mission_list.get_index_in_maparea(mission_list.missions[3]) == 1 assert mission_list.get_index_in_maparea(mission_list.missions[4]) == 2 def main(): import doctest doctest.testmod(mission) import sys sys.exit(pytest.main(args=[__file__.replace('.pyc', '.py')])) if __name__ == '__main__': main()
Add a test for mission.
Add a test for mission.
Python
apache-2.0
kcaa/kcaa,kcaa/kcaa,kcaa/kcaa,kcaa/kcaa
--- +++ @@ -0,0 +1,60 @@ +#!/usr/bin/env python + +import pytest + +import mission + + +class TestMissionList(object): + + def pytest_funcarg__mission_list(self): + mission_list = mission.MissionList() + mission_list.missions.extend([ + mission.Mission( + id=1, + name=u'Mission1', + maparea=mission.Mission.MAPAREA_BASE), + mission.Mission( + id=2, + name=u'Mission2', + maparea=mission.Mission.MAPAREA_BASE), + mission.Mission( + id=3, + name=u'Mission3', + maparea=mission.Mission.MAPAREA_SOUTHWESTERN_ISLANDS), + mission.Mission( + id=4, + name=u'Mission4', + maparea=mission.Mission.MAPAREA_SOUTHWESTERN_ISLANDS), + mission.Mission( + id=5, + name=u'Mission5', + maparea=mission.Mission.MAPAREA_SOUTHWESTERN_ISLANDS)]) + return mission_list + + def test_get_mission(self, mission_list): + assert mission_list.get_mission(0) is None + assert mission_list.get_mission(1) == mission_list.missions[0] + assert mission_list.get_mission(2) == mission_list.missions[1] + assert mission_list.get_mission(3) == mission_list.missions[2] + assert mission_list.get_mission(4) == mission_list.missions[3] + assert mission_list.get_mission(5) == mission_list.missions[4] + assert mission_list.get_mission(6) is None + + def test_get_index_in_mapaea(self, mission_list): + assert mission_list.get_index_in_maparea(mission_list.missions[0]) == 0 + assert mission_list.get_index_in_maparea(mission_list.missions[1]) == 1 + assert mission_list.get_index_in_maparea(mission_list.missions[2]) == 0 + assert mission_list.get_index_in_maparea(mission_list.missions[3]) == 1 + assert mission_list.get_index_in_maparea(mission_list.missions[4]) == 2 + + +def main(): + import doctest + doctest.testmod(mission) + import sys + sys.exit(pytest.main(args=[__file__.replace('.pyc', '.py')])) + + +if __name__ == '__main__': + main()
38e294e7d8e8053ac604fdbcdcaeed59fecae1e9
tests/test_ewf.py
tests/test_ewf.py
from os import path from digestive.ewf import EWFSource, format_supported, list_ewf_files here = path.dirname(path.abspath(__file__)) def test_format_supported(): supported = ['file.S01', 'file.E01', 'file.e01', 'file.L01', 'file.Ex01', 'file.Lx01', 'file.tar.E01'] not_supported = ['file.dd', 'file.raw', 'file.E1', 'file.Ex1', 'file.tar.gz', 'file.E01.raw'] for supported in supported: assert format_supported(supported) for not_supported in not_supported: assert not format_supported(not_supported) def test_list_ewf_files(): files = [path.join(here, 'files/random.E01'), path.join(here, 'files/random.E02')] assert list_ewf_files(path.join(here, 'files/random.E01')) == files # non-primary file is not handled as the beginning of a set assert list_ewf_files(path.join(here, 'files/random.E02')) == [path.join(here, 'files/random.E02')] def test_ewf_source_simple(): source = EWFSource(path.join(here, 'files/random.E01')) # random.E01 has two parts, should be visible assert str(source) == path.join(here, 'files/random.E01') + '..E02'
Test libewf-less calls in ewf module
Test libewf-less calls in ewf module
Python
isc
akaIDIOT/Digestive
--- +++ @@ -0,0 +1,30 @@ +from os import path + +from digestive.ewf import EWFSource, format_supported, list_ewf_files + + +here = path.dirname(path.abspath(__file__)) + + +def test_format_supported(): + supported = ['file.S01', 'file.E01', 'file.e01', 'file.L01', 'file.Ex01', 'file.Lx01', 'file.tar.E01'] + not_supported = ['file.dd', 'file.raw', 'file.E1', 'file.Ex1', 'file.tar.gz', 'file.E01.raw'] + + for supported in supported: + assert format_supported(supported) + + for not_supported in not_supported: + assert not format_supported(not_supported) + + +def test_list_ewf_files(): + files = [path.join(here, 'files/random.E01'), path.join(here, 'files/random.E02')] + assert list_ewf_files(path.join(here, 'files/random.E01')) == files + # non-primary file is not handled as the beginning of a set + assert list_ewf_files(path.join(here, 'files/random.E02')) == [path.join(here, 'files/random.E02')] + + +def test_ewf_source_simple(): + source = EWFSource(path.join(here, 'files/random.E01')) + # random.E01 has two parts, should be visible + assert str(source) == path.join(here, 'files/random.E01') + '..E02'
fbc375a51aca560554f3dd28fa212d6d877449f2
source/tests/core/test_observer.py
source/tests/core/test_observer.py
from copy import copy from pytest import fixture from vistas.core.observers.interface import * @fixture(scope='session') def observer(): class TestObserver(Observer): def __init__(self): self.x = 5 def update(self, observable): self.x **= 2 obs = TestObserver() yield obs def test_add_observer(observer): obs = Observable() obs.add_observer(observer) assert len(obs.observers) == 1 obs.add_observer(observer) assert len(obs.observers) == 1 def test_cls_observers(): assert len(Observable.observers) == 1 def test_notify_observers(observer): obs = Observable() obs.notify_observers() assert observer.x == 25 def test_remove_observer(observer): observer2 = copy(observer) obs = Observable() obs.add_observer(observer2) assert len(obs.observers) == 2 # Test removal obs.remove_observer(observer) assert len(obs.observers) == 1 # Test unique removal obs.remove_observer(observer) assert len(obs.observers) == 1 obs.remove_observer(observer2) assert len(obs.observers) == 0
Add tests for observers and observable.
Add tests for observers and observable.
Python
bsd-3-clause
VISTAS-IVES/pyvistas
--- +++ @@ -0,0 +1,52 @@ +from copy import copy +from pytest import fixture + +from vistas.core.observers.interface import * + + +@fixture(scope='session') +def observer(): + class TestObserver(Observer): + def __init__(self): + self.x = 5 + + def update(self, observable): + self.x **= 2 + obs = TestObserver() + yield obs + + +def test_add_observer(observer): + obs = Observable() + obs.add_observer(observer) + assert len(obs.observers) == 1 + obs.add_observer(observer) + assert len(obs.observers) == 1 + + +def test_cls_observers(): + assert len(Observable.observers) == 1 + + +def test_notify_observers(observer): + obs = Observable() + obs.notify_observers() + assert observer.x == 25 + + +def test_remove_observer(observer): + observer2 = copy(observer) + obs = Observable() + obs.add_observer(observer2) + assert len(obs.observers) == 2 + + # Test removal + obs.remove_observer(observer) + assert len(obs.observers) == 1 + + # Test unique removal + obs.remove_observer(observer) + assert len(obs.observers) == 1 + + obs.remove_observer(observer2) + assert len(obs.observers) == 0
3c5116f3a26fb93ab85fd973462a582a0fa5d877
bin/validate-presets.py
bin/validate-presets.py
#!/usr/bin/python import json import sys def validate_presets(presets_file): with open(presets_file) as jsonfile: presets_dict = json.load(jsonfile) for handler in presets_dict.iterkeys(): for entry in presets_dict.get(handler).get("presets"): value = entry.get("value") if not value.startswith(handler): print "ERROR: \"{0}\" handler with \"{1}\" value".format(handler, value) if __name__ == '__main__': args = sys.argv[1:] try: validate_presets(*args) except TypeError: print "{0} <handlerpresets.json>".format(sys.argv[0]) sys.exit(1)
Add script to validate web/db/handlerpresets.json file
Add script to validate web/db/handlerpresets.json file Simple validation for `web/db/handlerpresets.json` file. It prints error if handler's name is not same as beginning of value's string. Catches problems like this in handlerpresets.json: ```json "slack:": { "presets": [ { "description": "Opens man page", "value": "x-man-page://1/man" } ] } ``` Exmple command line usage: ```shell $ ./bin/validate-presets.py web/db/handlerpresets.json ERROR: "ms-word:" handler with "word:http://www.microsoft.com/investor/downloads/events/CreditSuisseReller.docx" value ERROR: "slack:" handler with "x-man-page://1/man" value ERROR: "word:" handler with "ms-word:http://www.microsoft.com/investor/downloads/events/CreditSuisseReller.docx" value ERROR: "map:" handler with "maps://?ll=50.894967,4.341626" value ERROR: "map:" handler with "maps://?daddr=San+Francisco&dirflg=d&t=h" value ERROR: "map:" handler with "maps://?saddr=San+Jose&daddr=San+Francisco&dirflg=r" value ERROR: "map:" handler with "maps://?address=1,Infinite+Loop,Cupertino,California" value ```
Python
mit
nkapu/handlers,ouspg/urlhandlers,ouspg/urlhandlers,nkapu/handlers,nkapu/handlers,nkapu/handlers,ouspg/urlhandlers,ouspg/urlhandlers
--- +++ @@ -0,0 +1,25 @@ +#!/usr/bin/python + +import json +import sys + + +def validate_presets(presets_file): + with open(presets_file) as jsonfile: + presets_dict = json.load(jsonfile) + + for handler in presets_dict.iterkeys(): + for entry in presets_dict.get(handler).get("presets"): + value = entry.get("value") + + if not value.startswith(handler): + print "ERROR: \"{0}\" handler with \"{1}\" value".format(handler, value) + + +if __name__ == '__main__': + args = sys.argv[1:] + try: + validate_presets(*args) + except TypeError: + print "{0} <handlerpresets.json>".format(sys.argv[0]) + sys.exit(1)
24b6126871a5378faa2e8f9848c279999e50cb96
ooo.py
ooo.py
#!/usr/bin/python import os import sys import re from collections import defaultdict COMIC_RE = re.compile(r'^\d+ +([^#]+)#(\d+)') def lines(todofile): with open(todofile) as todolines: for line in todolines: title_match = COMIC_RE.match(line) if title_match: # (title, issue) yield line.strip(), title_match.group(1), int(title_match.group(2)) def issues(todofile): seen = defaultdict(int) for line, title, issue in lines(todofile): if issue and seen[title] and issue != seen[title]+1: yield line, seen[title] seen[title] = issue def main(files): for todofile in files: for issue, lastissue in issues(todofile): print "%s (last seen %d)" % (issue, lastissue) if __name__ == '__main__': main(sys.argv[1:])
Check issue numbers to find out of order listings
Check issue numbers to find out of order listings
Python
mit
xchewtoyx/comicmgt,xchewtoyx/comicmgt
--- +++ @@ -0,0 +1,31 @@ +#!/usr/bin/python + +import os +import sys +import re +from collections import defaultdict + +COMIC_RE = re.compile(r'^\d+ +([^#]+)#(\d+)') + +def lines(todofile): + with open(todofile) as todolines: + for line in todolines: + title_match = COMIC_RE.match(line) + if title_match: + # (title, issue) + yield line.strip(), title_match.group(1), int(title_match.group(2)) + +def issues(todofile): + seen = defaultdict(int) + for line, title, issue in lines(todofile): + if issue and seen[title] and issue != seen[title]+1: + yield line, seen[title] + seen[title] = issue + +def main(files): + for todofile in files: + for issue, lastissue in issues(todofile): + print "%s (last seen %d)" % (issue, lastissue) + +if __name__ == '__main__': + main(sys.argv[1:])
94d7a3b01b7360001817ef3ed3ad2003f0722b14
tests/script_parser/test_parsing_complex.py
tests/script_parser/test_parsing_complex.py
from script_parser import parser import os def test_environ_init(): """ Set up variables in environment and check parser uses them to init properly. """ os.environ['client_id'] = 'x' os.environ['client_secret'] = 'y' os.environ['refresh_token'] = 'z' p = parser.Parser(['chrome.init ${env.client_id} ${env.client_secret} ${env.refresh_token}']) p.execute() assert p.variables['client_id'] == 'x' assert p.variables['client_secret'] == 'y' assert p.variables['refresh_token'] == 'z'
Add complex parse scenario - environment variables and init
Add complex parse scenario - environment variables and init
Python
mit
melkamar/webstore-manager,melkamar/webstore-manager
--- +++ @@ -0,0 +1,17 @@ +from script_parser import parser +import os + + +def test_environ_init(): + """ Set up variables in environment and check parser uses them to init properly. """ + + os.environ['client_id'] = 'x' + os.environ['client_secret'] = 'y' + os.environ['refresh_token'] = 'z' + + p = parser.Parser(['chrome.init ${env.client_id} ${env.client_secret} ${env.refresh_token}']) + p.execute() + + assert p.variables['client_id'] == 'x' + assert p.variables['client_secret'] == 'y' + assert p.variables['refresh_token'] == 'z'
d9720f1fcc3013324c9ea58620df9c458a2e314e
test/test_awc.py
test/test_awc.py
import pytest import FIAT import finat import numpy as np from gem.interpreter import evaluate from fiat_mapping import MyMapping def test_morley(): ref_cell = FIAT.ufc_simplex(2) ref_element = finat.ArnoldWinther(ref_cell, 3) ref_pts = finat.point_set.PointSet(ref_cell.make_points(2, 0, 3)) phys_cell = FIAT.ufc_simplex(2) phys_cell.vertices = ((0.0, 0.1), (1.17, -0.09), (0.15, 1.84)) mppng = MyMapping(ref_cell, phys_cell) z = (0, 0) finat_vals_gem = ref_element.basis_evaluation(0, ref_pts, coordinate_mapping=mppng)[z] finat_vals = evaluate([finat_vals_gem])[0].arr phys_cell_FIAT = FIAT.ArnoldWinther(phys_cell, 3) phys_points = phys_cell.make_points(2, 0, 3) phys_vals = phys_cell_FIAT.tabulate(0, phys_points)[z] phys_vals = phys_vals[:24].transpose((3, 0, 1, 2)) assert(np.allclose(finat_vals, phys_vals))
Add (broken) AWc test for debugging purposes
Add (broken) AWc test for debugging purposes
Python
mit
FInAT/FInAT
--- +++ @@ -0,0 +1,27 @@ +import pytest +import FIAT +import finat +import numpy as np +from gem.interpreter import evaluate +from fiat_mapping import MyMapping + + +def test_morley(): + ref_cell = FIAT.ufc_simplex(2) + ref_element = finat.ArnoldWinther(ref_cell, 3) + ref_pts = finat.point_set.PointSet(ref_cell.make_points(2, 0, 3)) + + phys_cell = FIAT.ufc_simplex(2) + phys_cell.vertices = ((0.0, 0.1), (1.17, -0.09), (0.15, 1.84)) + + mppng = MyMapping(ref_cell, phys_cell) + z = (0, 0) + finat_vals_gem = ref_element.basis_evaluation(0, ref_pts, coordinate_mapping=mppng)[z] + finat_vals = evaluate([finat_vals_gem])[0].arr + + phys_cell_FIAT = FIAT.ArnoldWinther(phys_cell, 3) + phys_points = phys_cell.make_points(2, 0, 3) + phys_vals = phys_cell_FIAT.tabulate(0, phys_points)[z] + phys_vals = phys_vals[:24].transpose((3, 0, 1, 2)) + + assert(np.allclose(finat_vals, phys_vals))
18c93f4c70a2247bcce8a853c30038097cb9f7b2
cms/apps/pages/tests/test_admin_destructive.py
cms/apps/pages/tests/test_admin_destructive.py
from django.conf import settings from django.contrib import admin from django.test import TestCase from ..models import Country, CountryGroup, Page import sys class TestArticleAdminBase(TestCase): def test_article_admin(self): NEW_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES + ( 'cms.middleware.LocalisationMiddleware', ) self.assertNotIn(Country, admin.site._registry) self.assertNotIn(CountryGroup, admin.site._registry) with self.settings(MIDDLEWARE_CLASSES=NEW_MIDDLEWARE_CLASSES): module = sys.modules['cms.apps.pages.admin'] del sys.modules['cms.apps.pages.admin'] admin.site.unregister(Page) from ..admin import page_admin assert page_admin self.assertIn(Country, admin.site._registry) self.assertIn(CountryGroup, admin.site._registry) sys.modules['cms.apps.pages.admin'] = module
Add test for optional page admin registrations for Country and CountryGroup.
Add test for optional page admin registrations for Country and CountryGroup.
Python
bsd-3-clause
jamesfoley/cms,jamesfoley/cms,danielsamuels/cms,lewiscollard/cms,jamesfoley/cms,dan-gamble/cms,lewiscollard/cms,danielsamuels/cms,danielsamuels/cms,lewiscollard/cms,dan-gamble/cms,jamesfoley/cms,dan-gamble/cms
--- +++ @@ -0,0 +1,32 @@ +from django.conf import settings +from django.contrib import admin +from django.test import TestCase + +from ..models import Country, CountryGroup, Page + +import sys + + +class TestArticleAdminBase(TestCase): + + def test_article_admin(self): + NEW_MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES + ( + 'cms.middleware.LocalisationMiddleware', + ) + + self.assertNotIn(Country, admin.site._registry) + self.assertNotIn(CountryGroup, admin.site._registry) + + with self.settings(MIDDLEWARE_CLASSES=NEW_MIDDLEWARE_CLASSES): + module = sys.modules['cms.apps.pages.admin'] + del sys.modules['cms.apps.pages.admin'] + + admin.site.unregister(Page) + + from ..admin import page_admin + assert page_admin + + self.assertIn(Country, admin.site._registry) + self.assertIn(CountryGroup, admin.site._registry) + + sys.modules['cms.apps.pages.admin'] = module
d34ad4b0b969dd6c10fc7c1646f934016ba8ddd7
tools/distrib/c-ish/check_documentation.py
tools/distrib/c-ish/check_documentation.py
#!/usr/bin/env python2.7 # Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # check for directory level 'README.md' files # check that all implementation and interface files have a \file doxygen comment import os import sys # where do we run _TARGET_DIRS = [ 'include/grpc', 'include/grpc++', 'src/core', 'src/cpp', 'test/core', 'test/cpp' ] # which file extensions do we care about _INTERESTING_EXTENSIONS = [ '.c', '.h', '.cc' ] # find our home _ROOT = os.path.abspath( os.path.join(os.path.dirname(sys.argv[0]), '../../..')) os.chdir(_ROOT) errors = 0 # walk directories, find things for target_dir in _TARGET_DIRS: for root, dirs, filenames in os.walk(target_dir): if 'README.md' not in filenames: print '%s: missing README.md' % root errors += 1 for filename in filenames: if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS: continue path = os.path.join(root, filename) with open(path) as f: contents = f.read() if '\\file' not in contents: print '%s: no \\file comment' % path errors += 1 assert errors == 0, 'error count = %d' % errors
Add a script to list undocumented files and directories
Add a script to list undocumented files and directories
Python
apache-2.0
makdharma/grpc,yugui/grpc,simonkuang/grpc,dklempner/grpc,matt-kwong/grpc,apolcyn/grpc,kumaralokgithub/grpc,geffzhang/grpc,ctiller/grpc,thinkerou/grpc,chrisdunelm/grpc,ejona86/grpc,donnadionne/grpc,yugui/grpc,nicolasnoble/grpc,dgquintas/grpc,adelez/grpc,PeterFaiman/ruby-grpc-minimal,kpayson64/grpc,grani/grpc,makdharma/grpc,greasypizza/grpc,thunderboltsid/grpc,muxi/grpc,simonkuang/grpc,simonkuang/grpc,kpayson64/grpc,vsco/grpc,geffzhang/grpc,vsco/grpc,infinit/grpc,philcleveland/grpc,daniel-j-born/grpc,jcanizales/grpc,PeterFaiman/ruby-grpc-minimal,yang-g/grpc,Crevil/grpc,carl-mastrangelo/grpc,thinkerou/grpc,jtattermusch/grpc,donnadionne/grpc,muxi/grpc,msmania/grpc,donnadionne/grpc,infinit/grpc,ctiller/grpc,greasypizza/grpc,LuminateWireless/grpc,donnadionne/grpc,kriswuollett/grpc,malexzx/grpc,Vizerai/grpc,dgquintas/grpc,wcevans/grpc,kpayson64/grpc,7anner/grpc,hstefan/grpc,muxi/grpc,stanley-cheung/grpc,royalharsh/grpc,thinkerou/grpc,jtattermusch/grpc,infinit/grpc,grani/grpc,kumaralokgithub/grpc,sreecha/grpc,firebase/grpc,wcevans/grpc,pszemus/grpc,deepaklukose/grpc,daniel-j-born/grpc,Vizerai/grpc,mehrdada/grpc,philcleveland/grpc,mehrdada/grpc,jtattermusch/grpc,soltanmm-google/grpc,zhimingxie/grpc,apolcyn/grpc,y-zeng/grpc,vjpai/grpc,thinkerou/grpc,makdharma/grpc,vjpai/grpc,vjpai/grpc,carl-mastrangelo/grpc,wcevans/grpc,thunderboltsid/grpc,y-zeng/grpc,andrewpollock/grpc,thinkerou/grpc,zhimingxie/grpc,y-zeng/grpc,ctiller/grpc,msmania/grpc,fuchsia-mirror/third_party-grpc,baylabs/grpc,kumaralokgithub/grpc,muxi/grpc,ncteisen/grpc,philcleveland/grpc,kskalski/grpc,ipylypiv/grpc,pszemus/grpc,firebase/grpc,nicolasnoble/grpc,jboeuf/grpc,dgquintas/grpc,fuchsia-mirror/third_party-grpc,malexzx/grpc,makdharma/grpc,jcanizales/grpc,ctiller/grpc,stanley-cheung/grpc,thinkerou/grpc,andrewpollock/grpc,matt-kwong/grpc,kriswuollett/grpc,nicolasnoble/grpc,kpayson64/grpc,muxi/grpc,yongni/grpc,perumaalgoog/grpc,vsco/grpc,greasypizza/grpc,ctiller/grpc,greasypizza/grpc,ppietrasa/grpc,MakMukhi/grpc,wcevans/grpc,PeterFaiman/ruby-grpc-minimal,kskalski/grpc,greasypizza/grpc,ncteisen/grpc,murgatroid99/grpc,pszemus/grpc,hstefan/grpc,jboeuf/grpc,mehrdada/grpc,a11r/grpc,philcleveland/grpc,Vizerai/grpc,vjpai/grpc,fuchsia-mirror/third_party-grpc,dgquintas/grpc,muxi/grpc,LuminateWireless/grpc,ejona86/grpc,ppietrasa/grpc,Crevil/grpc,kpayson64/grpc,rjshade/grpc,thinkerou/grpc,greasypizza/grpc,ctiller/grpc,simonkuang/grpc,Crevil/grpc,adelez/grpc,PeterFaiman/ruby-grpc-minimal,deepaklukose/grpc,ejona86/grpc,MakMukhi/grpc,perumaalgoog/grpc,ejona86/grpc,hstefan/grpc,stanley-cheung/grpc,royalharsh/grpc,ejona86/grpc,kpayson64/grpc,yongni/grpc,dgquintas/grpc,soltanmm/grpc,yang-g/grpc,kskalski/grpc,kriswuollett/grpc,perumaalgoog/grpc,dklempner/grpc,msmania/grpc,firebase/grpc,sreecha/grpc,pszemus/grpc,pmarks-net/grpc,matt-kwong/grpc,pszemus/grpc,malexzx/grpc,chrisdunelm/grpc,rjshade/grpc,vjpai/grpc,rjshade/grpc,jtattermusch/grpc,stanley-cheung/grpc,quizlet/grpc,PeterFaiman/ruby-grpc-minimal,ipylypiv/grpc,thinkerou/grpc,nicolasnoble/grpc,thunderboltsid/grpc,Vizerai/grpc,kumaralokgithub/grpc,sreecha/grpc,stanley-cheung/grpc,adelez/grpc,LuminateWireless/grpc,kumaralokgithub/grpc,matt-kwong/grpc,carl-mastrangelo/grpc,fuchsia-mirror/third_party-grpc,y-zeng/grpc,soltanmm/grpc,simonkuang/grpc,yongni/grpc,dgquintas/grpc,sreecha/grpc,firebase/grpc,msmania/grpc,ppietrasa/grpc,thinkerou/grpc,wcevans/grpc,Vizerai/grpc,thunderboltsid/grpc,MakMukhi/grpc,Crevil/grpc,nicolasnoble/grpc,fuchsia-mirror/third_party-grpc,PeterFaiman/ruby-grpc-minimal,pmarks-net/grpc,qu
izlet/grpc,jboeuf/grpc,sreecha/grpc,murgatroid99/grpc,mehrdada/grpc,a11r/grpc,deepaklukose/grpc,murgatroid99/grpc,jtattermusch/grpc,7anner/grpc,Crevil/grpc,ppietrasa/grpc,grpc/grpc,ncteisen/grpc,7anner/grpc,simonkuang/grpc,baylabs/grpc,jboeuf/grpc,ipylypiv/grpc,ipylypiv/grpc,firebase/grpc,perumaalgoog/grpc,LuminateWireless/grpc,sreecha/grpc,quizlet/grpc,geffzhang/grpc,firebase/grpc,pszemus/grpc,apolcyn/grpc,pmarks-net/grpc,firebase/grpc,hstefan/grpc,LuminateWireless/grpc,grpc/grpc,muxi/grpc,soltanmm/grpc,carl-mastrangelo/grpc,mehrdada/grpc,ipylypiv/grpc,andrewpollock/grpc,ncteisen/grpc,thunderboltsid/grpc,geffzhang/grpc,donnadionne/grpc,firebase/grpc,daniel-j-born/grpc,chrisdunelm/grpc,vjpai/grpc,adelez/grpc,a11r/grpc,perumaalgoog/grpc,malexzx/grpc,kriswuollett/grpc,yongni/grpc,carl-mastrangelo/grpc,vsco/grpc,geffzhang/grpc,grani/grpc,kskalski/grpc,rjshade/grpc,chrisdunelm/grpc,vjpai/grpc,hstefan/grpc,quizlet/grpc,zhimingxie/grpc,stanley-cheung/grpc,yugui/grpc,Crevil/grpc,daniel-j-born/grpc,LuminateWireless/grpc,pszemus/grpc,sreecha/grpc,chrisdunelm/grpc,msmania/grpc,grani/grpc,muxi/grpc,jcanizales/grpc,geffzhang/grpc,Vizerai/grpc,ctiller/grpc,ppietrasa/grpc,Crevil/grpc,y-zeng/grpc,vjpai/grpc,firebase/grpc,soltanmm/grpc,dklempner/grpc,chrisdunelm/grpc,vjpai/grpc,jcanizales/grpc,royalharsh/grpc,y-zeng/grpc,apolcyn/grpc,vsco/grpc,infinit/grpc,murgatroid99/grpc,wcevans/grpc,firebase/grpc,wcevans/grpc,Vizerai/grpc,ncteisen/grpc,kskalski/grpc,vjpai/grpc,baylabs/grpc,infinit/grpc,kpayson64/grpc,philcleveland/grpc,kpayson64/grpc,stanley-cheung/grpc,soltanmm-google/grpc,ncteisen/grpc,yang-g/grpc,msmania/grpc,hstefan/grpc,adelez/grpc,mehrdada/grpc,perumaalgoog/grpc,apolcyn/grpc,dklempner/grpc,donnadionne/grpc,andrewpollock/grpc,adelez/grpc,thinkerou/grpc,thunderboltsid/grpc,dgquintas/grpc,y-zeng/grpc,nicolasnoble/grpc,andrewpollock/grpc,ejona86/grpc,quizlet/grpc,greasypizza/grpc,deepaklukose/grpc,apolcyn/grpc,kpayson64/grpc,grani/grpc,apolcyn/grpc,Crevil/grpc,donnadionne/grpc,muxi/grpc,hstefan/grpc,apolcyn/grpc,jcanizales/grpc,ejona86/grpc,jtattermusch/grpc,baylabs/grpc,grpc/grpc,jtattermusch/grpc,infinit/grpc,donnadionne/grpc,muxi/grpc,sreecha/grpc,kumaralokgithub/grpc,wcevans/grpc,jboeuf/grpc,mehrdada/grpc,7anner/grpc,y-zeng/grpc,nicolasnoble/grpc,muxi/grpc,grpc/grpc,zhimingxie/grpc,andrewpollock/grpc,baylabs/grpc,andrewpollock/grpc,murgatroid99/grpc,jboeuf/grpc,philcleveland/grpc,pszemus/grpc,ejona86/grpc,Vizerai/grpc,quizlet/grpc,jcanizales/grpc,mehrdada/grpc,perumaalgoog/grpc,kpayson64/grpc,murgatroid99/grpc,yang-g/grpc,vjpai/grpc,ctiller/grpc,royalharsh/grpc,royalharsh/grpc,malexzx/grpc,MakMukhi/grpc,ppietrasa/grpc,daniel-j-born/grpc,ncteisen/grpc,malexzx/grpc,mehrdada/grpc,stanley-cheung/grpc,hstefan/grpc,PeterFaiman/ruby-grpc-minimal,firebase/grpc,geffzhang/grpc,ctiller/grpc,rjshade/grpc,grpc/grpc,adelez/grpc,makdharma/grpc,ppietrasa/grpc,yugui/grpc,matt-kwong/grpc,donnadionne/grpc,jtattermusch/grpc,ipylypiv/grpc,dklempner/grpc,hstefan/grpc,pmarks-net/grpc,perumaalgoog/grpc,ipylypiv/grpc,stanley-cheung/grpc,dklempner/grpc,kskalski/grpc,mehrdada/grpc,zhimingxie/grpc,grani/grpc,daniel-j-born/grpc,deepaklukose/grpc,philcleveland/grpc,apolcyn/grpc,yugui/grpc,soltanmm-google/grpc,yongni/grpc,simonkuang/grpc,kskalski/grpc,fuchsia-mirror/third_party-grpc,grani/grpc,soltanmm/grpc,a11r/grpc,matt-kwong/grpc,grpc/grpc,ejona86/grpc,pmarks-net/grpc,thunderboltsid/grpc,a11r/grpc,ncteisen/grpc,philcleveland/grpc,baylabs/grpc,vsco/grpc,daniel-j-born/grpc,jboeuf/grpc,deepaklukose/grpc,kriswuollett/grpc,think
erou/grpc,dgquintas/grpc,dgquintas/grpc,yongni/grpc,deepaklukose/grpc,malexzx/grpc,sreecha/grpc,yang-g/grpc,ppietrasa/grpc,7anner/grpc,carl-mastrangelo/grpc,yugui/grpc,soltanmm/grpc,zhimingxie/grpc,sreecha/grpc,zhimingxie/grpc,soltanmm/grpc,pszemus/grpc,soltanmm/grpc,jtattermusch/grpc,thunderboltsid/grpc,pmarks-net/grpc,Crevil/grpc,fuchsia-mirror/third_party-grpc,carl-mastrangelo/grpc,MakMukhi/grpc,makdharma/grpc,chrisdunelm/grpc,ejona86/grpc,jtattermusch/grpc,yugui/grpc,ncteisen/grpc,geffzhang/grpc,makdharma/grpc,nicolasnoble/grpc,jtattermusch/grpc,zhimingxie/grpc,infinit/grpc,ncteisen/grpc,dklempner/grpc,pmarks-net/grpc,msmania/grpc,carl-mastrangelo/grpc,soltanmm-google/grpc,Vizerai/grpc,pszemus/grpc,stanley-cheung/grpc,daniel-j-born/grpc,donnadionne/grpc,MakMukhi/grpc,royalharsh/grpc,ipylypiv/grpc,carl-mastrangelo/grpc,kumaralokgithub/grpc,royalharsh/grpc,soltanmm/grpc,thinkerou/grpc,grani/grpc,murgatroid99/grpc,chrisdunelm/grpc,perumaalgoog/grpc,jcanizales/grpc,sreecha/grpc,rjshade/grpc,7anner/grpc,simonkuang/grpc,baylabs/grpc,sreecha/grpc,vsco/grpc,MakMukhi/grpc,LuminateWireless/grpc,carl-mastrangelo/grpc,a11r/grpc,nicolasnoble/grpc,ppietrasa/grpc,a11r/grpc,PeterFaiman/ruby-grpc-minimal,baylabs/grpc,7anner/grpc,fuchsia-mirror/third_party-grpc,yang-g/grpc,ncteisen/grpc,a11r/grpc,quizlet/grpc,kriswuollett/grpc,yang-g/grpc,ncteisen/grpc,yang-g/grpc,LuminateWireless/grpc,royalharsh/grpc,yang-g/grpc,kriswuollett/grpc,quizlet/grpc,simonkuang/grpc,kpayson64/grpc,kriswuollett/grpc,kriswuollett/grpc,dgquintas/grpc,vsco/grpc,quizlet/grpc,Vizerai/grpc,soltanmm-google/grpc,murgatroid99/grpc,soltanmm-google/grpc,infinit/grpc,a11r/grpc,jtattermusch/grpc,donnadionne/grpc,matt-kwong/grpc,makdharma/grpc,jboeuf/grpc,zhimingxie/grpc,stanley-cheung/grpc,ctiller/grpc,geffzhang/grpc,nicolasnoble/grpc,nicolasnoble/grpc,rjshade/grpc,jcanizales/grpc,carl-mastrangelo/grpc,grpc/grpc,mehrdada/grpc,jboeuf/grpc,pmarks-net/grpc,Vizerai/grpc,yongni/grpc,fuchsia-mirror/third_party-grpc,greasypizza/grpc,grpc/grpc,jboeuf/grpc,7anner/grpc,PeterFaiman/ruby-grpc-minimal,grpc/grpc,rjshade/grpc,malexzx/grpc,PeterFaiman/ruby-grpc-minimal,mehrdada/grpc,soltanmm-google/grpc,greasypizza/grpc,andrewpollock/grpc,firebase/grpc,yongni/grpc,fuchsia-mirror/third_party-grpc,deepaklukose/grpc,ejona86/grpc,jcanizales/grpc,thunderboltsid/grpc,philcleveland/grpc,ejona86/grpc,chrisdunelm/grpc,jboeuf/grpc,wcevans/grpc,kumaralokgithub/grpc,yongni/grpc,andrewpollock/grpc,dgquintas/grpc,nicolasnoble/grpc,MakMukhi/grpc,matt-kwong/grpc,soltanmm-google/grpc,7anner/grpc,kskalski/grpc,matt-kwong/grpc,adelez/grpc,carl-mastrangelo/grpc,msmania/grpc,pszemus/grpc,chrisdunelm/grpc,deepaklukose/grpc,pszemus/grpc,kumaralokgithub/grpc,ctiller/grpc,ctiller/grpc,vjpai/grpc,msmania/grpc,stanley-cheung/grpc,grpc/grpc,infinit/grpc,y-zeng/grpc,murgatroid99/grpc,baylabs/grpc,jboeuf/grpc,malexzx/grpc,rjshade/grpc,grani/grpc,donnadionne/grpc,daniel-j-born/grpc,yugui/grpc,royalharsh/grpc,adelez/grpc,soltanmm-google/grpc,murgatroid99/grpc,grpc/grpc,LuminateWireless/grpc,pmarks-net/grpc,makdharma/grpc,muxi/grpc,chrisdunelm/grpc,yugui/grpc,grpc/grpc,vsco/grpc,dklempner/grpc,ipylypiv/grpc,dklempner/grpc,kskalski/grpc,MakMukhi/grpc
--- +++ @@ -0,0 +1,78 @@ +#!/usr/bin/env python2.7 + +# Copyright 2015, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# check for directory level 'README.md' files +# check that all implementation and interface files have a \file doxygen comment + +import os +import sys + +# where do we run +_TARGET_DIRS = [ + 'include/grpc', + 'include/grpc++', + 'src/core', + 'src/cpp', + 'test/core', + 'test/cpp' +] + +# which file extensions do we care about +_INTERESTING_EXTENSIONS = [ + '.c', + '.h', + '.cc' +] + +# find our home +_ROOT = os.path.abspath( + os.path.join(os.path.dirname(sys.argv[0]), '../../..')) +os.chdir(_ROOT) + +errors = 0 + +# walk directories, find things +for target_dir in _TARGET_DIRS: + for root, dirs, filenames in os.walk(target_dir): + if 'README.md' not in filenames: + print '%s: missing README.md' % root + errors += 1 + for filename in filenames: + if os.path.splitext(filename)[1] not in _INTERESTING_EXTENSIONS: + continue + path = os.path.join(root, filename) + with open(path) as f: + contents = f.read() + if '\\file' not in contents: + print '%s: no \\file comment' % path + errors += 1 + +assert errors == 0, 'error count = %d' % errors
b363b20440e564b2909736c64cb543cd632b4ae4
custom/covid/management/commands/fetch_form_case_counts.py
custom/covid/management/commands/fetch_form_case_counts.py
import itertools from datetime import date, datetime, timedelta from django.core.management.base import BaseCommand from corehq.apps.enterprise.models import EnterprisePermissions from corehq.apps.es import CaseES, FormES class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument( 'domains', nargs="*", help='Domains to check, will include enterprise-controlled child domains.' ) parser.add_argument('--num-days', type=int, default=30, help='Number of days (UTC) to inspect') def handle(self, domains, **options): filename = "form_case_counts_{}".format(datetime.utcnow().strftime("%Y-%m-%d_%H.%M.%S")) for row in self.get_rows(domains, options['num_days']): if row['forms_submitted']: print(row) def get_rows(self, domains, num_days): end = date.today() start = end - timedelta(days=num_days) for domain in _expand_domains(domains): submissions_counts = _get_submissions_counts(domain, start, end) day = start while day <= end: yield { 'domain': domain, 'date': day.isoformat(), 'forms_submitted': submissions_counts.get(day, 0), } day += timedelta(days=1) def _expand_domains(domains): return sorted(set(itertools.chain( domains, *(EnterprisePermissions.get_domains(domain) for domain in domains) ))) def _get_datetime_range(num_days): now = datetime.utcnow() end = datetime(now.year, now.month, now.day) # 00:00:00 this morning UTC start = end - timedelta(days=num_days) return start, end def _get_submissions_counts(domain, start, end): res = (FormES() .domain(domain) .submitted(gte=start, lte=end) .submitted_histogram() .run().aggregations.date_histogram) return { date.fromisoformat(bucket['key_as_string']): bucket['doc_count'] for bucket in res.normalized_buckets }
Print daily form submission counts
Print daily form submission counts
Python
bsd-3-clause
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
--- +++ @@ -0,0 +1,63 @@ +import itertools +from datetime import date, datetime, timedelta + +from django.core.management.base import BaseCommand + +from corehq.apps.enterprise.models import EnterprisePermissions +from corehq.apps.es import CaseES, FormES + + +class Command(BaseCommand): + + def add_arguments(self, parser): + parser.add_argument( + 'domains', nargs="*", + help='Domains to check, will include enterprise-controlled child domains.' + ) + parser.add_argument('--num-days', type=int, default=30, help='Number of days (UTC) to inspect') + + def handle(self, domains, **options): + filename = "form_case_counts_{}".format(datetime.utcnow().strftime("%Y-%m-%d_%H.%M.%S")) + for row in self.get_rows(domains, options['num_days']): + if row['forms_submitted']: + print(row) + + def get_rows(self, domains, num_days): + end = date.today() + start = end - timedelta(days=num_days) + for domain in _expand_domains(domains): + submissions_counts = _get_submissions_counts(domain, start, end) + + day = start + while day <= end: + yield { + 'domain': domain, + 'date': day.isoformat(), + 'forms_submitted': submissions_counts.get(day, 0), + } + day += timedelta(days=1) + + +def _expand_domains(domains): + return sorted(set(itertools.chain( + domains, + *(EnterprisePermissions.get_domains(domain) for domain in domains) + ))) + + +def _get_datetime_range(num_days): + now = datetime.utcnow() + end = datetime(now.year, now.month, now.day) # 00:00:00 this morning UTC + start = end - timedelta(days=num_days) + return start, end + +def _get_submissions_counts(domain, start, end): + res = (FormES() + .domain(domain) + .submitted(gte=start, lte=end) + .submitted_histogram() + .run().aggregations.date_histogram) + return { + date.fromisoformat(bucket['key_as_string']): bucket['doc_count'] + for bucket in res.normalized_buckets + }
d3e90e4dca1cce2f27ccf9c24c1bb944f9d708b9
test/test_message_directory.py
test/test_message_directory.py
""" Created on 18 May 2018. @author: Greg Corbett """ import shutil import tempfile import unittest from ssm.message_directory import MessageDirectory class TestMessageDirectory(unittest.TestCase): """Class used for testing the MessageDirectory class.""" def setUp(self): """Create a MessageDirectory class on top of a temporary directory.""" self.tmp_dir = tempfile.mkdtemp(prefix='message_directory') self.message_directory = MessageDirectory(self.tmp_dir) # Assert no files exist in the underlying file system. self.assertEqual(self.message_directory.count(), 0) def test_add_and_get(self): """ Test the add and get methods of the MessageDirectory class. This test adds a file to a MessageDirectory, checks it has been written to the underlying directory and then checks the saved file for content equality. """ test_content = "FOO" # Add the test content to the MessageDirectory. file_name = self.message_directory.add(test_content) # Assert there is exactly on message in the directory. self.assertEqual(self.message_directory.count(), 1) # Fetch the saved content using the get method. saved_content = self.message_directory.get(file_name) # Assert the saved content is equal to the original test content. self.assertEqual(saved_content, test_content) def test_count(self): """ Test the count method of the MessageDirectory class. This test adds two files to a MessageDirectory and then checks the output of the count() function is as expected. """ # Add some files to the MessageDirectory. self.message_directory.add("FOO") self.message_directory.add("BAR") # Check the count method returns the correct value. self.assertEqual(self.message_directory.count(), 2) def test_lock(self): """ Test the lock method of the MessageDirectory class. This test checks the lock method returns true for any file. """ self.assertTrue(self.message_directory.lock("any file")) def test_purge(self): """ Test the purge method of the MessageDirectory class. This test only checks the purge method is callable without error, as the purge method only logs that it has been called. """ self.message_directory.purge() def test_remove(self): """ Test the remove method of the MessageDirectory class. This test adds a file, removes the file and then checks the number of files present. """ # Add some files to the MessageDirectory. file_name = self.message_directory.add("FOO") # Use the remove method to delete the recently added file. self.message_directory.remove(file_name) # Check the count method returns the expected value. self.assertEqual(self.message_directory.count(), 0) def tearDown(self): """Remove test directory and all contents.""" try: shutil.rmtree(self.tmp_dir) except OSError, error: print 'Error removing temporary directory %s' % self.tmp_dir print error if __name__ == "__main__": unittest.main()
Add a unit test file for MessageDirectory
Add a unit test file for MessageDirectory
Python
apache-2.0
tofu-rocketry/ssm,apel/ssm,tofu-rocketry/ssm,stfc/ssm,apel/ssm,stfc/ssm
--- +++ @@ -0,0 +1,100 @@ +""" +Created on 18 May 2018. + +@author: Greg Corbett +""" + +import shutil +import tempfile +import unittest + +from ssm.message_directory import MessageDirectory + + +class TestMessageDirectory(unittest.TestCase): + """Class used for testing the MessageDirectory class.""" + + def setUp(self): + """Create a MessageDirectory class on top of a temporary directory.""" + self.tmp_dir = tempfile.mkdtemp(prefix='message_directory') + self.message_directory = MessageDirectory(self.tmp_dir) + # Assert no files exist in the underlying file system. + self.assertEqual(self.message_directory.count(), 0) + + def test_add_and_get(self): + """ + Test the add and get methods of the MessageDirectory class. + + This test adds a file to a MessageDirectory, checks it has been + written to the underlying directory and then checks the saved file + for content equality. + """ + test_content = "FOO" + # Add the test content to the MessageDirectory. + file_name = self.message_directory.add(test_content) + + # Assert there is exactly on message in the directory. + self.assertEqual(self.message_directory.count(), 1) + + # Fetch the saved content using the get method. + saved_content = self.message_directory.get(file_name) + + # Assert the saved content is equal to the original test content. + self.assertEqual(saved_content, test_content) + + def test_count(self): + """ + Test the count method of the MessageDirectory class. + + This test adds two files to a MessageDirectory and then checks + the output of the count() function is as expected. + """ + # Add some files to the MessageDirectory. + self.message_directory.add("FOO") + self.message_directory.add("BAR") + + # Check the count method returns the correct value. + self.assertEqual(self.message_directory.count(), 2) + + def test_lock(self): + """ + Test the lock method of the MessageDirectory class. + + This test checks the lock method returns true for any file. + """ + self.assertTrue(self.message_directory.lock("any file")) + + def test_purge(self): + """ + Test the purge method of the MessageDirectory class. + + This test only checks the purge method is callable without error, + as the purge method only logs that it has been called. + """ + self.message_directory.purge() + + def test_remove(self): + """ + Test the remove method of the MessageDirectory class. + + This test adds a file, removes the file and then checks + the number of files present. + """ + # Add some files to the MessageDirectory. + file_name = self.message_directory.add("FOO") + # Use the remove method to delete the recently added file. + self.message_directory.remove(file_name) + # Check the count method returns the expected value. + self.assertEqual(self.message_directory.count(), 0) + + def tearDown(self): + """Remove test directory and all contents.""" + try: + shutil.rmtree(self.tmp_dir) + except OSError, error: + print 'Error removing temporary directory %s' % self.tmp_dir + print error + + +if __name__ == "__main__": + unittest.main()
7fd09bd791661ab0b12921dfd977591690d9c01a
accounts/tests/test_forms.py
accounts/tests/test_forms.py
"""accounts app unittests for views """ from django.test import TestCase from accounts.forms import LoginForm class LoginFormTest(TestCase): """Tests the form which validates the email used for login. """ def test_valid_email_accepted(self): form = LoginForm({'email': 'newvisitor@example.com'}) self.assertTrue(form.is_valid()) def test_invalid_email_declined(self): form = LoginForm({'email': 'invalidemail'}) self.assertFalse(form.is_valid())
Add form tests for LoginForm
Add form tests for LoginForm
Python
mit
randomic/aniauth-tdd,randomic/aniauth-tdd
--- +++ @@ -0,0 +1,19 @@
+"""accounts app unittests for forms
+
+"""
+from django.test import TestCase
+
+from accounts.forms import LoginForm
+
+
+class LoginFormTest(TestCase):
+    """Tests the form which validates the email used for login.
+
+    """
+    def test_valid_email_accepted(self):
+        form = LoginForm({'email': 'newvisitor@example.com'})
+        self.assertTrue(form.is_valid())
+
+    def test_invalid_email_declined(self):
+        form = LoginForm({'email': 'invalidemail'})
+        self.assertFalse(form.is_valid())
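The tests above only pin down one behaviour of LoginForm: accept a well-formed email, reject a malformed one. A minimal form that would satisfy them (a sketch, not necessarily the project's actual definition):

from django import forms

class LoginForm(forms.Form):
    email = forms.EmailField()  # 'invalidemail' fails validation; 'newvisitor@example.com' passes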
28a7077b7f05f52d0bff7a849f8b50f82f73dbdb
idx2md5.py
idx2md5.py
#! /usr/bin/python from __future__ import print_function import sys import photo.index idx = photo.index.Index(idxfile=sys.argv[1]) for i in idx: print("%s %s" % (i.md5, i.filename))
Add a script to convert an index to an MD5 file.
Add a script to convert an index to an MD5 file.
Python
apache-2.0
RKrahl/photo-tools
--- +++ @@ -0,0 +1,9 @@ +#! /usr/bin/python + +from __future__ import print_function +import sys +import photo.index + +idx = photo.index.Index(idxfile=sys.argv[1]) +for i in idx: + print("%s %s" % (i.md5, i.filename))
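The script writes one 'md5 filename' pair per line to stdout. Assuming the index stores hex digests, the resulting listing can be re-verified with the standard library (file names here are hypothetical):

import hashlib

with open('photos.md5') as listing:   # e.g. output of: idx2md5.py photos.idx > photos.md5
    for line in listing:
        md5, filename = line.rstrip('\n').split(' ', 1)
        with open(filename, 'rb') as image:
            assert hashlib.md5(image.read()).hexdigest() == md5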
05e2b100512a9c9b06c5d7d2701867f155c5e3f0
senlin/tests/tempest/api/profiles/test_profile_validate.py
senlin/tests/tempest/api/profiles/test_profile_validate.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import decorators from senlin.tests.tempest.api import base from senlin.tests.tempest.common import constants class TestProfileValidate(base.BaseSenlinAPITest): @decorators.idempotent_id('ff678e2d-60d0-43da-808f-cb70a3926112') def test_profile_validate(self): params = { 'profile': { 'spec': constants.spec_nova_server, } } res = self.client.validate_obj('profiles', params) # Verify resp of validate create API self.assertEqual(200, res['status']) self.assertIsNotNone(res['body']) profile = res['body'] for key in ['created_at', 'domain', 'id', 'metadata', 'name', 'project', 'spec', 'type', 'updated_at', 'user']: self.assertIn(key, profile) self.assertEqual('validated_profile', profile['name']) self.assertEqual('os.nova.server-1.0', profile['type']) self.assertEqual(constants.spec_nova_server, profile['spec'])
Add API tests for profile validation
Add API tests for profile validation Add positive API tests for profile validation, add missing tests. Change-Id: I3bed44af9b90e317a0d1d76b883182db9338d40d
Python
apache-2.0
openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin
--- +++ @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tempest.lib import decorators + +from senlin.tests.tempest.api import base +from senlin.tests.tempest.common import constants + + +class TestProfileValidate(base.BaseSenlinAPITest): + + @decorators.idempotent_id('ff678e2d-60d0-43da-808f-cb70a3926112') + def test_profile_validate(self): + params = { + 'profile': { + 'spec': constants.spec_nova_server, + } + } + res = self.client.validate_obj('profiles', params) + + # Verify resp of validate create API + self.assertEqual(200, res['status']) + self.assertIsNotNone(res['body']) + profile = res['body'] + for key in ['created_at', 'domain', 'id', 'metadata', 'name', + 'project', 'spec', 'type', 'updated_at', 'user']: + self.assertIn(key, profile) + self.assertEqual('validated_profile', profile['name']) + self.assertEqual('os.nova.server-1.0', profile['type']) + self.assertEqual(constants.spec_nova_server, profile['spec'])
861a5fcda82fefbe10c844fda4075688dc6baf8e
mzalendo/votematch/views.py
mzalendo/votematch/views.py
import models from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template import RequestContext def quiz_detail (request, slug): quiz = get_object_or_404( models.Quiz, slug=slug ) return render_to_response( 'votematch/quiz_detail.html', { 'object': quiz, 'choices': models.agreement_choices, }, context_instance=RequestContext(request) ) def submission_detail (request, slug, token): # TODO - we're not checking that the quiz slug is correct. We don't really # care - but should probably check just to be correct. submission = get_object_or_404( models.Submission, token = token ) return render_to_response( 'votematch/submission_detail.html', { 'object': submission, }, context_instance=RequestContext(request) )
import models from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template import RequestContext def quiz_detail (request, slug): quiz = get_object_or_404( models.Quiz, slug=slug ) # If this is a POST then extract all the answers if request.method == 'POST': # get the answers. Use the current set of statements to look for # submitted values. Ignore anything that is not expected. answers = {} statements = {} for statement in quiz.statement_set.all(): statements[statement.id] = statement val = request.POST.get( 'statement-' + str(statement.id) ) if len( val ): # ignore "" which is used for 'don't know' defaults answers[statement.id] = int(val) if len(answers): submission = models.Submission.objects.create(quiz=quiz) for statement_id, answer in answers.iteritems(): submission.answer_set.create( statement = statements[statement_id], agreement = answer ) return redirect(submission) return render_to_response( 'votematch/quiz_detail.html', { 'object': quiz, 'choices': models.agreement_choices, }, context_instance=RequestContext(request) ) def submission_detail (request, slug, token): # TODO - we're not checking that the quiz slug is correct. We don't really # care - but should probably check just to be correct. submission = get_object_or_404( models.Submission, token = token ) return render_to_response( 'votematch/submission_detail.html', { 'object': submission, }, context_instance=RequestContext(request) )
Save submissions, and redirect the user to the submission detail page after completing the form.
Save submissions, and redirect the user to the submission detail page after completing the form.
Python
agpl-3.0
patricmutwiri/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,ken-muturi/pombola,patricmutwiri/pombola,hzj123/56th,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,geoffkilpin/pombola,ken-muturi/pombola,hzj123/56th,hzj123/56th,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,ken-muturi/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,hzj123/56th,geoffkilpin/pombola
--- +++ @@ -3,12 +3,39 @@ from django.shortcuts import render_to_response, get_object_or_404, redirect from django.template import RequestContext + def quiz_detail (request, slug): + quiz = get_object_or_404( models.Quiz, slug=slug ) + # If this is a POST then extract all the answers + if request.method == 'POST': + + # get the answers. Use the current set of statements to look for + # submitted values. Ignore anything that is not expected. + answers = {} + statements = {} + for statement in quiz.statement_set.all(): + statements[statement.id] = statement + val = request.POST.get( 'statement-' + str(statement.id) ) + if len( val ): # ignore "" which is used for 'don't know' defaults + answers[statement.id] = int(val) + + if len(answers): + submission = models.Submission.objects.create(quiz=quiz) + + for statement_id, answer in answers.iteritems(): + submission.answer_set.create( + statement = statements[statement_id], + agreement = answer + ) + + return redirect(submission) + + return render_to_response( 'votematch/quiz_detail.html', {
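One edge case worth noting in the new POST handling: request.POST.get() returns None when a statement key is absent from the submission entirely, and len(None) raises a TypeError. Supplying a default keeps the "skip unanswered" behaviour intact (a defensive variant, not part of the commit):

val = request.POST.get('statement-' + str(statement.id), '')
if val:  # '' covers both the "don't know" default and a missing input
    answers[statement.id] = int(val)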
c4e6b2b68e6acd8f83091fc055897628b8df05bb
lowfat/migrations/0105_auto_20170615_1400.py
lowfat/migrations/0105_auto_20170615_1400.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-06-15 14:00 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('lowfat', '0104_auto_20170607_1428'), ] operations = [ migrations.AddField( model_name='historicalblog', name='history_change_reason', field=models.CharField(max_length=100, null=True), ), migrations.AddField( model_name='historicalclaimant', name='history_change_reason', field=models.CharField(max_length=100, null=True), ), migrations.AddField( model_name='historicalexpense', name='history_change_reason', field=models.CharField(max_length=100, null=True), ), migrations.AddField( model_name='historicalfund', name='history_change_reason', field=models.CharField(max_length=100, null=True), ), migrations.AddField( model_name='historicalgeneralsentmail', name='history_change_reason', field=models.CharField(max_length=100, null=True), ), ]
Add migration for django-simple-history == 1.9.0
Add migration for django-simple-history == 1.9.0
Python
bsd-3-clause
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
--- +++ @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# Generated by Django 1.11.2 on 2017-06-15 14:00 +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('lowfat', '0104_auto_20170607_1428'), + ] + + operations = [ + migrations.AddField( + model_name='historicalblog', + name='history_change_reason', + field=models.CharField(max_length=100, null=True), + ), + migrations.AddField( + model_name='historicalclaimant', + name='history_change_reason', + field=models.CharField(max_length=100, null=True), + ), + migrations.AddField( + model_name='historicalexpense', + name='history_change_reason', + field=models.CharField(max_length=100, null=True), + ), + migrations.AddField( + model_name='historicalfund', + name='history_change_reason', + field=models.CharField(max_length=100, null=True), + ), + migrations.AddField( + model_name='historicalgeneralsentmail', + name='history_change_reason', + field=models.CharField(max_length=100, null=True), + ), + ]
22946180e9e5e660be14d453ddf5bb37564ddf33
build-tile-map.py
build-tile-map.py
#!/usr/bin/env python from osgeo import ogr from osgeo import osr from glob import glob import os driver = ogr.GetDriverByName("ESRI Shapefile") ds = driver.CreateDataSource("tile-map.shp") srs = osr.SpatialReference() srs.ImportFromEPSG(26915) layer = ds.CreateLayer("tiles", srs, ogr.wkbPolygon) field_name = ogr.FieldDefn("Name", ogr.OFTString) field_name.SetWidth(16) layer.CreateField(field_name) tile_ids = [ os.path.splitext(os.path.basename(x))[0] for x in glob('tile-entries/*.txt') ] for tile_id in tile_ids: x1_s, y1_s = tile_id.split('x') x1 = int(x1_s) y1 = int(y1_s) x2 = x1 + 10000 y2 = y1 + 10000 ring = ogr.Geometry(ogr.wkbLinearRing) ring.AddPoint(x1, y1) ring.AddPoint(x2, y1) ring.AddPoint(x2, y2) ring.AddPoint(x1, y2) ring.CloseRings() poly = ogr.Geometry(ogr.wkbPolygon) poly.AddGeometry(ring) feature = ogr.Feature(layer.GetLayerDefn()) feature.SetField("Name", tile_id) feature.SetGeometry(poly) layer.CreateFeature(feature) feature.Destroy() ds.Destroy()
Add a script to build a map of all the tiles
Add a script to build a map of all the tiles
Python
mit
simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic,simonsonc/mn-glo-mosaic
--- +++ @@ -0,0 +1,46 @@ +#!/usr/bin/env python +from osgeo import ogr +from osgeo import osr +from glob import glob +import os + +driver = ogr.GetDriverByName("ESRI Shapefile") +ds = driver.CreateDataSource("tile-map.shp") + +srs = osr.SpatialReference() +srs.ImportFromEPSG(26915) + +layer = ds.CreateLayer("tiles", srs, ogr.wkbPolygon) + +field_name = ogr.FieldDefn("Name", ogr.OFTString) +field_name.SetWidth(16) +layer.CreateField(field_name) + +tile_ids = [ os.path.splitext(os.path.basename(x))[0] for x in glob('tile-entries/*.txt') ] + +for tile_id in tile_ids: + x1_s, y1_s = tile_id.split('x') + x1 = int(x1_s) + y1 = int(y1_s) + x2 = x1 + 10000 + y2 = y1 + 10000 + + ring = ogr.Geometry(ogr.wkbLinearRing) + ring.AddPoint(x1, y1) + ring.AddPoint(x2, y1) + ring.AddPoint(x2, y2) + ring.AddPoint(x1, y2) + ring.CloseRings() + + poly = ogr.Geometry(ogr.wkbPolygon) + poly.AddGeometry(ring) + + feature = ogr.Feature(layer.GetLayerDefn()) + feature.SetField("Name", tile_id) + feature.SetGeometry(poly) + + layer.CreateFeature(feature) + + feature.Destroy() + +ds.Destroy()
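A quick sanity check of the generated shapefile, using the same bindings the script already imports (the tile id shown is hypothetical):

from osgeo import ogr

ds = ogr.Open('tile-map.shp')
layer = ds.GetLayer(0)
print(layer.GetFeatureCount())       # one feature per tile-entries/*.txt
for feature in layer:
    print(feature.GetField('Name'))  # e.g. '460000x5180000'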
c0b37b879b2e20ee71663e64be76ad11c1e1794c
async/3.7/basic/compsleep.py
async/3.7/basic/compsleep.py
#!/usr/bin/env python

"""A variation on https://docs.python.org/3.7/library/asyncio-task.html

Features:
- asyncio.gather()
- asyncio.sleep()
- asyncio.run()
"""

import asyncio
import logging
import time

concurrent = 3
delay = 5

# PYTHONASYNCIODEBUG=1
logging.basicConfig(level=logging.DEBUG)

async def async_pause():
    await asyncio.sleep(delay)
    return 0

async def sync_pause():
    time.sleep(delay)
    return 0

async def main():
    """Schedule three calls *concurrently*"""

    tasks = [
        async_pause() for _ in range(concurrent)]
    await asyncio.gather(*tasks)

    tasks = [
        sync_pause() for _ in range(concurrent)]
    await asyncio.gather(*tasks)

# PYTHONASYNCIODEBUG=1
asyncio.run(main(), debug=True)
Add a script that can be used to compare asyncio.sleep to time.sleep
Add a script that can be used to compare asyncio.sleep to time.sleep
Python
mit
showa-yojyo/bin,showa-yojyo/bin
--- +++ @@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+"""A variation on https://docs.python.org/3.7/library/asyncio-task.html
+
+Features:
+- asyncio.gather()
+- asyncio.sleep()
+- asyncio.run()
+"""
+
+import asyncio
+import logging
+import time
+
+concurrent = 3
+delay = 5
+
+# PYTHONASYNCIODEBUG=1
+logging.basicConfig(level=logging.DEBUG)
+
+async def async_pause():
+    await asyncio.sleep(delay)
+    return 0
+
+async def sync_pause():
+    time.sleep(delay)
+    return 0
+
+async def main():
+    """Schedule three calls *concurrently*"""
+
+    tasks = [
+        async_pause() for _ in range(concurrent)]
+    await asyncio.gather(*tasks)
+
+    tasks = [
+        sync_pause() for _ in range(concurrent)]
+    await asyncio.gather(*tasks)
+
+# PYTHONASYNCIODEBUG=1
+asyncio.run(main(), debug=True)
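The contrast the script demonstrates: the three async_pause() coroutines sleep concurrently, so that batch takes about 5 seconds, while the three sync_pause() calls block the event loop one after another, taking about 3 × 5 = 15 seconds, because time.sleep() never yields to the loop. A rough way to confirm, with main() as defined above (exact figures will vary):

import time

start = time.monotonic()
asyncio.run(main(), debug=True)
print(time.monotonic() - start)  # roughly 5 + 15 = 20 seconds in total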
ee1df360979ec24fd8233210372eedd3071cee87
tests/test_argspec.py
tests/test_argspec.py
"""Make sure that arguments of open/read/write don't diverge""" import pysoundfile as sf from inspect import getargspec open = getargspec(sf.open) init = getargspec(sf.SoundFile.__init__) read_function = getargspec(sf.read) read_method = getargspec(sf.SoundFile.read) write_function = getargspec(sf.write) def defaults(spec): return dict(zip(reversed(spec.args), reversed(spec.defaults))) def test_if_open_is_identical_to_init(): assert ['self'] + open.args == init.args assert open.varargs == init.varargs assert open.keywords == init.keywords assert open.defaults == init.defaults def test_read_function(): func_defaults = defaults(read_function) meth_defaults = defaults(read_method) open_defaults = defaults(open) # Not meaningful in read() function: del open_defaults['mode'] # Only in read() function: del func_defaults['start'] del func_defaults['stop'] # Same default values as open() and SoundFile.read(): for spec in open_defaults, meth_defaults: for arg, default in spec.items(): assert (arg, func_defaults[arg]) == (arg, default) del func_defaults[arg] assert not func_defaults # No more arguments should be left def test_write_function(): write_defaults = defaults(write_function) open_defaults = defaults(open) # Same default values as open(): for arg, default in write_defaults.items(): assert (arg, open_defaults[arg]) == (arg, default) del open_defaults[arg] del open_defaults['mode'] # mode is always 'w' del open_defaults['channels'] # Inferred from data del open_defaults['sample_rate'] # Obligatory in write() assert not open_defaults # No more arguments should be left
Add test file to check consistent use of default arguments
Add test file to check consistent use of default arguments
Python
bsd-3-clause
mgeier/PySoundFile
--- +++ @@ -0,0 +1,59 @@ +"""Make sure that arguments of open/read/write don't diverge""" + +import pysoundfile as sf +from inspect import getargspec + + +open = getargspec(sf.open) +init = getargspec(sf.SoundFile.__init__) +read_function = getargspec(sf.read) +read_method = getargspec(sf.SoundFile.read) +write_function = getargspec(sf.write) + + +def defaults(spec): + return dict(zip(reversed(spec.args), reversed(spec.defaults))) + + +def test_if_open_is_identical_to_init(): + assert ['self'] + open.args == init.args + assert open.varargs == init.varargs + assert open.keywords == init.keywords + assert open.defaults == init.defaults + + +def test_read_function(): + func_defaults = defaults(read_function) + meth_defaults = defaults(read_method) + open_defaults = defaults(open) + + # Not meaningful in read() function: + del open_defaults['mode'] + + # Only in read() function: + del func_defaults['start'] + del func_defaults['stop'] + + # Same default values as open() and SoundFile.read(): + for spec in open_defaults, meth_defaults: + for arg, default in spec.items(): + assert (arg, func_defaults[arg]) == (arg, default) + del func_defaults[arg] + + assert not func_defaults # No more arguments should be left + + +def test_write_function(): + write_defaults = defaults(write_function) + open_defaults = defaults(open) + + # Same default values as open(): + for arg, default in write_defaults.items(): + assert (arg, open_defaults[arg]) == (arg, default) + del open_defaults[arg] + + del open_defaults['mode'] # mode is always 'w' + del open_defaults['channels'] # Inferred from data + del open_defaults['sample_rate'] # Obligatory in write() + + assert not open_defaults # No more arguments should be left
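A portability caveat for this test module: inspect.getargspec() was deprecated in Python 3.0 and removed in 3.11, so on modern interpreters the same comparisons need inspect.getfullargspec(), whose result names the **kwargs slot varkw rather than keywords:

from inspect import getfullargspec

open_spec = getfullargspec(sf.open)
assert open_spec.varkw == getfullargspec(sf.SoundFile.__init__).varkw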
2b99781e67e1e2e0bb3863c08e81b3cf57a5e296
tests/test_bouncer.py
tests/test_bouncer.py
from rest_framework import status
from rest_framework.test import APITestCase
from django.contrib.auth import get_user_model

from api_bouncer.models import Api

User = get_user_model()


class BouncerTests(APITestCase):
    def setUp(self):
        self.superuser = User.objects.create_superuser(
            'john',
            'john@localhost.local',
            'john123john'
        )
        self.example_api = Api.objects.create(
            name='httpbin',
            hosts=['httpbin.org'],
            upstream_url='https://httpbin.org'
        )

    def test_bounce_api_request(self):
        """
        Ensure we can bounce a request to an api and get the same response.
        """
        url = '/status/418'  # teapot
        self.client.credentials(HTTP_HOST='httpbin.org')
        response = self.client.get(url)
        self.assertEqual(response.status_code, 418)
        self.assertIn('teapot', response.content.decode('utf-8'))

    def test_bounce_api_request_unknown_host(self):
        """
        Ensure we send a response when the host making the request is not
        trying to call an api.
        """
        url = '/test'
        self.client.credentials(HTTP_HOST='the-unknown.com')
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.json(), {})
Add request bouncer test cases
Add request bouncer test cases
Python
apache-2.0
menecio/django-api-bouncer
--- +++ @@ -0,0 +1,42 @@
+from rest_framework import status
+from rest_framework.test import APITestCase
+from django.contrib.auth import get_user_model
+
+from api_bouncer.models import Api
+
+User = get_user_model()
+
+
+class BouncerTests(APITestCase):
+    def setUp(self):
+        self.superuser = User.objects.create_superuser(
+            'john',
+            'john@localhost.local',
+            'john123john'
+        )
+        self.example_api = Api.objects.create(
+            name='httpbin',
+            hosts=['httpbin.org'],
+            upstream_url='https://httpbin.org'
+        )
+
+    def test_bounce_api_request(self):
+        """
+        Ensure we can bounce a request to an api and get the same response.
+        """
+        url = '/status/418'  # teapot
+        self.client.credentials(HTTP_HOST='httpbin.org')
+        response = self.client.get(url)
+        self.assertEqual(response.status_code, 418)
+        self.assertIn('teapot', response.content.decode('utf-8'))
+
+    def test_bounce_api_request_unknown_host(self):
+        """
+        Ensure we send a response when the host making the request is not
+        trying to call an api.
+        """
+        url = '/test'
+        self.client.credentials(HTTP_HOST='the-unknown.com')
+        response = self.client.get(url)
+        self.assertEqual(response.status_code, status.HTTP_200_OK)
+        self.assertEqual(response.json(), {})
ab1bc996d477c84187df381ec77e7aaab299783b
tests/test_utils.py
tests/test_utils.py
from mws.mws import calc_md5 def test_calc_md5(): assert calc_md5(b'mws') == b'mA5nPbh1CSx9M3dbkr3Cyg=='
Add test for calc_md5() function
Add test for calc_md5() function
Python
unlicense
bpipat/mws,jameshiew/mws,Bobspadger/python-amazon-mws,GriceTurrble/python-amazon-mws
--- +++ @@ -0,0 +1,5 @@ +from mws.mws import calc_md5 + + +def test_calc_md5(): + assert calc_md5(b'mws') == b'mA5nPbh1CSx9M3dbkr3Cyg=='
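Assuming calc_md5() returns a base64-encoded MD5 digest, which is what the expected value suggests, the constant in the test can be reproduced with the standard library:

import base64
import hashlib

base64.b64encode(hashlib.md5(b'mws').digest())  # b'mA5nPbh1CSx9M3dbkr3Cyg==' per the test above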
13b3320399e51bbbb4018ea5fb3ff6d63c8864c7
celexRunScript.py
celexRunScript.py
from celex import Celex ''' - Allows for a quick run of a desired size pulled from any evolution file. - Automatically loads the best chromosome from the evolution to test. - Useful for running large-scale tests of a chromosome that appears to be performing well. - For small tests to ensure system functionality, run the unit tests (see README.md) - is thread-safe. Set parameters here: ''' evoFileLocation = "./GeneticAlgorithm/EvolutionLogs/Archive/1/evo309.log" trainingSize = 500 testingSize = 25 '''---------------------------------------''' with open(evoFileLocation,'r') as evo: bestEvoRaw = evo.readline().split() transcriptionScheme = [] for category in bestEvoRaw: transcriptionScheme.append(map(lambda x: x, category)) c = Celex() c.loadSets(trainingSize,testingSize) GUID = c.trainHMM(transcriptionScheme) percentSame = c.testHMM(transcriptionScheme, GUID)
Add quick-run script for Celex full-scale tests
Add quick-run script for Celex full-scale tests
Python
mit
jacobkrantz/ProbSyllabifier
--- +++ @@ -0,0 +1,27 @@ +from celex import Celex +''' +- Allows for a quick run of a desired size pulled from any evolution file. +- Automatically loads the best chromosome from the evolution to test. +- Useful for running large-scale tests of a chromosome that appears +to be performing well. +- For small tests to ensure system functionality, run the unit tests (see README.md) +- is thread-safe. + +Set parameters here: +''' +evoFileLocation = "./GeneticAlgorithm/EvolutionLogs/Archive/1/evo309.log" +trainingSize = 500 +testingSize = 25 +'''---------------------------------------''' + + +with open(evoFileLocation,'r') as evo: + bestEvoRaw = evo.readline().split() + transcriptionScheme = [] + for category in bestEvoRaw: + transcriptionScheme.append(map(lambda x: x, category)) + +c = Celex() +c.loadSets(trainingSize,testingSize) +GUID = c.trainHMM(transcriptionScheme) +percentSame = c.testHMM(transcriptionScheme, GUID)
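A small readability note on the parsing loop: map(lambda x: x, category) is an identity map, so under Python 2, where map already returns a list, each category could be copied with the plainer:

transcriptionScheme.append(list(category))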
25a4c9ba978aef7f648904c654fcc044f429acd4
custom/openclinica/models.py
custom/openclinica/models.py
from collections import defaultdict from corehq.apps.users.models import CouchUser from custom.openclinica.const import AUDIT_LOGS from custom.openclinica.utils import ( OpenClinicaIntegrationError, is_item_group_repeating, is_study_event_repeating, get_item_measurement_unit, get_question_item, get_oc_user, get_study_event_name, ) class Subject(object): """ Manages data for a subject case """ def __init__(self, subject_key, study_subject_id, domain): self.subject_key = subject_key self.study_subject_id = study_subject_id # We need the domain to get study metadata for study events and item groups self._domain = domain # This subject's data. Stored as subject[study_event_oid][i][form_oid][item_group_oid][j][item_oid] # (Study events and item groups are lists because they can repeat.) self.data = {} def get_report_events(self): """ The events as they appear in the report. These are useful for scheduling events in OpenClinica, which cannot be imported from ODM until they have been scheduled. """ events = [] for study_events in self.data.itervalues(): for study_event in study_events: events.append( '"{name}" ({start} - {end})'.format( name=study_event.name, start=study_event.start_short, end=study_event.end_short)) return ', '.join(events) def get_export_data(self): """ Transform Subject.data into the structure that CdiscOdmExportWriter expects """ mkitemlist = lambda d: [dict(v, item_oid=k) for k, v in d.iteritems()] # `dict()` updates v with item_oid def mkitemgrouplist(itemgroupdict): itemgrouplist = [] for oid, item_groups in itemgroupdict.iteritems(): for i, item_group in enumerate(item_groups): itemgrouplist.append({ 'item_group_oid': oid, 'repeat_key': i + 1, 'items': mkitemlist(item_group.items) }) return itemgrouplist mkformslist = lambda d: [{'form_oid': k, 'item_groups': mkitemgrouplist(v)} for k, v in d.iteritems()] def mkeventslist(eventsdict): eventslist = [] for oid, study_events in eventsdict.iteritems(): for i, study_event in enumerate(study_events): eventslist.append({ 'study_event_oid': oid, 'repeat_key': i + 1, 'start_long': study_event.start_long, 'end_long': study_event.end_long, 'forms': mkformslist(study_event.forms) }) return eventslist return mkeventslist(self.data)
Add Subjects model, methods for report and export
Add Subjects model, methods for report and export
Python
bsd-3-clause
qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
--- +++ @@ -0,0 +1,79 @@ +from collections import defaultdict +from corehq.apps.users.models import CouchUser +from custom.openclinica.const import AUDIT_LOGS +from custom.openclinica.utils import ( + OpenClinicaIntegrationError, + is_item_group_repeating, + is_study_event_repeating, + get_item_measurement_unit, + get_question_item, + get_oc_user, + get_study_event_name, +) + + +class Subject(object): + """ + Manages data for a subject case + """ + def __init__(self, subject_key, study_subject_id, domain): + self.subject_key = subject_key + self.study_subject_id = study_subject_id + + # We need the domain to get study metadata for study events and item groups + self._domain = domain + + # This subject's data. Stored as subject[study_event_oid][i][form_oid][item_group_oid][j][item_oid] + # (Study events and item groups are lists because they can repeat.) + self.data = {} + + def get_report_events(self): + """ + The events as they appear in the report. + + These are useful for scheduling events in OpenClinica, which cannot be imported from ODM until they have + been scheduled. + """ + events = [] + for study_events in self.data.itervalues(): + for study_event in study_events: + events.append( + '"{name}" ({start} - {end})'.format( + name=study_event.name, + start=study_event.start_short, + end=study_event.end_short)) + return ', '.join(events) + + def get_export_data(self): + """ + Transform Subject.data into the structure that CdiscOdmExportWriter expects + """ + mkitemlist = lambda d: [dict(v, item_oid=k) for k, v in d.iteritems()] # `dict()` updates v with item_oid + + def mkitemgrouplist(itemgroupdict): + itemgrouplist = [] + for oid, item_groups in itemgroupdict.iteritems(): + for i, item_group in enumerate(item_groups): + itemgrouplist.append({ + 'item_group_oid': oid, + 'repeat_key': i + 1, + 'items': mkitemlist(item_group.items) + }) + return itemgrouplist + + mkformslist = lambda d: [{'form_oid': k, 'item_groups': mkitemgrouplist(v)} for k, v in d.iteritems()] + + def mkeventslist(eventsdict): + eventslist = [] + for oid, study_events in eventsdict.iteritems(): + for i, study_event in enumerate(study_events): + eventslist.append({ + 'study_event_oid': oid, + 'repeat_key': i + 1, + 'start_long': study_event.start_long, + 'end_long': study_event.end_long, + 'forms': mkformslist(study_event.forms) + }) + return eventslist + + return mkeventslist(self.data)
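To make get_export_data() easier to follow, the nesting it walks, per the subject[study_event_oid][i][form_oid][item_group_oid][j][item_oid] comment, looks roughly like this (all OIDs and values are hypothetical; study_event and item_group stand for objects with the attributes the methods access):

subject.data = {
    'SE_VISIT1': [study_event, ...],        # study_event_oid -> repeating list
}
# study_event carries .start_long, .end_long and .forms:
# study_event.forms = {
#     'F_INTAKE': {                         # form_oid
#         'IG_VITALS': [item_group, ...],   # item_group_oid -> repeating list
#     },
# }
# item_group.items = {'I_PULSE': {...}}     # item_oid -> item dict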
9d2a2b0e1f066b2606e62ec019b56d4659ed86b1
pygraphc/clustering/ClusterEvaluation.py
pygraphc/clustering/ClusterEvaluation.py
from sklearn import metrics class ClusterEvaluation(object): @staticmethod def get_evaluated(evaluated_file): with open(evaluated_file, 'r') as ef: evaluations = ef.readlines() evaluation_labels = [evaluation.split(';')[0] for evaluation in evaluations] return evaluation_labels @staticmethod def get_adjusted_rand_score(standard_file, prediction_file): standard_labels = ClusterEvaluation.get_evaluated(standard_file) prediction_labels = ClusterEvaluation.get_evaluated(prediction_file) return metrics.adjusted_rand_score(standard_labels, prediction_labels)
Add cluster evaluation: adjusted Rand index
Add cluster evaluation: adjusted Rand index
Python
mit
studiawan/pygraphc
--- +++ @@ -0,0 +1,18 @@ +from sklearn import metrics + + +class ClusterEvaluation(object): + @staticmethod + def get_evaluated(evaluated_file): + with open(evaluated_file, 'r') as ef: + evaluations = ef.readlines() + + evaluation_labels = [evaluation.split(';')[0] for evaluation in evaluations] + return evaluation_labels + + @staticmethod + def get_adjusted_rand_score(standard_file, prediction_file): + standard_labels = ClusterEvaluation.get_evaluated(standard_file) + prediction_labels = ClusterEvaluation.get_evaluated(prediction_file) + + return metrics.adjusted_rand_score(standard_labels, prediction_labels)
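adjusted_rand_score() compares partitions up to a relabelling, so two identical clusterings score 1.0 even when the label names differ:

from sklearn import metrics

metrics.adjusted_rand_score([0, 0, 1, 1], [1, 1, 0, 0])  # 1.0: same partition, swapped labels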
f06e5a8c701f06d40597cd268a6739988c2fff56
corehq/apps/cleanup/tasks.py
corehq/apps/cleanup/tasks.py
import os
from collections import defaultdict

from django.conf import settings
from django.core.management import call_command

from celery.schedules import crontab
from celery.task import periodic_task
from datetime import datetime

from corehq.apps.cleanup.management.commands.fix_xforms_with_undefined_xmlns import \
    parse_log_message, ERROR_SAVING, SET_XMLNS, MULTI_MATCH


def get_summary_stats_from_stream(stream):
    summary = {
        # A dictionary like: {
        #     "foo-domain": 7,
        #     "bar-domain": 3,
        # }
        'not_fixed': defaultdict(lambda: 0),
        'fixed': defaultdict(lambda: 0),
        'errors': defaultdict(lambda: 0),
        'submitting_bad_forms': defaultdict(set),
        'multi_match_builds': set(),
    }

    for line in stream:
        level, event, extras = parse_log_message(line)
        domain = extras.get('domain', '')
        if event == ERROR_SAVING:
            summary['errors'][domain] += 1
        elif event == SET_XMLNS or event == MULTI_MATCH:
            summary['submitting_bad_forms'][domain].add(
                extras.get('username', '')
            )
            if event == SET_XMLNS:
                summary['fixed'][domain] += 1
            if event == MULTI_MATCH:
                summary['not_fixed'][domain] += 1
                summary['multi_match_builds'].add(
                    (domain, extras.get('build_id', ''))
                )
    return summary

def pprint_stats(stats, outstream):
    outstream.write("Number of errors: {}\n".format(sum(stats['errors'].values())))
    outstream.write("Number of xforms that we could not fix: {}\n".format(sum(stats['not_fixed'].values())))
    outstream.write("Number of xforms that we fixed: {}\n".format(sum(stats['fixed'].values())))
    outstream.write("Domains and users that submitted bad xforms:\n")
    for domain, users in sorted(stats['submitting_bad_forms'].items()):
        outstream.write(
            "    {} ({} fixed, {} not fixed, {} errors)\n".format(
                domain, stats['fixed'][domain], stats['not_fixed'][domain], stats['errors'][domain]
            )
        )
        for user in sorted(list(users)):
            outstream.write("        {}\n".format(user))
Add functions for parsing log file
Add functions for parsing log file
Python
bsd-3-clause
dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
--- +++ @@ -0,0 +1,59 @@
+import os
+from collections import defaultdict
+
+from django.conf import settings
+from django.core.management import call_command
+
+from celery.schedules import crontab
+from celery.task import periodic_task
+from datetime import datetime
+
+from corehq.apps.cleanup.management.commands.fix_xforms_with_undefined_xmlns import \
+    parse_log_message, ERROR_SAVING, SET_XMLNS, MULTI_MATCH
+
+
+def get_summary_stats_from_stream(stream):
+    summary = {
+        # A dictionary like: {
+        #     "foo-domain": 7,
+        #     "bar-domain": 3,
+        # }
+        'not_fixed': defaultdict(lambda: 0),
+        'fixed': defaultdict(lambda: 0),
+        'errors': defaultdict(lambda: 0),
+        'submitting_bad_forms': defaultdict(set),
+        'multi_match_builds': set(),
+    }
+
+    for line in stream:
+        level, event, extras = parse_log_message(line)
+        domain = extras.get('domain', '')
+        if event == ERROR_SAVING:
+            summary['errors'][domain] += 1
+        elif event == SET_XMLNS or event == MULTI_MATCH:
+            summary['submitting_bad_forms'][domain].add(
+                extras.get('username', '')
+            )
+            if event == SET_XMLNS:
+                summary['fixed'][domain] += 1
+            if event == MULTI_MATCH:
+                summary['not_fixed'][domain] += 1
+                summary['multi_match_builds'].add(
+                    (domain, extras.get('build_id', ''))
+                )
+    return summary
+
+def pprint_stats(stats, outstream):
+    outstream.write("Number of errors: {}\n".format(sum(stats['errors'].values())))
+    outstream.write("Number of xforms that we could not fix: {}\n".format(sum(stats['not_fixed'].values())))
+    outstream.write("Number of xforms that we fixed: {}\n".format(sum(stats['fixed'].values())))
+    outstream.write("Domains and users that submitted bad xforms:\n")
+    for domain, users in sorted(stats['submitting_bad_forms'].items()):
+        outstream.write(
+            "    {} ({} fixed, {} not fixed, {} errors)\n".format(
+                domain, stats['fixed'][domain], stats['not_fixed'][domain], stats['errors'][domain]
+            )
+        )
+        for user in sorted(list(users)):
+            outstream.write("        {}\n".format(user))
+
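Note that errors are tallied per domain here so the summary matches what pprint_stats() reads back out; the original `summary['errors'] += 1` would have raised a TypeError, since the value is a defaultdict. A minimal driver (log_lines stands in for a hypothetical iterable of fix_xforms log messages):

import sys

stats = get_summary_stats_from_stream(log_lines)  # log_lines: hypothetical input
pprint_stats(stats, sys.stdout)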
095b9cc5f2e9a87220e6f40f88bf6ecd598ca681
vistrails/packages/componentSearch/init.py
vistrails/packages/componentSearch/init.py
#Copied imports from HTTP package init.py file from PyQt4 import QtGui from core.modules.vistrails_module import ModuleError from core.configuration import get_vistrails_persistent_configuration from gui.utils import show_warning import core.modules.vistrails_module import core.modules import core.modules.basic_modules import core.modules.module_registry import core.system from core import debug from component_search_form import * class ComponentSearch(core.modules.vistrails_module.Module): pass def initialize(*args, **keywords): reg = core.modules.module_registry.get_module_registry() basic = core.modules.basic_modules reg.add_module(ComponentSearch, abstract=True)
Add abstract box for ComponentSearch so users can drag it into their workflows.
Add abstract box for ComponentSearch so users can drag it into their workflows.
Python
bsd-3-clause
CMUSV-VisTrails/WorkflowRecommendation,CMUSV-VisTrails/WorkflowRecommendation,CMUSV-VisTrails/WorkflowRecommendation
--- +++ @@ -0,0 +1,22 @@ +#Copied imports from HTTP package init.py file +from PyQt4 import QtGui +from core.modules.vistrails_module import ModuleError +from core.configuration import get_vistrails_persistent_configuration +from gui.utils import show_warning +import core.modules.vistrails_module +import core.modules +import core.modules.basic_modules +import core.modules.module_registry +import core.system +from core import debug + +from component_search_form import * + +class ComponentSearch(core.modules.vistrails_module.Module): + pass + +def initialize(*args, **keywords): + reg = core.modules.module_registry.get_module_registry() + basic = core.modules.basic_modules + + reg.add_module(ComponentSearch, abstract=True)
c3a9d78ca3ffbad0e11192e896db8cd0c2758154
vote/migrations/0002_auto_20160315_0006.py
vote/migrations/0002_auto_20160315_0006.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('vote', '0001_initial'), ] operations = [ migrations.AlterField( model_name='multiquestion', name='group_name', field=models.CharField(max_length=100), ), ]
UPDATE - add migration file
UPDATE - add migration file
Python
mit
mingkim/QuesCheetah,mingkim/QuesCheetah,mingkim/QuesCheetah,mingkim/QuesCheetah
--- +++ @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('vote', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='multiquestion', + name='group_name', + field=models.CharField(max_length=100), + ), + ]
9a9ee99129cee92c93fbc9e2cc24b7b933d51aac
confirmation/migrations/0001_initial.py
confirmation/migrations/0001_initial.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ] operations = [ migrations.CreateModel( name='Confirmation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('object_id', models.PositiveIntegerField()), ('date_sent', models.DateTimeField(verbose_name='sent')), ('confirmation_key', models.CharField(max_length=40, verbose_name='activation key')), ('content_type', models.ForeignKey(to='contenttypes.ContentType')), ], options={ 'verbose_name': 'confirmation email', 'verbose_name_plural': 'confirmation emails', }, bases=(models.Model,), ), ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('contenttypes', '0001_initial'), ] operations = [ migrations.CreateModel( name='Confirmation', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('object_id', models.PositiveIntegerField()), ('date_sent', models.DateTimeField(verbose_name='sent')), ('confirmation_key', models.CharField(max_length=40, verbose_name='activation key')), ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')), ], options={ 'verbose_name': 'confirmation email', 'verbose_name_plural': 'confirmation emails', }, bases=(models.Model,), ), ]
Add on_delete in foreign keys.
confirmation: Add on_delete in foreign keys. on_delete will be a required arg for ForeignKey in Django 2.0. Set it to models.CASCADE on models and in existing migrations if you want to maintain the current default behavior. See https://docs.djangoproject.com/en/1.11/ref/models/fields/#django.db.models.ForeignKey.on_delete
Python
apache-2.0
hackerkid/zulip,dhcrzf/zulip,Galexrt/zulip,showell/zulip,amanharitsh123/zulip,zulip/zulip,vabs22/zulip,vaidap/zulip,jackrzhang/zulip,jrowan/zulip,hackerkid/zulip,zulip/zulip,tommyip/zulip,kou/zulip,jackrzhang/zulip,brockwhittaker/zulip,brockwhittaker/zulip,tommyip/zulip,shubhamdhama/zulip,eeshangarg/zulip,shubhamdhama/zulip,brockwhittaker/zulip,Galexrt/zulip,brainwane/zulip,andersk/zulip,brockwhittaker/zulip,amanharitsh123/zulip,vabs22/zulip,synicalsyntax/zulip,tommyip/zulip,verma-varsha/zulip,kou/zulip,mahim97/zulip,rht/zulip,amanharitsh123/zulip,shubhamdhama/zulip,dhcrzf/zulip,mahim97/zulip,jrowan/zulip,andersk/zulip,timabbott/zulip,eeshangarg/zulip,synicalsyntax/zulip,brainwane/zulip,andersk/zulip,shubhamdhama/zulip,rht/zulip,rishig/zulip,synicalsyntax/zulip,synicalsyntax/zulip,Galexrt/zulip,rishig/zulip,zulip/zulip,jrowan/zulip,verma-varsha/zulip,dhcrzf/zulip,rht/zulip,synicalsyntax/zulip,tommyip/zulip,eeshangarg/zulip,amanharitsh123/zulip,timabbott/zulip,hackerkid/zulip,kou/zulip,andersk/zulip,tommyip/zulip,punchagan/zulip,hackerkid/zulip,vabs22/zulip,mahim97/zulip,punchagan/zulip,verma-varsha/zulip,showell/zulip,vaidap/zulip,jrowan/zulip,mahim97/zulip,rishig/zulip,jackrzhang/zulip,timabbott/zulip,timabbott/zulip,vabs22/zulip,kou/zulip,Galexrt/zulip,brainwane/zulip,tommyip/zulip,tommyip/zulip,rishig/zulip,showell/zulip,shubhamdhama/zulip,verma-varsha/zulip,rishig/zulip,dhcrzf/zulip,Galexrt/zulip,verma-varsha/zulip,Galexrt/zulip,timabbott/zulip,eeshangarg/zulip,jrowan/zulip,brainwane/zulip,zulip/zulip,eeshangarg/zulip,dhcrzf/zulip,rishig/zulip,brockwhittaker/zulip,punchagan/zulip,mahim97/zulip,vabs22/zulip,vabs22/zulip,andersk/zulip,jackrzhang/zulip,vaidap/zulip,eeshangarg/zulip,hackerkid/zulip,jackrzhang/zulip,zulip/zulip,shubhamdhama/zulip,mahim97/zulip,timabbott/zulip,kou/zulip,verma-varsha/zulip,rishig/zulip,hackerkid/zulip,brainwane/zulip,kou/zulip,zulip/zulip,showell/zulip,punchagan/zulip,jrowan/zulip,vaidap/zulip,punchagan/zulip,punchagan/zulip,rht/zulip,eeshangarg/zulip,kou/zulip,showell/zulip,punchagan/zulip,shubhamdhama/zulip,rht/zulip,timabbott/zulip,amanharitsh123/zulip,jackrzhang/zulip,andersk/zulip,dhcrzf/zulip,dhcrzf/zulip,rht/zulip,showell/zulip,brainwane/zulip,synicalsyntax/zulip,amanharitsh123/zulip,showell/zulip,jackrzhang/zulip,zulip/zulip,vaidap/zulip,Galexrt/zulip,hackerkid/zulip,vaidap/zulip,andersk/zulip,brockwhittaker/zulip,rht/zulip,brainwane/zulip,synicalsyntax/zulip
--- +++ @@ -2,6 +2,7 @@ from __future__ import unicode_literals from django.db import models, migrations +import django.db.models.deletion class Migration(migrations.Migration): @@ -18,7 +19,7 @@ ('object_id', models.PositiveIntegerField()), ('date_sent', models.DateTimeField(verbose_name='sent')), ('confirmation_key', models.CharField(max_length=40, verbose_name='activation key')), - ('content_type', models.ForeignKey(to='contenttypes.ContentType')), + ('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')), ], options={ 'verbose_name': 'confirmation email',
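The migration change mirrors what the model definition itself must declare once Django 2.0 makes on_delete mandatory; a sketch of the model-side equivalent (other fields omitted):

from django.contrib.contenttypes.models import ContentType
from django.db import models

class Confirmation(models.Model):
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)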
7ab0cc93703abf6716b353f38a009897ab154ce4
nova/tests/test_plugin_api_extensions.py
nova/tests/test_plugin_api_extensions.py
# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pkg_resources import nova from nova.api.openstack.compute import extensions as computeextensions from nova.api.openstack import extensions from nova.openstack.common.plugin import plugin from nova.openstack.common.plugin import pluginmanager from nova import test class StubController(object): def i_am_the_stub(self): pass class StubControllerExtension(extensions.ExtensionDescriptor): """This is a docstring. We need it.""" name = 'stubextension' alias = 'stubby' def get_resources(self): resources = [] res = extensions.ResourceExtension('testme', StubController()) resources.append(res) return resources service_list = [] class TestPluginClass(plugin.Plugin): def __init__(self, service_name): super(TestPluginClass, self).__init__(service_name) self._add_api_extension_descriptor(StubControllerExtension) service_list.append(service_name) class MockEntrypoint(pkg_resources.EntryPoint): def load(self): return TestPluginClass class APITestCase(test.TestCase): """Test case for the plugin api extension interface""" def test_add_extension(self): def mock_load(_s): return TestPluginClass() def mock_iter_entry_points(_t): return [MockEntrypoint("fake", "fake", ["fake"])] self.stubs.Set(pkg_resources, 'iter_entry_points', mock_iter_entry_points) global service_list service_list = [] # Marking out the default extension paths makes this test MUCH faster. self.flags(osapi_compute_extension=[]) self.flags(osapi_volume_extension=[]) found = False mgr = computeextensions.ExtensionManager() for res in mgr.get_resources(): # We have to use this weird 'dir' check because # the plugin framework muddies up the classname # such that 'isinstance' doesn't work right. if 'i_am_the_stub' in dir(res.controller): found = True self.assertTrue(found) self.assertEqual(len(service_list), 1) self.assertEqual(service_list[0], 'compute-extensions')
Add the plugin framework from common; use and test.
Add the plugin framework from common; use and test. For blueprint novaplugins. Change-Id: Id4a5ae3ebb91f941956e2f73ecfd9ea1d290a235
Python
apache-2.0
n0ano/ganttclient
--- +++ @@ -0,0 +1,90 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import pkg_resources + +import nova +from nova.api.openstack.compute import extensions as computeextensions +from nova.api.openstack import extensions +from nova.openstack.common.plugin import plugin +from nova.openstack.common.plugin import pluginmanager +from nova import test + + +class StubController(object): + + def i_am_the_stub(self): + pass + + +class StubControllerExtension(extensions.ExtensionDescriptor): + """This is a docstring. We need it.""" + name = 'stubextension' + alias = 'stubby' + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension('testme', + StubController()) + resources.append(res) + return resources + + +service_list = [] + + +class TestPluginClass(plugin.Plugin): + + def __init__(self, service_name): + super(TestPluginClass, self).__init__(service_name) + self._add_api_extension_descriptor(StubControllerExtension) + service_list.append(service_name) + + +class MockEntrypoint(pkg_resources.EntryPoint): + def load(self): + return TestPluginClass + + +class APITestCase(test.TestCase): + """Test case for the plugin api extension interface""" + def test_add_extension(self): + def mock_load(_s): + return TestPluginClass() + + def mock_iter_entry_points(_t): + return [MockEntrypoint("fake", "fake", ["fake"])] + + self.stubs.Set(pkg_resources, 'iter_entry_points', + mock_iter_entry_points) + global service_list + service_list = [] + + # Marking out the default extension paths makes this test MUCH faster. + self.flags(osapi_compute_extension=[]) + self.flags(osapi_volume_extension=[]) + + found = False + mgr = computeextensions.ExtensionManager() + for res in mgr.get_resources(): + # We have to use this weird 'dir' check because + # the plugin framework muddies up the classname + # such that 'isinstance' doesn't work right. + if 'i_am_the_stub' in dir(res.controller): + found = True + + self.assertTrue(found) + self.assertEqual(len(service_list), 1) + self.assertEqual(service_list[0], 'compute-extensions')
285236e1045915706b0cf2c6137273be7f9eb5d6
modules/module.py
modules/module.py
# module.py # Author: Sébastien Combéfis # Version: May 25, 2016 from abc import * class Module(metaclass=ABCMeta): '''Abstract class representing a generic module.''' def __init__(self, name): self.__name = name @property def name(self): return self.__name @abstractmethod def widget(self): '''Returns a function that renders the widget view of the module Pre: - Post: The returned value contains the HTML rendering of the widget view of this module or None if not supported by this module ''' ... @abstractmethod def page(self): '''Returns a function that renders the page view of the module Pre: - Post: The returned value contains the HTML rendering of the page view of this module or None if not supported by this module ''' ...
Add generic abstract Module class
Modules: Add generic abstract Module class
Python
agpl-3.0
ECAM-Brussels/ECAMTV,ECAM-Brussels/ECAMTV,ECAM-Brussels/ECAMTV
--- +++ @@ -0,0 +1,34 @@ +# module.py +# Author: Sébastien Combéfis +# Version: May 25, 2016 + +from abc import * + +class Module(metaclass=ABCMeta): + '''Abstract class representing a generic module.''' + def __init__(self, name): + self.__name = name + + @property + def name(self): + return self.__name + + @abstractmethod + def widget(self): + '''Returns a function that renders the widget view of the module + + Pre: - + Post: The returned value contains the HTML rendering of the widget view + of this module or None if not supported by this module + ''' + ... + + @abstractmethod + def page(self): + '''Returns a function that renders the page view of the module + + Pre: - + Post: The returned value contains the HTML rendering of the page view + of this module or None if not supported by this module + ''' + ...
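A concrete module only has to fill in the two abstract hooks; a hypothetical subclass:

class ClockModule(Module):
    """Hypothetical module rendering a fixed clock widget."""
    def widget(self):
        return lambda: '<div class="clock">12:00</div>'

    def page(self):
        return None  # this module offers no full-page view

clock = ClockModule('clock')
print(clock.name)  # 'clock'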
e110c968ece35e41c467aeb5fceb9274023e7e82
pyplaybulb/rainbow.py
pyplaybulb/rainbow.py
from pyplaybulb.playbulb import Playbulb EFFECT_FLASH = '00' EFFECT_PULSE = '01' EFFECT_RAINBOW = '02' EFFECT_RAINBOW_FADE = '03' class Rainbow(Playbulb): hexa_set_colour = '0x001b' hexa_effect = '0x0019' hexa_get_colour = '0x0019' def set_colour(self, colour): self.connection.char_write(self.hexa_set_colour, colour) def get_colour(self): return self.connection.char_read(self.hexa_get_colour) def set_effect(self, effect_type, color, speed): self.connection.char_write(self.hexa_effect, color+effect_type+'00'+speed+'00')
Create child class for Rainbow bulb
Create child class for Rainbow bulb
Python
mit
litobro/PyPlaybulb
--- +++ @@ -0,0 +1,20 @@ +from pyplaybulb.playbulb import Playbulb + +EFFECT_FLASH = '00' +EFFECT_PULSE = '01' +EFFECT_RAINBOW = '02' +EFFECT_RAINBOW_FADE = '03' + +class Rainbow(Playbulb): + hexa_set_colour = '0x001b' + hexa_effect = '0x0019' + hexa_get_colour = '0x0019' + + def set_colour(self, colour): + self.connection.char_write(self.hexa_set_colour, colour) + + def get_colour(self): + return self.connection.char_read(self.hexa_get_colour) + + def set_effect(self, effect_type, color, speed): + self.connection.char_write(self.hexa_effect, color+effect_type+'00'+speed+'00')
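Hypothetical usage, assuming the Playbulb base class establishes self.connection and that colours are 8-hex-digit strings (the white/red/green/blue layout is an assumption; only the payload order colour + effect + '00' + speed + '00' is taken from set_effect above):

bulb = Rainbow()  # constructor arguments depend on the Playbulb base class
bulb.set_colour('00ff0000')                        # assumed WWRRGGBB: pure red
bulb.set_effect(EFFECT_RAINBOW, '00000000', '20')  # '20' is a hypothetical speed value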
e233352d5016c2b57ec4edbc4366ca4347bc1d98
demo/start_servers.py
demo/start_servers.py
""" start_servers.py <Purpose> A simple script to start the three cloud-side Uptane servers: the Director (including its per-vehicle repositories) the Image Repository the Timeserver To run the demo services in non-interactive mode, run: python start_servers.py To run the demo services in interactive mode, run: python -i -c "from demo.start_servers import *; main()" In either mode, the demo services will respond to commands sent via XMLRPC. """ import threading import demo import demo.demo_timeserver as dt import demo.demo_director as dd import demo.demo_image_repo as di from six.moves import xmlrpc_server def main(): # Start demo Image Repo, including http server and xmlrpc listener (for # webdemo) di.clean_slate() # Start demo Director, including http server and xmlrpc listener (for # manifests, registrations, and webdemo) dd.clean_slate() # Start demo Timeserver, including xmlrpc listener (for requests from demo # Primary) dt.listen() if __name__ == '__main__': main()
Create a single script to run the three demo services
DEMO: Create a single script to run the three demo services (image repo, director, and timeserver)
Python
mit
uptane/uptane,awwad/uptane,awwad/uptane,uptane/uptane
--- +++ @@ -0,0 +1,46 @@ +""" +start_servers.py + +<Purpose> + A simple script to start the three cloud-side Uptane servers: + the Director (including its per-vehicle repositories) + the Image Repository + the Timeserver + + To run the demo services in non-interactive mode, run: + python start_servers.py + + To run the demo services in interactive mode, run: + python -i -c "from demo.start_servers import *; main()" + + In either mode, the demo services will respond to commands sent via XMLRPC. + +""" +import threading +import demo +import demo.demo_timeserver as dt +import demo.demo_director as dd +import demo.demo_image_repo as di +from six.moves import xmlrpc_server + + +def main(): + + # Start demo Image Repo, including http server and xmlrpc listener (for + # webdemo) + di.clean_slate() + + # Start demo Director, including http server and xmlrpc listener (for + # manifests, registrations, and webdemo) + dd.clean_slate() + + # Start demo Timeserver, including xmlrpc listener (for requests from demo + # Primary) + dt.listen() + + + + + +if __name__ == '__main__': + main()
1058ed0847d151246299f73b325004fc04946fa0
Basics/challenge_2.py
Basics/challenge_2.py
#!/usr/bin/env python if __name__ == '__main__': s1 = 0x1c0111001f010100061a024b53535009181c s2 = 0x686974207468652062756c6c277320657965 print(hex(s1 ^ s2))
Set 1 - Challenge 2
Set 1 - Challenge 2
Python
apache-2.0
Scythe14/Crypto
--- +++ @@ -0,0 +1,7 @@ +#!/usr/bin/env python + +if __name__ == '__main__': + s1 = 0x1c0111001f010100061a024b53535009181c + s2 = 0x686974207468652062756c6c277320657965 + + print(hex(s1 ^ s2))
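The printed value, 0x746865206b696420646f6e277420706c6179, matches the result Cryptopals publishes for this challenge; decoding it as ASCII shows why the exercise is memorable:

result = 0x1c0111001f010100061a024b53535009181c ^ 0x686974207468652062756c6c277320657965
print(bytes.fromhex('{:x}'.format(result)))  # b"the kid don't play"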
37704a2e905342bf867225fb5a8a3fec0c55a9fd
Problems/stringDiff.py
Problems/stringDiff.py
#!/Applications/anaconda/envs/Python3/bin def main(): # Test suite tests = [ [None, None, None], # Should throw a TypeError ['abcd', 'abcde', 'e'], ['aaabbcdd', 'abdbacade', 'e'], ['abdbacade', 'aaabbcdd', 'e'] ] for item in tests: try: temp_result = find_diff(item[0], item[1]) if temp_result[0] == item[2]: print('PASSED: find_diff({}, {}) returned {}'.format(item[0], item[1], temp_result)) else: print('FAILED: find_diff({}, {}) returned {}, should have returned {}'.format(item[0], item[1], temp_result, item[2])) except TypeError: print('PASSED TypeError test') return 0 return def find_diff(str1, str2): ''' Finds the one additional character in str 2 vs. str1 Input: two strings Output: char (one additional character in str2) Assumes str2 contains all characters from str1, with one additional one ''' if str1 is None or str2 is None: raise TypeError shorter = str1 if len(str1) < len(str2) else str2 longer = str1 if len(str1) >= len(str2) else str2 result = set(longer) - set(shorter) return result.pop() if __name__ == '__main__': main()
Add strDiff problem and tests. Minor tweak to setup.py
Add strDiff problem and tests. Minor tweak to setup.py
Python
mit
HKuz/Test_Code
--- +++ @@ -0,0 +1,47 @@ +#!/Applications/anaconda/envs/Python3/bin + + +def main(): + # Test suite + tests = [ + [None, None, None], # Should throw a TypeError + ['abcd', 'abcde', 'e'], + ['aaabbcdd', 'abdbacade', 'e'], + ['abdbacade', 'aaabbcdd', 'e'] + ] + + for item in tests: + try: + temp_result = find_diff(item[0], item[1]) + if temp_result[0] == item[2]: + print('PASSED: find_diff({}, {}) returned {}'.format(item[0], item[1], temp_result)) + else: + print('FAILED: find_diff({}, {}) returned {}, should have returned {}'.format(item[0], item[1], temp_result, item[2])) + + except TypeError: + print('PASSED TypeError test') + + return 0 + + return + + +def find_diff(str1, str2): + ''' + Finds the one additional character in str 2 vs. str1 + Input: two strings + Output: char (one additional character in str2) + Assumes str2 contains all characters from str1, with one additional one + ''' + if str1 is None or str2 is None: + raise TypeError + + shorter = str1 if len(str1) < len(str2) else str2 + longer = str1 if len(str1) >= len(str2) else str2 + + result = set(longer) - set(shorter) + return result.pop() + + +if __name__ == '__main__': + main()
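A caveat the test suite doesn't exercise: the set difference only finds the extra character when it does not already occur in the shorter string; find_diff('ab', 'aab') leaves an empty set, so pop() raises KeyError. A multiset variant that also handles repeated characters, as a sketch:

from collections import Counter

def find_diff_counter(str1, str2):
    shorter, longer = sorted((str1, str2), key=len)
    extra = Counter(longer) - Counter(shorter)
    return next(iter(extra))  # the single surplus character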
e120f5fac68e2daf7cdf6e9d7b17b1f63a330595
djangoappengine/db/expressions.py
djangoappengine/db/expressions.py
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.expressions import ExpressionNode

OPERATION_MAP = {
    ExpressionNode.ADD: lambda x, y: x + y,
    ExpressionNode.SUB: lambda x, y: x - y,
    ExpressionNode.MUL: lambda x, y: x * y,
    ExpressionNode.DIV: lambda x, y: x / y,
    ExpressionNode.MOD: lambda x, y: x % y,
    ExpressionNode.AND: lambda x, y: x & y,
    ExpressionNode.OR: lambda x, y: x | y,
}

class ExpressionEvaluator(SQLEvaluator):
    def __init__(self, expression, query, entity, allow_joins=True):
        super(ExpressionEvaluator, self).__init__(expression, query,
                                                  allow_joins)
        self.entity = entity

    ##################################################
    # Visitor methods for final expression evaluation #
    ##################################################

    def evaluate_node(self, node, qn, connection):
        values = []
        for child in node.children:
            if hasattr(child, 'evaluate'):
                value = child.evaluate(self, qn, connection)
            else:
                value = child

            if value is not None:
                values.append(value)

        return OPERATION_MAP[node.connector](*values)

    def evaluate_leaf(self, node, qn, connection):
        return self.entity[qn(self.cols[node][1])]
from django.db.models.sql.expressions import SQLEvaluator from django.db.models.expressions import ExpressionNode OPERATION_MAP = { ExpressionNode.ADD: lambda x, y: x + y, ExpressionNode.SUB: lambda x, y: x - y, ExpressionNode.MUL: lambda x, y: x * y, ExpressionNode.DIV: lambda x, y: x / y, ExpressionNode.MOD: lambda x, y: x % y, ExpressionNode.BITAND: lambda x, y: x & y, ExpressionNode.BITOR: lambda x, y: x | y, } class ExpressionEvaluator(SQLEvaluator): def __init__(self, expression, query, entity, allow_joins=True): super(ExpressionEvaluator, self).__init__(expression, query, allow_joins) self.entity = entity ################################################## # Vistor methods for final expression evaluation # ################################################## def evaluate_node(self, node, qn, connection): values = [] for child in node.children: if hasattr(child, 'evaluate'): value = child.evaluate(self, qn, connection) else: value = child if value is not None: values.append(value) return OPERATION_MAP[node.connector](*values) def evaluate_leaf(self, node, qn, connection): return self.entity[qn(self.cols[node][1])]
Fix ExpressionNode names that changed in django 1.5
Fix ExpressionNode names that changed in django 1.5
Python
bsd-3-clause
django-nonrel/djangoappengine,Implisit/djangoappengine,dwdraju/djangoappengine
--- +++ @@ -8,8 +8,8 @@ ExpressionNode.MUL: lambda x, y: x * y, ExpressionNode.DIV: lambda x, y: x / y, ExpressionNode.MOD: lambda x, y: x % y, - ExpressionNode.AND: lambda x, y: x & y, - ExpressionNode.OR: lambda x, y: x | y, + ExpressionNode.BITAND: lambda x, y: x & y, + ExpressionNode.BITOR: lambda x, y: x | y, }
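Code that has to run on both sides of this rename can resolve the attribute once at import time. A minimal compatibility sketch, assuming only the attribute names shown in this record (not tested against every Django release):

from django.db.models.expressions import ExpressionNode

# Prefer the Django 1.5 names, fall back to the pre-1.5 ones.
BITAND = getattr(ExpressionNode, 'BITAND', getattr(ExpressionNode, 'AND', None))
BITOR = getattr(ExpressionNode, 'BITOR', getattr(ExpressionNode, 'OR', None))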
31f16844dd98516b1f57e3913d0fdba3e5715aa8
logicaldelete/models.py
logicaldelete/models.py
import datetime from django.db import models from logicaldelete import managers class Model(models.Model): """ This base model provides date fields and functionality to enable logical delete functionality in derived models. """ date_created = models.DateTimeField(default=datetime.datetime.now) date_modified = models.DateTimeField(default=datetime.datetime.now) date_removed = models.DateTimeField(null=True, blank=True) objects = managers.LogicalDeletedManager() def active(self): return self.date_removed == None active.boolean = True def delete(self): self.date_removed = datetime.datetime.now() self.save() class Meta: abstract = True
import datetime

from django.db import models
from django.utils import timezone

from logicaldelete import managers


class Model(models.Model):
    """
    This base model provides date fields and functionality to
    enable logical delete functionality in derived models.
    """

    date_created = models.DateTimeField(default=datetime.datetime.now)
    date_modified = models.DateTimeField(default=datetime.datetime.now)
    date_removed = models.DateTimeField(null=True, blank=True)

    objects = managers.LogicalDeletedManager()

    def active(self):
        return self.date_removed == None
    active.boolean = True

    def delete(self):
        '''
        Soft delete all fk related objects that
        inherit from logicaldelete class
        '''

        # Fetch related models
        related_objs = [relation.get_accessor_name() for
                        relation in self._meta.get_all_related_objects()]

        for objs_model in related_objs:
            # Retrieve all related objects
            objs = getattr(self, objs_model).all()

            for obj in objs:
                # Checking if inherits from logicaldelete
                if not issubclass(obj.__class__, Model):
                    break
                obj.delete()

        # Soft delete the object
        self.date_removed = timezone.now()
        self.save()

    class Meta:
        abstract = True
Delete method now soft-deletes all FK related objects
Delete method now soft-deletes all FK related objects
Python
mit
pinax/pinax-models,angvp/django-logical-delete,Ubiwhere/pinax-models,angvp/django-logical-delete,naringas/pinax-models
--- +++ @@ -10,20 +10,40 @@ This base model provides date fields and functionality to enable logical delete functionality in derived models. """ - - date_created = models.DateTimeField(default=datetime.datetime.now) + + date_created = models.DateTimeField(default=datetime.datetime.now) date_modified = models.DateTimeField(default=datetime.datetime.now) - date_removed = models.DateTimeField(null=True, blank=True) - + date_removed = models.DateTimeField(null=True, blank=True) + objects = managers.LogicalDeletedManager() - + def active(self): return self.date_removed == None active.boolean = True - + def delete(self): - self.date_removed = datetime.datetime.now() + ''' + Soft delete all fk related objects that + inherit from logicaldelete class + ''' + + # Fetch related models + related_objs = [relation.get_accessor_name() for + relation in self._meta.get_all_related_objects()] + + for objs_model in related_objs: + # Retrieve all related objects + objs = getattr(self, objs_model).all() + + for obj in objs: + # Checking if inherits from logicaldelete + if not issubclass(obj.__class__, Model): + break + obj.delete() + + # Soft delete the object + self.date_removed = timezone.now() self.save() - + class Meta: abstract = True
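A usage sketch for the cascading soft delete, with hypothetical models (Author and Book are illustrative names, not from this repository; the sketch assumes it lives in the same models module):

class Author(Model):
    name = models.CharField(max_length=100)

class Book(Model):
    author = models.ForeignKey(Author)

# author.delete() stamps date_removed on the Author and on each related
# Book: Book subclasses the logical-delete base, so the reverse relations
# found via _meta (e.g. book_set) are walked and soft-deleted in turn,
# while every row stays in the database.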
3dfc4bfbb71d1e97a6b8213f338df487fbae5fcc
topics/migrations/0016_auto_20151221_0923.py
topics/migrations/0016_auto_20151221_0923.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('topics', '0015_auto_20151218_1823'), ] operations = [ migrations.AlterField( model_name='link', name='pub_date', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published'), ), migrations.AlterField( model_name='link', name='tags', field=models.ManyToManyField(blank=True, to='topics.Tag'), ), migrations.AlterField( model_name='link', name='title', field=models.TextField(blank=True), ), migrations.AlterField( model_name='tag', name='pub_date', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published'), ), migrations.AlterField( model_name='topicsite', name='create_date', field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created'), ), ]
Add auto migration for minor model changes
Add auto migration for minor model changes
Python
mit
andychase/codebook,andychase/codebook
--- +++ @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +from __future__ import unicode_literals + +from django.db import models, migrations +import django.utils.timezone + + +class Migration(migrations.Migration): + + dependencies = [ + ('topics', '0015_auto_20151218_1823'), + ] + + operations = [ + migrations.AlterField( + model_name='link', + name='pub_date', + field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published'), + ), + migrations.AlterField( + model_name='link', + name='tags', + field=models.ManyToManyField(blank=True, to='topics.Tag'), + ), + migrations.AlterField( + model_name='link', + name='title', + field=models.TextField(blank=True), + ), + migrations.AlterField( + model_name='tag', + name='pub_date', + field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published'), + ), + migrations.AlterField( + model_name='topicsite', + name='create_date', + field=models.DateTimeField(default=django.utils.timezone.now, verbose_name='date created'), + ), + ]
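The AlterField operations above imply model fields roughly like the following; this is an editorial reconstruction, not the project's actual models.py:

from django.db import models
from django.utils import timezone

class Link(models.Model):
    pub_date = models.DateTimeField('date published', default=timezone.now)
    tags = models.ManyToManyField('Tag', blank=True)
    title = models.TextField(blank=True)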
2207351e805f11c39f843ca0e0eff261a7a5bde8
python/010_summation_of_primes/primes.py
python/010_summation_of_primes/primes.py
from math import sqrt
from typing import Generator


def prime_generator(limit: int) -> Generator[int, None, None]:
    if limit < 2:
        return
    else:
        yield 2
    primes = [2]
    for x in range(3, limit + 1, 2):
        if any(map(lambda divisor: x % divisor == 0, primes[: int(sqrt(x))])):
            continue
        else:
            primes.append(x)
            yield x


primes_20 = tuple(prime_generator(20))

primes_two_million = prime_generator(2000000)
sum_primes = sum(primes_two_million)
Add half-naive solution to problem 10
Add half-naive solution to problem 10
Python
bsd-3-clause
gidj/euler,gidj/euler
--- +++ @@ -0,0 +1,22 @@ +from math import sqrt +from typing import Generator + + +def prime_generator(limit: int) -> Generator[int, None, int]: + if limit < 2: + return + else: + yield 2 + primes = [2] + for x in range(3, limit + 1, 2): + if any(map(lambda divisor: x % divisor == 0, primes[: int(sqrt(x))])): + continue + else: + primes.append(x) + yield x + + +primes_20 = tuple(prime_generator(20)) + +primes_two_milliot = prime_generator(2000000) +sum_primes = sum(primes_two_million)
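Why the slice primes[: int(sqrt(x))] is safe: the first floor(sqrt(x)) primes always include every prime <= sqrt(x), since there are at most m primes up to any m, so the slice merely tests a few extra divisors. A quick editorial sanity check:

assert list(prime_generator(10)) == [2, 3, 5, 7]
assert sum(prime_generator(10)) == 17          # 2 + 3 + 5 + 7
assert list(prime_generator(1)) == []          # no primes below 2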
747ff2fbaf9e6216ba932f446418819723611174
euler/solutions/solution_12.py
euler/solutions/solution_12.py
"""Highly divisible triangular number The sequence of triangle numbers is generated by adding the natural numbers. The 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be: 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... Let us list the factors of the first seven triangle numbers: 1: 1 3: 1, 3 6: 1, 2, 3, 6 10: 1, 2, 5, 10 15: 1, 3, 5, 15 21: 1, 3, 7, 21 28: 1, 2, 4, 7, 14, 28 We can see that 28 is the first triangle number to have over five divisors. What is the value of the first triangle number to have over five hundred divisors? """ import math def triangle_number_generator(): """Generator yielding the sequence of triangle numbers.""" i = 0 while True: i += 1 yield int(i * (i + 1) / 2) def check_divisors(target): """Return the value of the first triangle number to have greater than the target number of divisors.""" triangles = triangle_number_generator() for triangle in triangles: divisors = 0 for i in range(1, int(math.sqrt(triangle) + 1)): if triangle % i == 0: divisors += 1 if i*i != triangle: divisors += 1 if divisors > target: return triangle def check_divisors_alternate(target): """Return the value of the first triangle number to have greater than the target number of divisors. Uses prime factorizations. Any integer N can be expressed as N = p_0^a_0 + p_1^a_1 + ... + p_n^a_n, where p_n is a distinct prime number and a_n is its exponent. The number of divisors D(N) of any integer N can be computed as D(N) = (a_0 + 1) * (a_1 + 1) * ... * (a_n + 1) """ triangles = triangle_number_generator() for triangle in triangles: divisors = 1 number = triangle for candidate in range(2, triangle): exponent = 0 while number % candidate == 0: exponent += 1 number /= candidate divisors *= exponent + 1 if divisors > target: return triangle if number == 1: break
Add solution for problem 12
Add solution for problem 12 Highly divisible triangular number
Python
mit
rlucioni/project-euler
--- +++ @@ -0,0 +1,85 @@ +"""Highly divisible triangular number + +The sequence of triangle numbers is generated by adding the natural numbers. + +The 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. + +The first ten terms would be: + +1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ... + +Let us list the factors of the first seven triangle numbers: + +1: 1 +3: 1, 3 +6: 1, 2, 3, 6 +10: 1, 2, 5, 10 +15: 1, 3, 5, 15 +21: 1, 3, 7, 21 +28: 1, 2, 4, 7, 14, 28 + +We can see that 28 is the first triangle number to have over five divisors. + +What is the value of the first triangle number to have over five hundred divisors? +""" +import math + + +def triangle_number_generator(): + """Generator yielding the sequence of triangle numbers.""" + i = 0 + while True: + i += 1 + yield int(i * (i + 1) / 2) + + +def check_divisors(target): + """Return the value of the first triangle number to have greater than the target number of divisors.""" + triangles = triangle_number_generator() + + for triangle in triangles: + divisors = 0 + + for i in range(1, int(math.sqrt(triangle) + 1)): + if triangle % i == 0: + divisors += 1 + + if i*i != triangle: + divisors += 1 + + if divisors > target: + return triangle + + +def check_divisors_alternate(target): + """Return the value of the first triangle number to have greater than the target number of divisors. + + Uses prime factorizations. Any integer N can be expressed as + + N = p_0^a_0 + p_1^a_1 + ... + p_n^a_n, + + where p_n is a distinct prime number and a_n is its exponent. The number of divisors D(N) of any integer + N can be computed as + + D(N) = (a_0 + 1) * (a_1 + 1) * ... * (a_n + 1) + """ + triangles = triangle_number_generator() + + for triangle in triangles: + divisors = 1 + number = triangle + + for candidate in range(2, triangle): + exponent = 0 + + while number % candidate == 0: + exponent += 1 + number /= candidate + + divisors *= exponent + 1 + + if divisors > target: + return triangle + + if number == 1: + break
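A worked instance of the divisor-count formula used by the alternate method: 28 = 2^2 * 7, so D(28) = (2 + 1)(1 + 1) = 6, the first triangle number with more than five divisors. Both strategies can be cross-checked on the docstring's own example:

assert check_divisors(5) == 28
assert check_divisors_alternate(5) == 28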
65aee742ea0e95b200152f8a90d9ad5ee86b2512
scripts/locustfile.py
scripts/locustfile.py
from locust import HttpLocust, TaskSet, task import urllib from faker import Faker fake = Faker() class SPARQLQueryTasks(TaskSet): @task def query_simple(self): self.client.get("/sparql/?query=select+%2A+where+%7B+%3Fs+%3Fp+%3Fo+%7D+limit+0") @task def query_realistic(self): self.client.get("/sparql/?query=select+%2A+where+%7B+%3Fs+%3Fp+%3Fo+%7D+limit+0") class SPARQLInsertTasks(TaskSet): @task def insert_simple(self): self.client.get("/sparql/?query=INSERT+DATA+%7B+GRAPH+%3Chttps%3A%2F%2Fdataone.org%3E+%7B+%3Chttp%3A%2F%2Fexample.com%2FX%3E+%3Chttp%3A%2F%2Fexample.com%2FisA%3E+%3Chttp%3A%2F%2Fexample.com%2FY%3E+%7D%7D") @task def insert_realistic(self): self.client.get("/sparql/?query=INSERT+DATA+%7B+GRAPH+%3Chttps%3A%2F%2Fdataone.org%3E+%7B+%3Chttp%3A%2F%2Fexample.com%2FX%3E+%3Chttp%3A%2F%2Fexample.com%2FisA%3E+%22{}%22+%7D%7D".format(urllib.quote(fake.name()))) class QueryLocust(HttpLocust): weight = 5 task_set = SPARQLQueryTasks class InsertLocust(HttpLocust): weight = 3 task_set = SPARQLInsertTasks
Add a Locust load testing script
Add a Locust load testing script This isn't quite doing everything I want but it's a nice way to check some performance characteristics
Python
apache-2.0
ec-geolink/d1lod,ec-geolink/d1lod,ec-geolink/d1lod,ec-geolink/d1lod
--- +++ @@ -0,0 +1,31 @@ +from locust import HttpLocust, TaskSet, task +import urllib +from faker import Faker + +fake = Faker() + +class SPARQLQueryTasks(TaskSet): + @task + def query_simple(self): + self.client.get("/sparql/?query=select+%2A+where+%7B+%3Fs+%3Fp+%3Fo+%7D+limit+0") + + @task + def query_realistic(self): + self.client.get("/sparql/?query=select+%2A+where+%7B+%3Fs+%3Fp+%3Fo+%7D+limit+0") + +class SPARQLInsertTasks(TaskSet): + @task + def insert_simple(self): + self.client.get("/sparql/?query=INSERT+DATA+%7B+GRAPH+%3Chttps%3A%2F%2Fdataone.org%3E+%7B+%3Chttp%3A%2F%2Fexample.com%2FX%3E+%3Chttp%3A%2F%2Fexample.com%2FisA%3E+%3Chttp%3A%2F%2Fexample.com%2FY%3E+%7D%7D") + + @task + def insert_realistic(self): + self.client.get("/sparql/?query=INSERT+DATA+%7B+GRAPH+%3Chttps%3A%2F%2Fdataone.org%3E+%7B+%3Chttp%3A%2F%2Fexample.com%2FX%3E+%3Chttp%3A%2F%2Fexample.com%2FisA%3E+%22{}%22+%7D%7D".format(urllib.quote(fake.name()))) + +class QueryLocust(HttpLocust): + weight = 5 + task_set = SPARQLQueryTasks + +class InsertLocust(HttpLocust): + weight = 3 + task_set = SPARQLInsertTasks
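Two details of the script worth illustrating: the weight attributes set the spawn ratio of the two user classes, and insert_realistic URL-encodes the fake name before splicing it into the query string. A small editorial sketch (Python 2, matching the script):

import urllib

print urllib.quote("Jane Doe")     # Jane%20Doe

# With weights 5 and 3, roughly 5 / (5 + 3) = 62.5% of simulated
# users run the query task set; the rest run inserts.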
e84d811f13347da3625e3a30d200e836f6393e9d
scripts/obo_to_sql.py
scripts/obo_to_sql.py
# Note: based on - http://blog.adimian.com/2014/10/cte-and-closure-tables/ import sqlite3 conn = sqlite3.connect('test.db') cursor = conn.cursor() def create_tables(): ''' Creates the two tables used to store the ontology concepts and terms. - 'nodes' stores the .obo content. - 'closure' stores the hierarchy in a transitive closure representation. ''' cursor.execute( 'CREATE TABLE IF NOT EXISTS nodes (' 'id INTEGER NOT NULL PRIMARY KEY,' 'parent INTEGER REFERENCES nodes(id),' 'name VARCHAR(100))') cursor.execute( 'CREATE TABLE IF NOT EXISTS closure (' 'parent INTEGER REFERENCES nodes(id), ' 'child INTEGER REFERENCES nodes(id), ' 'depth INTEGER)') conn.commit() def add_unknown_concepts_to_db(obo_content): ''' Inserts concepts/terms into the database. Moreover, the transitive closure table is also updated upon insertion of an element to ensure it's retrievable later... Args: obo_content (list): a list of Stanzas, i.e. dictionaries containing the relevant obo structure, such as id, name, and relationship. ''' known_ids = [r[0] for r in cursor.execute('SELECT id FROM nodes').fetchall()] for i in obo_content: _id = int(str(i.tags['id'][0]).split(':')[1]) # The root element does not have a parent. Assign it a zero. _pid = (int(str(i.tags['is_a'][0]).split(':')[1]) if 'is_a' in str(i) else 0) _name = str(i.tags['name'][0]) # Only add NEW terms to the database. if _id not in known_ids: # Add ontological term to node table. cursor.execute('INSERT INTO nodes VALUES (?, ?, ?)', (_id, _pid, _name)) last_id = cursor.lastrowid # Collect ancestor of parent, and insert into closure table. cursor.execute('SELECT parent, ? as child, depth+1 FROM closure ' 'WHERE child = ?', (_id, _pid)) stm = 'INSERT INTO closure (parent, child, depth) VALUES (?, ?, ?)' cursor.executemany(stm, cursor.fetchall()) cursor.execute(stm, (last_id, last_id, 0)) conn.commit() if __name__ == "__main__": import obo create_tables() obo_content = [i for i in obo.Parser('../data/structure.obo')] add_unknown_concepts_to_db(obo_content)
Store OBO data into a suitable queryable format
Store OBO data into a suitable queryable format Achieved using transitive closures, which allows a hierarchy structure (OBO ontology) to be stored in a SQL format.
Python
mit
jawrainey/healthchat,jawrainey/healthchat
--- +++ @@ -0,0 +1,67 @@ +# Note: based on - http://blog.adimian.com/2014/10/cte-and-closure-tables/ +import sqlite3 + +conn = sqlite3.connect('test.db') +cursor = conn.cursor() + + +def create_tables(): + ''' + Creates the two tables used to store the ontology concepts and terms. + - 'nodes' stores the .obo content. + - 'closure' stores the hierarchy in a transitive closure representation. + ''' + cursor.execute( + 'CREATE TABLE IF NOT EXISTS nodes (' + 'id INTEGER NOT NULL PRIMARY KEY,' + 'parent INTEGER REFERENCES nodes(id),' + 'name VARCHAR(100))') + + cursor.execute( + 'CREATE TABLE IF NOT EXISTS closure (' + 'parent INTEGER REFERENCES nodes(id), ' + 'child INTEGER REFERENCES nodes(id), ' + 'depth INTEGER)') + conn.commit() + + +def add_unknown_concepts_to_db(obo_content): + ''' + Inserts concepts/terms into the database. + + Moreover, the transitive closure table is also updated upon insertion + of an element to ensure it's retrievable later... + + Args: + obo_content (list): a list of Stanzas, i.e. dictionaries containing + the relevant obo structure, such as id, name, and relationship. + ''' + known_ids = [r[0] for r in + cursor.execute('SELECT id FROM nodes').fetchall()] + + for i in obo_content: + _id = int(str(i.tags['id'][0]).split(':')[1]) + # The root element does not have a parent. Assign it a zero. + _pid = (int(str(i.tags['is_a'][0]).split(':')[1]) + if 'is_a' in str(i) else 0) + _name = str(i.tags['name'][0]) + # Only add NEW terms to the database. + if _id not in known_ids: + # Add ontological term to node table. + cursor.execute('INSERT INTO nodes VALUES (?, ?, ?)', + (_id, _pid, _name)) + last_id = cursor.lastrowid + # Collect ancestor of parent, and insert into closure table. + cursor.execute('SELECT parent, ? as child, depth+1 FROM closure ' + 'WHERE child = ?', (_id, _pid)) + stm = 'INSERT INTO closure (parent, child, depth) VALUES (?, ?, ?)' + cursor.executemany(stm, cursor.fetchall()) + cursor.execute(stm, (last_id, last_id, 0)) + conn.commit() + + +if __name__ == "__main__": + import obo + create_tables() + obo_content = [i for i in obo.Parser('../data/structure.obo')] + add_unknown_concepts_to_db(obo_content)
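The payoff of the closure table built above is that subtree retrieval becomes a single SELECT with no recursive traversal. An editorial sketch against the same schema (descendants is a hypothetical helper):

def descendants(cursor, term_id):
    # Every row (parent, child, depth) with depth > 0 links term_id
    # to one of its transitive children.
    cursor.execute('SELECT child FROM closure '
                   'WHERE parent = ? AND depth > 0', (term_id,))
    return [row[0] for row in cursor.fetchall()]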
9b48cb18980ae2e55ce02a84576f65f0bd8a27bb
FacebookPlugins.py
FacebookPlugins.py
""" @note: enable X """ from trac.core import Component from trac.wiki.macros import WikiMacroBase class FacebookPlugins(Component): """ Support for Facebook plugins. """ revision = "$Rev$" url = "$URL$" class LikeButton(WikiMacroBase): """ The [http://developers.facebook.com/docs/reference/plugins/like Like button] lets users share pages from your site back to their Facebook profile with one click. Examples: {{{ [[LikeButton]] # current page [[LikeButton(http://google.nl)]] # google.nl with default layout [[LikeButton(http://google.com,button)]] # google.com with button layout [[LikeButton(http://google.com,box)]] # google.com with box layout }}} """ revision = "$Rev$" url = "$URL$" def expand_macro(self, formatter, name, args): """Description here. @param name: the actual name of the macro @param args: text enclosed in parenthesis at the call of the macro """ options = unicode(args).split(",") href = self.url layout = 'standard' # options: 'button_count', 'box_count' show_faces = 'true' width = '450' height = '80' colorscheme = 'light' # or 'dark' action = 'like' # or 'recommend' if len(options) > 0: href = options[0] if len(options) > 1: layout = options[1] + "_count" iframe_code = '<iframe src="http://www.facebook.com/plugins/like.php?href=%s&layout=%s&show_faces=%s&width=%s&action=%s&colorscheme=%s&height=%s" scrolling="no" frameborder="0" style="border:none; overflow:hidden; width:%spx; height:%spx;" allowTransparency="true"></iframe>' % ( href, layout, show_faces, width, action, colorscheme, height, width, height) return iframe_code
Add wiki macro for the Like button.
Add wiki macro for the Like button.
Python
mit
thijstriemstra/trac-facebook-plugins
--- +++ @@ -0,0 +1,59 @@ +""" +@note: enable X +""" + +from trac.core import Component +from trac.wiki.macros import WikiMacroBase + + +class FacebookPlugins(Component): + """ + Support for Facebook plugins. + """ + + revision = "$Rev$" + url = "$URL$" + + +class LikeButton(WikiMacroBase): + """ + The [http://developers.facebook.com/docs/reference/plugins/like Like button] lets + users share pages from your site back to their Facebook profile with one click. + + Examples: + {{{ + [[LikeButton]] # current page + [[LikeButton(http://google.nl)]] # google.nl with default layout + [[LikeButton(http://google.com,button)]] # google.com with button layout + [[LikeButton(http://google.com,box)]] # google.com with box layout + }}} + """ + + revision = "$Rev$" + url = "$URL$" + + def expand_macro(self, formatter, name, args): + """Description here. + + @param name: the actual name of the macro + @param args: text enclosed in parenthesis at the call of the macro + """ + options = unicode(args).split(",") + href = self.url + layout = 'standard' # options: 'button_count', 'box_count' + show_faces = 'true' + width = '450' + height = '80' + colorscheme = 'light' # or 'dark' + action = 'like' # or 'recommend' + + if len(options) > 0: + href = options[0] + + if len(options) > 1: + layout = options[1] + "_count" + + iframe_code = '<iframe src="http://www.facebook.com/plugins/like.php?href=%s&layout=%s&show_faces=%s&width=%s&action=%s&colorscheme=%s&height=%s" scrolling="no" frameborder="0" style="border:none; overflow:hidden; width:%spx; height:%spx;" allowTransparency="true"></iframe>' % ( + href, layout, show_faces, width, action, colorscheme, height, width, height) + + return iframe_code
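How the positional macro options map onto the iframe parameters, following the parsing in expand_macro above (Python 2, as in the plugin):

args = 'http://google.com,button'
options = unicode(args).split(",")    # ['http://google.com', 'button']
href = options[0]                     # 'http://google.com'
layout = options[1] + "_count"        # 'button_count'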
fc72ca537e6eaf5f4bf04ed4511ec1acdd9eae11
checks/check_deprecation_warning.py
checks/check_deprecation_warning.py
from __future__ import print_function, absolute_import, division import imgaug as ia class Dummy1(object): @ia.deprecated(alt_func="Foo") def __init__(self): pass class Dummy2(object): @ia.deprecated(alt_func="Foo", comment="Some example comment.") def __init__(self): pass class Dummy3(object): def __init__(self): pass @ia.deprecated(alt_func="bar()", comment="Some example comment.") def foo(self): pass @ia.deprecated(alt_func="bar()", comment="Some example comment.") def foo(): pass def main(): Dummy1() Dummy2() Dummy3() foo() if __name__ == "__main__": main()
Add check script to test 'deprecated' decorator
Add check script to test 'deprecated' decorator
Python
mit
aleju/imgaug,aleju/imgaug,aleju/ImageAugmenter
--- +++ @@ -0,0 +1,41 @@ +from __future__ import print_function, absolute_import, division + +import imgaug as ia + + +class Dummy1(object): + @ia.deprecated(alt_func="Foo") + def __init__(self): + pass + + +class Dummy2(object): + @ia.deprecated(alt_func="Foo", comment="Some example comment.") + def __init__(self): + pass + + +class Dummy3(object): + def __init__(self): + pass + + @ia.deprecated(alt_func="bar()", + comment="Some example comment.") + def foo(self): + pass + + +@ia.deprecated(alt_func="bar()", comment="Some example comment.") +def foo(): + pass + + +def main(): + Dummy1() + Dummy2() + Dummy3() + foo() + + +if __name__ == "__main__": + main()
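imgaug's actual ia.deprecated implementation is not shown in this record; a decorator with this call signature typically reduces to warnings.warn. An editorial sketch of the general shape (an assumption, not imgaug's code):

import functools
import warnings

def deprecated(alt_func, comment=""):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Emit a DeprecationWarning pointing callers at the replacement.
            warnings.warn("%s is deprecated. Use %s instead. %s"
                          % (func.__name__, alt_func, comment),
                          DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper
    return decorator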
c97ab456f22dca6a69e3775cc1353dbf3957389a
homeassistant/components/light/limitlessled.py
homeassistant/components/light/limitlessled.py
""" homeassistant.components.light.limitlessled ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for LimitlessLED bulbs, also known as... EasyBulb AppLight AppLamp MiLight LEDme dekolight iLight """ import random import logging from homeassistant.helpers.entity import ToggleEntity from homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME from homeassistant.components.light import ATTR_BRIGHTNESS _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_devices_callback, discovery_info=None): try: import ledcontroller except ImportError: _LOGGER.exception("Error while importing dependency ledcontroller.") return led = ledcontroller.LedController(config['host']) lights = [] for i in range(1, 5): if 'group_%d_name' % (i) in config: lights.append( LimitlessLED( led, i, config['group_%d_name' % (i)], STATE_OFF ) ) add_devices_callback(lights) class LimitlessLED(ToggleEntity): def __init__(self, led, group, name, state, brightness=180): self.led = led self.group = group # LimitlessLEDs don't report state, we have track it ourselves. self.led.off(self.group) self._name = name or DEVICE_DEFAULT_NAME self._state = state self._brightness = brightness @property def should_poll(self): """ No polling needed for a demo light. """ return False @property def name(self): """ Returns the name of the device if any. """ return self._name @property def state(self): """ Returns the name of the device if any. """ return self._state @property def state_attributes(self): """ Returns optional state attributes. """ if self.is_on: return { ATTR_BRIGHTNESS: self._brightness, } @property def is_on(self): """ True if device is on. """ return self._state == STATE_ON def turn_on(self, **kwargs): """ Turn the device on. """ self._state = STATE_ON if ATTR_BRIGHTNESS in kwargs: self._brightness = kwargs[ATTR_BRIGHTNESS] self.led.set_brightness(self._brightness, self.group) def turn_off(self, **kwargs): """ Turn the device off. """ self._state = STATE_OFF self.led.off(self.group)
Add basic support for LimitlessLED
Add basic support for LimitlessLED
Python
apache-2.0
sfam/home-assistant,ErykB2000/home-assistant,open-homeautomation/home-assistant,Danielhiversen/home-assistant,toddeye/home-assistant,nugget/home-assistant,open-homeautomation/home-assistant,deisi/home-assistant,EricRho/home-assistant,tboyce021/home-assistant,theolind/home-assistant,mKeRix/home-assistant,Duoxilian/home-assistant,CCOSTAN/home-assistant,auduny/home-assistant,MungoRae/home-assistant,pottzer/home-assistant,EricRho/home-assistant,instantchow/home-assistant,JshWright/home-assistant,MartinHjelmare/home-assistant,hmronline/home-assistant,tboyce1/home-assistant,lukas-hetzenecker/home-assistant,bdfoster/blumate,morphis/home-assistant,Danielhiversen/home-assistant,mezz64/home-assistant,Duoxilian/home-assistant,xifle/home-assistant,soldag/home-assistant,emilhetty/home-assistant,varunr047/homefile,luxus/home-assistant,sfam/home-assistant,mezz64/home-assistant,LinuxChristian/home-assistant,justyns/home-assistant,keerts/home-assistant,aoakeson/home-assistant,qedi-r/home-assistant,varunr047/homefile,w1ll1am23/home-assistant,robbiet480/home-assistant,caiuspb/home-assistant,GenericStudent/home-assistant,jaharkes/home-assistant,vitorespindola/home-assistant,jamespcole/home-assistant,xifle/home-assistant,g12mcgov/home-assistant,florianholzapfel/home-assistant,leppa/home-assistant,DavidLP/home-assistant,ma314smith/home-assistant,SEJeff/home-assistant,nnic/home-assistant,kennedyshead/home-assistant,sfam/home-assistant,dorant/home-assistant,Theb-1/home-assistant,happyleavesaoc/home-assistant,postlund/home-assistant,dorant/home-assistant,nkgilley/home-assistant,Smart-Torvy/torvy-home-assistant,PetePriority/home-assistant,turbokongen/home-assistant,sdague/home-assistant,miniconfig/home-assistant,ct-23/home-assistant,emilhetty/home-assistant,tchellomello/home-assistant,aoakeson/home-assistant,home-assistant/home-assistant,jawilson/home-assistant,jawilson/home-assistant,molobrakos/home-assistant,varunr047/homefile,varunr047/homefile,ct-23/home-assistant,morphis/home-assistant,home-assistant/home-assistant,kyvinh/home-assistant,dmeulen/home-assistant,robbiet480/home-assistant,rohitranjan1991/home-assistant,Cinntax/home-assistant,mikaelboman/home-assistant,justyns/home-assistant,auduny/home-assistant,JshWright/home-assistant,pottzer/home-assistant,devdelay/home-assistant,tboyce021/home-assistant,robjohnson189/home-assistant,nevercast/home-assistant,alanbowman/home-assistant,deisi/home-assistant,Julian/home-assistant,aoakeson/home-assistant,instantchow/home-assistant,michaelarnauts/home-assistant,Julian/home-assistant,hexxter/home-assistant,tomduijf/home-assistant,ewandor/home-assistant,theolind/home-assistant,Zac-HD/home-assistant,shaftoe/home-assistant,Zac-HD/home-assistant,open-homeautomation/home-assistant,Zyell/home-assistant,maddox/home-assistant,jnewland/home-assistant,PetePriority/home-assistant,mikaelboman/home-assistant,rohitranjan1991/home-assistant,Teagan42/home-assistant,betrisey/home-assistant,oandrew/home-assistant,jnewland/home-assistant,maddox/home-assistant,alexmogavero/home-assistant,srcLurker/home-assistant,pottzer/home-assistant,molobrakos/home-assistant,auduny/home-assistant,Zyell/home-assistant,robjohnson189/home-assistant,Duoxilian/home-assistant,persandstrom/home-assistant,EricRho/home-assistant,sdague/home-assistant,MartinHjelmare/home-assistant,tboyce1/home-assistant,Smart-Torvy/torvy-home-assistant,SEJeff/home-assistant,w1ll1am23/home-assistant,MungoRae/home-assistant,coteyr/home-assistant,joopert/home-assistant,teodoc/home-assistant,mahendra-r/home-assistant,DavidLP/home-assistant,LinuxChristian/home-assistant,florianholzapfel/home-assistant,oandrew/home-assistant,kyvinh/home-assistant,stefan-jonasson/home-assistant,Zyell/home-assistant,joopert/home-assistant,Zac-HD/home-assistant,alanbowman/home-assistant,deisi/home-assistant,aronsky/home-assistant,mKeRix/home-assistant,SEJeff/home-assistant,hmronline/home-assistant,varunr047/homefile,tinloaf/home-assistant,ewandor/home-assistant,keerts/home-assistant,nugget/home-assistant,persandstrom/home-assistant,philipbl/home-assistant,jabesq/home-assistant,miniconfig/home-assistant,open-homeautomation/home-assistant,mahendra-r/home-assistant,nnic/home-assistant,dmeulen/home-assistant,tomduijf/home-assistant,tmm1/home-assistant,mKeRix/home-assistant,jaharkes/home-assistant,miniconfig/home-assistant,stefan-jonasson/home-assistant,jamespcole/home-assistant,nnic/home-assistant,srcLurker/home-assistant,bdfoster/blumate,morphis/home-assistant,vitorespindola/home-assistant,Smart-Torvy/torvy-home-assistant,michaelarnauts/home-assistant,jabesq/home-assistant,sanmiguel/home-assistant,LinuxChristian/home-assistant,alexmogavero/home-assistant,nevercast/home-assistant,Duoxilian/home-assistant,balloob/home-assistant,betrisey/home-assistant,morphis/home-assistant,jaharkes/home-assistant,JshWright/home-assistant,emilhetty/home-assistant,jamespcole/home-assistant,leppa/home-assistant,HydrelioxGitHub/home-assistant,shaftoe/home-assistant,Cinntax/home-assistant,caiuspb/home-assistant,adrienbrault/home-assistant,alexkolar/home-assistant,ma314smith/home-assistant,rohitranjan1991/home-assistant,balloob/home-assistant,sanmiguel/home-assistant,titilambert/home-assistant,ct-23/home-assistant,robjohnson189/home-assistant,hexxter/home-assistant,CCOSTAN/home-assistant,lukas-hetzenecker/home-assistant,ErykB2000/home-assistant,bdfoster/blumate,aequitas/home-assistant,FreekingDean/home-assistant,fbradyirl/home-assistant,bencmbrook/home-assistant,mikaelboman/home-assistant,tboyce1/home-assistant,FreekingDean/home-assistant,toddeye/home-assistant,hexxter/home-assistant,stefan-jonasson/home-assistant,sander76/home-assistant,stefan-jonasson/home-assistant,alexkolar/home-assistant,justyns/home-assistant,eagleamon/home-assistant,robjohnson189/home-assistant,hmronline/home-assistant,Theb-1/home-assistant,happyleavesaoc/home-assistant,oandrew/home-assistant,teodoc/home-assistant,MartinHjelmare/home-assistant,luxus/home-assistant,keerts/home-assistant,alexmogavero/home-assistant,ct-23/home-assistant,badele/home-assistant,florianholzapfel/home-assistant,pschmitt/home-assistant,Nzaga/home-assistant,deisi/home-assistant,Duoxilian/home-assistant,srcLurker/home-assistant,ct-23/home-assistant,instantchow/home-assistant,badele/home-assistant,hmronline/home-assistant,LinuxChristian/home-assistant,CCOSTAN/home-assistant,DavidLP/home-assistant,MungoRae/home-assistant,ErykB2000/home-assistant,leoc/home-assistant,devdelay/home-assistant,devdelay/home-assistant,Danielhiversen/home-assistant,Julian/home-assistant,ewandor/home-assistant,LinuxChristian/home-assistant,turbokongen/home-assistant,philipbl/home-assistant,Theb-1/home-assistant,sander76/home-assistant,Teagan42/home-assistant,xifle/home-assistant,Smart-Torvy/torvy-home-assistant,kyvinh/home-assistant,tinloaf/home-assistant,devdelay/home-assistant,dmeulen/home-assistant,leoc/home-assistant,Nzaga/home-assistant,hmronline/home-assistant,sffjunkie/home-assistant
--- +++ @@ -0,0 +1,102 @@ +""" +homeassistant.components.light.limitlessled +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Support for LimitlessLED bulbs, also known as... + +EasyBulb +AppLight +AppLamp +MiLight +LEDme +dekolight +iLight + +""" +import random +import logging + +from homeassistant.helpers.entity import ToggleEntity +from homeassistant.const import STATE_ON, STATE_OFF, DEVICE_DEFAULT_NAME +from homeassistant.components.light import ATTR_BRIGHTNESS + +_LOGGER = logging.getLogger(__name__) + + +def setup_platform(hass, config, add_devices_callback, discovery_info=None): + try: + import ledcontroller + except ImportError: + _LOGGER.exception("Error while importing dependency ledcontroller.") + return + + led = ledcontroller.LedController(config['host']) + + lights = [] + for i in range(1, 5): + if 'group_%d_name' % (i) in config: + lights.append( + LimitlessLED( + led, + i, + config['group_%d_name' % (i)], + STATE_OFF + ) + ) + + add_devices_callback(lights) + + +class LimitlessLED(ToggleEntity): + def __init__(self, led, group, name, state, brightness=180): + self.led = led + self.group = group + + # LimitlessLEDs don't report state, we have track it ourselves. + self.led.off(self.group) + + self._name = name or DEVICE_DEFAULT_NAME + self._state = state + self._brightness = brightness + + @property + def should_poll(self): + """ No polling needed for a demo light. """ + return False + + @property + def name(self): + """ Returns the name of the device if any. """ + return self._name + + @property + def state(self): + """ Returns the name of the device if any. """ + return self._state + + @property + def state_attributes(self): + """ Returns optional state attributes. """ + if self.is_on: + return { + ATTR_BRIGHTNESS: self._brightness, + } + + @property + def is_on(self): + """ True if device is on. """ + return self._state == STATE_ON + + def turn_on(self, **kwargs): + """ Turn the device on. """ + self._state = STATE_ON + + if ATTR_BRIGHTNESS in kwargs: + self._brightness = kwargs[ATTR_BRIGHTNESS] + + self.led.set_brightness(self._brightness, self.group) + + def turn_off(self, **kwargs): + """ Turn the device off. """ + self._state = STATE_OFF + self.led.off(self.group)
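Because the bulbs cannot report state, the class mirrors every command locally; a stub controller is enough to exercise that bookkeeping. An editorial sketch (FakeLedController is hypothetical, standing in for ledcontroller):

class FakeLedController(object):
    def off(self, group):
        pass

    def set_brightness(self, brightness, group):
        pass

light = LimitlessLED(FakeLedController(), 1, 'kitchen', STATE_OFF)
light.turn_on(**{ATTR_BRIGHTNESS: 128})
assert light.is_on
assert light.state_attributes == {ATTR_BRIGHTNESS: 128}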
10318a11dded5e69c3d9c98325613700c9b3db63
lib/spack/spack/cmd/dependents.py
lib/spack/spack/cmd/dependents.py
############################################################################## # Copyright (c) 2013, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://scalability-llnl.github.io/spack # Please also see the LICENSE file for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License (as published by # the Free Software Foundation) version 2.1 dated February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## import argparse import llnl.util.tty as tty import spack import spack.cmd description = "Show dependent packages." def setup_parser(subparser): subparser.add_argument( 'spec', nargs=argparse.REMAINDER, help="specs to list dependencies of.") def dependents(parser, args): specs = spack.cmd.parse_specs(args.spec, concretize=True) if len(specs) != 1: tty.die("spack dependents takes only one spec.") fmt = '$_$@$%@$+$=$#' deps = [d.format(fmt) for d in specs[0].package.installed_dependents] tty.msg("Dependents of %s" % specs[0].format(fmt), *deps)
Fix for dependent package detection.
Fix for dependent package detection.
Python
lgpl-2.1
lgarren/spack,lgarren/spack,iulian787/spack,mfherbst/spack,skosukhin/spack,LLNL/spack,LLNL/spack,EmreAtes/spack,iulian787/spack,tmerrick1/spack,TheTimmy/spack,mfherbst/spack,mfherbst/spack,tmerrick1/spack,lgarren/spack,krafczyk/spack,iulian787/spack,krafczyk/spack,skosukhin/spack,matthiasdiener/spack,lgarren/spack,EmreAtes/spack,matthiasdiener/spack,TheTimmy/spack,iulian787/spack,tmerrick1/spack,skosukhin/spack,LLNL/spack,krafczyk/spack,TheTimmy/spack,LLNL/spack,tmerrick1/spack,matthiasdiener/spack,LLNL/spack,TheTimmy/spack,lgarren/spack,krafczyk/spack,EmreAtes/spack,mfherbst/spack,skosukhin/spack,TheTimmy/spack,krafczyk/spack,skosukhin/spack,EmreAtes/spack,mfherbst/spack,tmerrick1/spack,iulian787/spack,matthiasdiener/spack,EmreAtes/spack,matthiasdiener/spack
--- +++ @@ -0,0 +1,46 @@ +############################################################################## +# Copyright (c) 2013, Lawrence Livermore National Security, LLC. +# Produced at the Lawrence Livermore National Laboratory. +# +# This file is part of Spack. +# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. +# LLNL-CODE-647188 +# +# For details, see https://scalability-llnl.github.io/spack +# Please also see the LICENSE file for our notice and the LGPL. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License (as published by +# the Free Software Foundation) version 2.1 dated February 1999. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and +# conditions of the GNU General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +############################################################################## +import argparse + +import llnl.util.tty as tty + +import spack +import spack.cmd + +description = "Show dependent packages." + +def setup_parser(subparser): + subparser.add_argument( + 'spec', nargs=argparse.REMAINDER, help="specs to list dependencies of.") + + +def dependents(parser, args): + specs = spack.cmd.parse_specs(args.spec, concretize=True) + if len(specs) != 1: + tty.die("spack dependents takes only one spec.") + + fmt = '$_$@$%@$+$=$#' + deps = [d.format(fmt) for d in specs[0].package.installed_dependents] + tty.msg("Dependents of %s" % specs[0].format(fmt), *deps)
8d1016437e87794fb39b447b51427bae98a51bc2
classes/jsonip.py
classes/jsonip.py
from json import load from urllib2 import urlopen class JsonIp: def __init__(self): url = 'https://jsonip.com/' uri = urlopen(url) response = load(uri) self.ip = response["ip"] # self.ip = '1.1.1.1'
Add one public IP provider
Add one public IP provider
Python
mit
Saphyel/ipteller,Saphyel/ipteller
--- +++ @@ -0,0 +1,11 @@ +from json import load +from urllib2 import urlopen + +class JsonIp: + + def __init__(self): + url = 'https://jsonip.com/' + uri = urlopen(url) + response = load(uri) + self.ip = response["ip"] + # self.ip = '1.1.1.1'
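urllib2 pins this class to Python 2; the same lookup on Python 3.6+ would look roughly like this (editorial port, same endpoint):

from json import load
from urllib.request import urlopen

ip = load(urlopen('https://jsonip.com/'))["ip"]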
ef1fa03d753f5d8a0b32831320a1b3e076ace363
moksha/apps/demo/MokshaJQPlotDemo/run_tests.py
moksha/apps/demo/MokshaJQPlotDemo/run_tests.py
#!/usr/bin/env python """ nose runner script. """ __requires__ = 'moksha' import pkg_resources import nose if __name__ == '__main__': nose.main()
Add a test runner for our jqplot demo too
Add a test runner for our jqplot demo too
Python
apache-2.0
ralphbean/moksha,mokshaproject/moksha,lmacken/moksha,mokshaproject/moksha,pombredanne/moksha,mokshaproject/moksha,pombredanne/moksha,ralphbean/moksha,lmacken/moksha,lmacken/moksha,ralphbean/moksha,pombredanne/moksha,pombredanne/moksha,mokshaproject/moksha
--- +++ @@ -0,0 +1,11 @@ +#!/usr/bin/env python +""" +nose runner script. +""" +__requires__ = 'moksha' + +import pkg_resources +import nose + +if __name__ == '__main__': + nose.main()
c8ec0689950a5fea0aff98afe54b172bd84e2ce9
examples/coregister.py
examples/coregister.py
"""Example using Tom's registration code from scipy. """ from os import path from glob import glob import scipy.ndimage._registration as reg # Data files basedir = '/Users/cburns/data/twaite' anatfile = path.join(basedir, 'ANAT1_V0001.img') funcdir = path.join(basedir, 'fMRIData') fileglob = path.join(funcdir, 'FUNC1_V000?.img') # Get first 10 images if __name__ == '__main__': print 'Coregister anatomical:\n', anatfile print '\nWith these functional images:' funclist = glob(fileglob) for func in funclist: print func measures, imageF_anat, fmri_series = \ reg.demo_MRI_coregistration(anatfile, funclist[0:4])
Add example using Tom's registration code in scipy.
Add example using Tom's registration code in scipy.
Python
bsd-3-clause
yarikoptic/NiPy-OLD,yarikoptic/NiPy-OLD
--- +++ @@ -0,0 +1,23 @@ +"""Example using Tom's registration code from scipy. + +""" + +from os import path +from glob import glob + +import scipy.ndimage._registration as reg + +# Data files +basedir = '/Users/cburns/data/twaite' +anatfile = path.join(basedir, 'ANAT1_V0001.img') +funcdir = path.join(basedir, 'fMRIData') +fileglob = path.join(funcdir, 'FUNC1_V000?.img') # Get first 10 images + +if __name__ == '__main__': + print 'Coregister anatomical:\n', anatfile + print '\nWith these functional images:' + funclist = glob(fileglob) + for func in funclist: + print func + measures, imageF_anat, fmri_series = \ + reg.demo_MRI_coregistration(anatfile, funclist[0:4])
62b01c3c1614d5719cc69be951b2f6c660e40faa
pyldap/libldap/tools.py
pyldap/libldap/tools.py
def iterate_array(arr, f=None): i = 0 while True: if not arr[i]: break yield arr[i] if f is None else f(arr[i]) i += 1
Add generic function for iterating arrays.
Add generic function for iterating arrays.
Python
bsd-3-clause
matusvalo/python-easyldap
--- +++ @@ -0,0 +1,7 @@ +def iterate_array(arr, f=None): + i = 0 + while True: + if not arr[i]: + break + yield arr[i] if f is None else f(arr[i]) + i += 1
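A usage sketch: the generator stops at the first falsy slot, mirroring the NULL-terminated arrays libldap returns, and f post-processes each element:

values = ['uid=alice', 'uid=bob', None]    # None stands in for the C NULL terminator
assert list(iterate_array(values)) == ['uid=alice', 'uid=bob']
assert list(iterate_array(values, str.upper)) == ['UID=ALICE', 'UID=BOB']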
b9759f60c9f107c3d2c319f53ed2985ee58dc319
src/tests/test_mock_pose.py
src/tests/test_mock_pose.py
try:
    from unittest.mock import patch, MagicMock
except ImportError:
    from mock import patch, MagicMock

import pytest

import rospy

MockTf2 = MagicMock()
modules = {"tf2_ros": MockTf2}
patcher = patch.dict("sys.modules", modules)
patcher.start()


try:
    rospy.init_node("pytest", anonymous=True)
except rospy.exceptions.ROSException:
    pass


@pytest.fixture(scope="module", autouse=True)
def teardown_module(request):
    def fin():
        patcher.stop()
    request.addfinalizer(fin)


class TestPoseGenerator(object):
    def test_tf_and_pose_same(self):
        from mock_pose import PoseGenerator

        pose = PoseGenerator.generate_pose()
        transform = PoseGenerator.pose_to_tf(pose)

        assert transform.transform.translation.x == pose.pose.position.x
        assert transform.transform.translation.y == pose.pose.position.y
        assert transform.transform.translation.z == pose.pose.position.z

        assert transform.transform.rotation.x == pose.pose.orientation.x
        assert transform.transform.rotation.y == pose.pose.orientation.y
        assert transform.transform.rotation.z == pose.pose.orientation.z
        assert transform.transform.rotation.w == pose.pose.orientation.w
Write test for mock_pose generator.
Write test for mock_pose generator.
Python
mit
masasin/spirit,masasin/spirit
--- +++ @@ -0,0 +1,42 @@ +try: + from unittest.mock import patch, MagicMock +except ImportError: + from mock import patch, MagicMock + +import pytest + +import rospy + +MockTf2 = MagicMock() +modules = {"tf2_ros": MockTf2} +patcher = patch.dict("sys.modules", modules) +patcher.start() + + +try: + rospy.init_node("pytest", anonymous=True) +except rospy.exceptions.ROSException: + pass + + +@pytest.fixture(scope="module") +def teardown_module(): + def fin(): + patcher.stop() + + +class TestPoseGenerator(object): + def test_tf_and_pose_same(self): + from mock_pose import PoseGenerator + + pose = PoseGenerator.generate_pose() + transform = PoseGenerator.pose_to_tf(pose) + + assert transform.transform.translation.x == pose.pose.position.x + assert transform.transform.translation.y == pose.pose.position.y + assert transform.transform.translation.z == pose.pose.position.z + + assert transform.transform.rotation.x == pose.pose.orientation.x + assert transform.transform.rotation.y == pose.pose.orientation.y + assert transform.transform.rotation.z == pose.pose.orientation.z + assert transform.transform.rotation.w == pose.pose.orientation.w
b3dcbe95d766d902d22a0c4c171cbbe5ce207571
python/tests/stress_test.py
python/tests/stress_test.py
#!/usr/bin/env python
import time
import sys
import os
from random import randint

# Hack to import from a parent dir
# http://stackoverflow.com/a/11158224/401554
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, parentdir)

from octo import Octo

octo = Octo('/dev/ttyACM0')
octo.reset()

# Test that the LEDs don't burn out or short or whatnot during continuous active state
while True:
    time.sleep(1)
    octo.led0(randint(0,255),randint(0,255),randint(0,255))
    time.sleep(1)
    octo.led1(randint(0,255),randint(0,255),randint(0,255))
    time.sleep(randint(60,120))
    octo.led1(randint(0,255),randint(0,255),randint(0,255))
    time.sleep(1)
    octo.led0(randint(0,255),randint(0,255),randint(0,255))
    time.sleep(randint(60,120))
Add test for longevity-testing: both LEDs ON at the same time for extended periods of time
Add test for longevity-testing: both LEDs ON at the same time for extended periods of time
Python
mit
anroots/teensy-moonica
--- +++ @@ -0,0 +1,27 @@ +#!/usr/bin/env python +import time +import sys +import os +from random import randint + +# Hack to import from a parent dir +# http://stackoverflow.com/a/11158224/401554 +parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +sys.path.insert(0, parentdir) + +from octo import Octo + +octo = Octo('/dev/ttyACM0') +octo.reset() + +# Test that the LEDs don't burn out or short or what knot during continious active state +while True: + time.sleep(1) + octo.led0(randint(0,255),randint(0,255),randint(0,255)) + time.sleep(1) + octo.led1(randint(0,255),randint(0,255),randint(0,255)) + time.sleep(randint(60,120)) + octo.led1(randint(0,255),randint(0,255),randint(0,255)) + time.sleep(1) + octo.led0(randint(0,255),randint(0,255),randint(0,255)) + time.sleep(randint(60,120))
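A small editorial tidy-up sketch for the repeated randint triples (rand_color is a hypothetical helper, not part of the script above):

def rand_color():
    return randint(0, 255), randint(0, 255), randint(0, 255)

octo.led0(*rand_color())
octo.led1(*rand_color())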
407c08899eccea60a2ae534ab0c1b000c58708ab
tests/test_agent_api.py
tests/test_agent_api.py
# No shebang line, this module is meant to be imported # # Copyright 2013 Oliver Palmer # Copyright 2013 Ambient Entertainment GmbH & Co. KG # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from utcore import ModelTestCase from pyfarm.master.utility import dumps try: from json import loads except ImportError: from simplejson import loads class TestAgentAPI(ModelTestCase): def test_agents_schema(self): response = self.client.get("/api/v1/agents/schema") self.assert200(response) self.assertEquals(response.json, {"ram": "INTEGER", "free_ram": "INTEGER", "use_address": "INTEGER", "ip": "IPv4Address", "hostname": "VARCHAR(255)", "cpus": "INTEGER", "port": "INTEGER", "state": "INTEGER", "ram_allocation": "FLOAT", "cpu_allocation": "FLOAT", "id": "INTEGER", "remote_ip": "IPv4Address"}) def test_agent_read_write(self): response1 = self.client.post("/api/v1/agents", content_type="application/json", data = dumps({"cpu_allocation": 1.0, "cpus": 16, "free_ram": 133, "hostname": "testagent1", "ip": "10.0.200.1", "port": 64994, "ram": 2048, "ram_allocation": 0.8, "state": 8 })) self.assertStatus(response1, 201) id = loads(response1.data)['id'] response2 = self.client.get("/api/v1/agents/%d" % id) self.assert200(response2) agent_data = loads(response2.data) assert len(agent_data) == 12 assert response2.json == { "ram": 2048, "cpu_allocation": 1.0, "use_address": 22, "ip": "10.0.200.1", "hostname": "testagent1", "cpus": 16, "ram_allocation": 0.8, "port": 64994, "state": 8, "free_ram": 133, "id": id, "remote_ip": None } # TODO Test updating an agent
Implement some tests for AgentAPI
Implement some tests for AgentAPI
Python
apache-2.0
pyfarm/pyfarm-master,pyfarm/pyfarm-master,pyfarm/pyfarm-master
--- +++ @@ -0,0 +1,77 @@ +# No shebang line, this module is meant to be imported +# +# Copyright 2013 Oliver Palmer +# Copyright 2013 Ambient Entertainment GmbH & Co. KG +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +from utcore import ModelTestCase +from pyfarm.master.utility import dumps +try: + from json import loads +except ImportError: + from simplejson import loads + +class TestAgentAPI(ModelTestCase): + def test_agents_schema(self): + response = self.client.get("/api/v1/agents/schema") + self.assert200(response) + self.assertEquals(response.json, {"ram": "INTEGER", + "free_ram": "INTEGER", + "use_address": "INTEGER", + "ip": "IPv4Address", + "hostname": "VARCHAR(255)", + "cpus": "INTEGER", + "port": "INTEGER", + "state": "INTEGER", + "ram_allocation": "FLOAT", + "cpu_allocation": "FLOAT", + "id": "INTEGER", + "remote_ip": "IPv4Address"}) + + def test_agent_read_write(self): + response1 = self.client.post("/api/v1/agents", + content_type="application/json", + data = dumps({"cpu_allocation": 1.0, + "cpus": 16, + "free_ram": 133, + "hostname": "testagent1", + "ip": "10.0.200.1", + "port": 64994, + "ram": 2048, + "ram_allocation": 0.8, + "state": 8 + })) + self.assertStatus(response1, 201) + id = loads(response1.data)['id'] + + response2 = self.client.get("/api/v1/agents/%d" % id) + self.assert200(response2) + agent_data = loads(response2.data) + assert len(agent_data) == 12 + assert response2.json == { + "ram": 2048, + "cpu_allocation": 1.0, + "use_address": 22, + "ip": "10.0.200.1", + "hostname": "testagent1", + "cpus": 16, + "ram_allocation": 0.8, + "port": 64994, + "state": 8, + "free_ram": 133, + "id": id, + "remote_ip": None + } + # TODO Test updating an agent
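The expected dict retypes every posted field, while id, use_address and remote_ip appear to be filled in server-side. A tighter editorial sketch that asserts the echoed subset without repetition (posted is an illustrative name):

posted = {"cpu_allocation": 1.0, "cpus": 16, "free_ram": 133,
          "hostname": "testagent1", "ip": "10.0.200.1", "port": 64994,
          "ram": 2048, "ram_allocation": 0.8, "state": 8}
for key, value in posted.items():
    assert response2.json[key] == value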
33448340d278da7e0653701d78cbab317893279d
AG/datasets/analyze.py
AG/datasets/analyze.py
#!/usr/bin/python import os import sys import lxml from lxml import etree import math class StatsCounter(object): prefixes = {} cur_tag = None def start( self, tag, attrib ): self.cur_tag = tag def end( self, tag ): pass #self.cur_tag = None def data( self, _data ): if self.cur_tag != "File" and self.cur_tag != "Dir": return data = _data.rstrip("/") if data == "": return dir_name = os.path.dirname( data ) if dir_name == "": return if not self.prefixes.has_key( dir_name ): self.prefixes[ dir_name ] = 0 self.prefixes[ dir_name ] += 1 def close( self ): return "closed!" if __name__ == "__main__": counter = StatsCounter() parser = etree.XMLParser( target=counter ) fd = open( sys.argv[1], "r" ) while True: buf = fd.read( 32768 ) if len(buf) == 0: break parser.feed( buf ) result = parser.close() order = counter.prefixes.keys() order.sort() size_bins = {} for path in order: count = counter.prefixes[path] print "% 15s %s" % (count, path) size_bin = int(math.log(count, 10)) if not size_bins.has_key( size_bin ): size_bins[ size_bin ] = 1 else: size_bins[ size_bin ] += 1 print "" print "sizes" max_bin = max( size_bins.keys() ) bin_fmt = r"1e%0" + str( int(math.log(max_bin, 10)) + 1 ) + "s" for size in xrange( 0, max_bin + 1 ): binsize = 0 if size_bins.has_key( size ): binsize = size_bins[size] bin_str = bin_fmt % size print "%s %s" % (bin_str, binsize)
Add a simple analysis tool to get some structural properties about an AG's specfile.
Add a simple analysis tool to get some structural properties about an AG's specfile.
Python
apache-2.0
jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,iychoi/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate
--- +++ @@ -0,0 +1,89 @@ +#!/usr/bin/python + +import os +import sys +import lxml +from lxml import etree +import math + +class StatsCounter(object): + + prefixes = {} + cur_tag = None + + def start( self, tag, attrib ): + self.cur_tag = tag + + def end( self, tag ): + pass + #self.cur_tag = None + + def data( self, _data ): + if self.cur_tag != "File" and self.cur_tag != "Dir": + return + + data = _data.rstrip("/") + if data == "": + return + + dir_name = os.path.dirname( data ) + if dir_name == "": + return + + if not self.prefixes.has_key( dir_name ): + self.prefixes[ dir_name ] = 0 + + self.prefixes[ dir_name ] += 1 + + def close( self ): + return "closed!" + +if __name__ == "__main__": + + counter = StatsCounter() + parser = etree.XMLParser( target=counter ) + + fd = open( sys.argv[1], "r" ) + + while True: + buf = fd.read( 32768 ) + if len(buf) == 0: + break + + parser.feed( buf ) + + result = parser.close() + + order = counter.prefixes.keys() + order.sort() + + size_bins = {} + + for path in order: + count = counter.prefixes[path] + print "% 15s %s" % (count, path) + + size_bin = int(math.log(count, 10)) + + if not size_bins.has_key( size_bin ): + size_bins[ size_bin ] = 1 + + else: + size_bins[ size_bin ] += 1 + + print "" + print "sizes" + max_bin = max( size_bins.keys() ) + + bin_fmt = r"1e%0" + str( int(math.log(max_bin, 10)) + 1 ) + "s" + + for size in xrange( 0, max_bin + 1 ): + binsize = 0 + if size_bins.has_key( size ): + binsize = size_bins[size] + + bin_str = bin_fmt % size + print "%s %s" % (bin_str, binsize) + + +
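The size bins are decimal orders of magnitude, and bin_fmt pads the exponent to the width of the largest bin. A worked editorial example:

import math

count = 1500
size_bin = int(math.log(count, 10))                             # 3
max_bin = 3
bin_fmt = r"1e%0" + str(int(math.log(max_bin, 10)) + 1) + "s"   # '1e%01s'
assert bin_fmt % size_bin == '1e3'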
a9ca7f2f22551256213ecd32047022048c72db5c
scripts/convert_svgs.py
scripts/convert_svgs.py
import cairosvg
import os

# MUST RUN IN PYTHON 3 and pip install cairosvg

file_dir = '../data/hough_test/Test_Set_1/'

svgs = os.listdir(os.path.join(file_dir, 'SVGs'))

for svg in svgs:
    name = svg.split('.svg')[0]
    cairosvg.svg2png(url=os.path.join(file_dir, 'SVGs', svg),
                     write_to=os.path.join(file_dir, 'PNGs', '{0}.png'.format(name)), dpi=600)
    # cairosvg.svg2pdf(url=os.path.join(file_dir, 'SVGs', svg),
    # write_to=os.path.join(file_dir, 'PDFs', '{0}.pdf'.format(name)), dpi=600)
Add Python 3 Script for Converting Image Types
Add Python 3 Script for Converting Image Types
Python
mit
Molecular-Image-Recognition/Molecular-Image-Recognition
---
+++
@@ -0,0 +1,15 @@
+import cairosvg
+import os
+
+# MUST RUN IN PYTHON 3 and pip install cairosvg
+
+file_dir = '../data/hough_test/Test_Set_1/'
+
+svgs = os.listdir(os.path.join(file_dir, 'SVGs'))
+
+for svg in svgs:
+    name = svg.split('.svg')[0]
+    cairosvg.svg2png(url=os.path.join(file_dir, 'SVGs', svg),
+                     write_to=os.path.join(file_dir, 'PNGs', '{0}.png'.format(name)), dpi=600)
+    # cairosvg.svg2pdf(url=os.path.join(file_dir, 'SVGs', svg),
+    # write_to=os.path.join(file_dir, 'PDFs', '{0}.pdf'.format(name)), dpi=600)
30a0b17d028f279a9877150ac4eb60b1ce135fa2
checks/check_multiply_hue_and_saturation.py
checks/check_multiply_hue_and_saturation.py
from __future__ import print_function, division

import numpy as np

import imgaug as ia
from imgaug import augmenters as iaa


def main():
    image = ia.quokka_square((128, 128))
    images_aug = []

    for mul in np.linspace(0.0, 2.0, 10):
        aug = iaa.MultiplyHueAndSaturation(mul)
        image_aug = aug.augment_image(image)
        images_aug.append(image_aug)

    for mul_hue in np.linspace(0.0, 5.0, 10):
        aug = iaa.MultiplyHueAndSaturation(mul_hue=mul_hue)
        image_aug = aug.augment_image(image)
        images_aug.append(image_aug)

    for mul_saturation in np.linspace(0.0, 5.0, 10):
        aug = iaa.MultiplyHueAndSaturation(mul_saturation=mul_saturation)
        image_aug = aug.augment_image(image)
        images_aug.append(image_aug)

    ia.imshow(ia.draw_grid(images_aug, rows=3))

    images_aug = []
    images_aug.extend(iaa.MultiplyHue().augment_images([image] * 10))
    images_aug.extend(iaa.MultiplySaturation().augment_images([image] * 10))
    ia.imshow(ia.draw_grid(images_aug, rows=2))


if __name__ == "__main__":
    main()
Add check script for MultiplyHueAndSaturation
Add check script for MultiplyHueAndSaturation
Python
mit
aleju/imgaug,aleju/imgaug,aleju/ImageAugmenter
---
+++
@@ -0,0 +1,37 @@
+from __future__ import print_function, division
+
+import numpy as np
+
+import imgaug as ia
+from imgaug import augmenters as iaa
+
+
+def main():
+    image = ia.quokka_square((128, 128))
+    images_aug = []
+
+    for mul in np.linspace(0.0, 2.0, 10):
+        aug = iaa.MultiplyHueAndSaturation(mul)
+        image_aug = aug.augment_image(image)
+        images_aug.append(image_aug)
+
+    for mul_hue in np.linspace(0.0, 5.0, 10):
+        aug = iaa.MultiplyHueAndSaturation(mul_hue=mul_hue)
+        image_aug = aug.augment_image(image)
+        images_aug.append(image_aug)
+
+    for mul_saturation in np.linspace(0.0, 5.0, 10):
+        aug = iaa.MultiplyHueAndSaturation(mul_saturation=mul_saturation)
+        image_aug = aug.augment_image(image)
+        images_aug.append(image_aug)
+
+    ia.imshow(ia.draw_grid(images_aug, rows=3))
+
+    images_aug = []
+    images_aug.extend(iaa.MultiplyHue().augment_images([image] * 10))
+    images_aug.extend(iaa.MultiplySaturation().augment_images([image] * 10))
+    ia.imshow(ia.draw_grid(images_aug, rows=2))
+
+
+if __name__ == "__main__":
+    main()
242479ace03928b20dc86806f7592ec1148b615b
service/test/integration/test_draft_service.py
service/test/integration/test_draft_service.py
#
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.

from twisted.internet import defer

from test.support.integration import SoledadTestBase, MailBuilder


class DraftServiceTest(SoledadTestBase):

    @defer.inlineCallbacks
    def test_store_and_load_draft(self):
        input_mail = MailBuilder().with_body('some test text').build_input_mail()

        stored_draft = yield self.draft_service.create_draft(input_mail)

        draft = yield self.mail_store.get_mail(stored_draft.ident, include_body=True)

        self.assertEqual('some test text', draft.body)
Add integration test for DraftService.
Add integration test for DraftService.
Python
agpl-3.0
pixelated-project/pixelated-user-agent,torquemad/pixelated-user-agent,torquemad/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,kaeff/pixelated-user-agent,SamuelToh/pixelated-user-agent,pixelated-project/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,torquemad/pixelated-user-agent,torquemad/pixelated-user-agent,torquemad/pixelated-user-agent,phazel/pixelated-user-agent,rdoh/pixelated-user-agent,phazel/pixelated-user-agent,SamuelToh/pixelated-user-agent,pixelated/pixelated-user-agent,sw00/pixelated-user-agent,sw00/pixelated-user-agent,pixelated-project/pixelated-user-agent,rdoh/pixelated-user-agent,rdoh/pixelated-user-agent,kaeff/pixelated-user-agent,kaeff/pixelated-user-agent,phazel/pixelated-user-agent,phazel/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated/pixelated-user-agent,SamuelToh/pixelated-user-agent,SamuelToh/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,rdoh/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,PuZZleDucK/pixelated-user-agent,pixelated/pixelated-user-agent,pixelated-project/pixelated-user-agent,sw00/pixelated-user-agent,sw00/pixelated-user-agent,SamuelToh/pixelated-user-agent,phazel/pixelated-user-agent,rdoh/pixelated-user-agent,kaeff/pixelated-user-agent,kaeff/pixelated-user-agent,sw00/pixelated-user-agent
---
+++
@@ -0,0 +1,32 @@
+#
+# Copyright (c) 2014 ThoughtWorks, Inc.
+#
+# Pixelated is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Pixelated is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
+
+from twisted.internet import defer
+
+from test.support.integration import SoledadTestBase, MailBuilder
+
+
+class DraftServiceTest(SoledadTestBase):
+
+    @defer.inlineCallbacks
+    def test_store_and_load_draft(self):
+        input_mail = MailBuilder().with_body('some test text').build_input_mail()
+
+        stored_draft = yield self.draft_service.create_draft(input_mail)
+
+        draft = yield self.mail_store.get_mail(stored_draft.ident, include_body=True)
+
+        self.assertEqual('some test text', draft.body)
d85442d5961602ae91c385a65e9503c409316b3f
bin/scrub_stale_lists.py
bin/scrub_stale_lists.py
#!/usr/bin/env python

import sys
import os
import time
import redis
import requests
import logging
from urlparse import urlparse
from datetime import timedelta


def main(rds):
    pf = "coalesce.v1."

    tasks_removed = 0
    lists_removed = 0

    list_keys = rds.smembers(pf + "list_keys")
    for key in list_keys:
        logging.debug("Inspecting list: " + pf + key)
        coalesce_list = rds.lrange(pf + "lists." + key, start=0, end=-1)
        for taskId in coalesce_list:
            logging.debug(" - inspecting task: " + taskId)
            if not is_pending(taskId):
                logging.debug("Removing stale task: " + taskId)
                rds.lrem(pf + 'lists.' + key, taskId, num=0)
                tasks_removed += 1
        if not rds.llen(pf + "lists." + key):
            logging.debug("Removing stale list key: " + key)
            rds.srem(pf + "list_keys", key)
            lists_removed += 1

    return tasks_removed, lists_removed

def is_pending(taskId):
    url = 'https://queue.taskcluster.net/v1/task/%s/status' % (taskId)
    try:
        r = requests.get(url, timeout=3)
        if r.status_code == 404:
            logging.debug("Queue service returned 404 for task: " + taskId)
            return False
        if not r.json()['status']['state'] == 'pending':
            return False
    except:
        logging.debug("Failed to get status")
    return True

if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
                        level=logging.DEBUG)

    try:
        redis_url = urlparse(os.environ['REDIS_URL'])
    except KeyError:
        logging.exception("Missing REDIS_URL env variable")
        sys.exit(1)

    rds = redis.Redis(host=redis_url.hostname,
                      port=redis_url.port,
                      password=redis_url.password)

    try:
        start = time.time()
        logging.info("Starting scrub task")

        tasks_removed, lists_removed = main(rds)
        elapsed = time.time() - start
        logging.info("Completed scrub task in %s" % (str(timedelta(seconds=elapsed))))
        logging.info("Removed %s lists and %s tasks" % (tasks_removed, lists_removed))
    except Exception:
        logging.exception("Fatal error in main loop")
Scrub stale data from redis
Scrub stale data from redis
Python
mpl-2.0
mozilla/tc-coalesce,dividehex/tc-coalesce
---
+++
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+import sys
+import os
+import time
+import redis
+import requests
+import logging
+from urlparse import urlparse
+from datetime import timedelta
+
+
+def main(rds):
+    pf = "coalesce.v1."
+
+    tasks_removed = 0
+    lists_removed = 0
+
+    list_keys = rds.smembers(pf + "list_keys")
+    for key in list_keys:
+        logging.debug("Inspecting list: " + pf + key)
+        coalesce_list = rds.lrange(pf + "lists." + key, start=0, end=-1)
+        for taskId in coalesce_list:
+            logging.debug(" - inspecting task: " + taskId)
+            if not is_pending(taskId):
+                logging.debug("Removing stale task: " + taskId)
+                rds.lrem(pf + 'lists.' + key, taskId, num=0)
+                tasks_removed += 1
+        if not rds.llen(pf + "lists." + key):
+            logging.debug("Removing stale list key: " + key)
+            rds.srem(pf + "list_keys", key)
+            lists_removed += 1
+
+    return tasks_removed, lists_removed
+
+def is_pending(taskId):
+    url = 'https://queue.taskcluster.net/v1/task/%s/status' % (taskId)
+    try:
+        r = requests.get(url, timeout=3)
+        if r.status_code == 404:
+            logging.debug("Queue service returned 404 for task: " + taskId)
+            return False
+        if not r.json()['status']['state'] == 'pending':
+            return False
+    except:
+        logging.debug("Failed to get status")
+    return True
+
+if __name__ == '__main__':
+    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
+                        level=logging.DEBUG)
+
+    try:
+        redis_url = urlparse(os.environ['REDIS_URL'])
+    except KeyError:
+        logging.exception("Missing REDIS_URL env variable")
+        sys.exit(1)
+
+    rds = redis.Redis(host=redis_url.hostname,
+                      port=redis_url.port,
+                      password=redis_url.password)
+
+    try:
+        start = time.time()
+        logging.info("Starting scrub task")
+
+        tasks_removed, lists_removed = main(rds)
+        elapsed = time.time() - start
+        logging.info("Completed scrub task in %s" % (str(timedelta(seconds=elapsed))))
+        logging.info("Removed %s lists and %s tasks" % (tasks_removed, lists_removed))
+    except Exception:
+        logging.exception("Fatal error in main loop")
9870fdd4b0996254216ff85a4dc0f9706843ca50
tests/basics/while_nest_exc.py
tests/basics/while_nest_exc.py
# test nested whiles within a try-except

while 1:
    print(1)
    try:
        print(2)
        while 1:
            print(3)
            break
    except:
        print(4)
    print(5)
    break
Add test for nested while with exc and break.
tests: Add test for nested while with exc and break.
Python
mit
ruffy91/micropython,jmarcelino/pycom-micropython,henriknelson/micropython,puuu/micropython,neilh10/micropython,xuxiaoxin/micropython,utopiaprince/micropython,cwyark/micropython,aethaniel/micropython,tuc-osg/micropython,vitiral/micropython,orionrobots/micropython,noahchense/micropython,paul-xxx/micropython,ganshun666/micropython,tralamazza/micropython,kostyll/micropython,trezor/micropython,AriZuu/micropython,mhoffma/micropython,mgyenik/micropython,infinnovation/micropython,SungEun-Steve-Kim/test-mp,suda/micropython,deshipu/micropython,hosaka/micropython,SungEun-Steve-Kim/test-mp,ChuckM/micropython,adafruit/circuitpython,deshipu/micropython,toolmacher/micropython,matthewelse/micropython,AriZuu/micropython,mianos/micropython,SHA2017-badge/micropython-esp32,micropython/micropython-esp32,tuc-osg/micropython,xuxiaoxin/micropython,utopiaprince/micropython,cwyark/micropython,dongsenfo/pymatgen,vitiral/micropython,orionrobots/micropython,noahchense/micropython,paul-xxx/micropython,ganshun666/micropython,tralamazza/micropython,kostyll/micropython,trezor/micropython,AriZuu/micropython,mhoffma/micropython,mgyenik/micropython,infinnovation/micropython,SungEun-Steve-Kim/test-mp,suda/micropython,deshipu/micropython,hosaka/micropython,mpalomer/micropython,tobbad/micropython,swegener/micropython,cloudformdesign/micropython,tralamazza/micropython,dxxb/micropython,swegener/micropython,henriknelson/micropython,EcmaXp/micropython,dmazzella/micropython,redbear/micropython,lbattraw/micropython,ChuckM/micropython,omtinez/micropython,galenhz/micropython,pramasoul/micropython,alex-robbins/micropython,dinau/micropython,lbattraw/micropython,ChuckM/micropython,praemdonck/micropython,feilongfl/micropython,noahwilliamsson/micropython,adafruit/micropython,PappaPeppar/micropython,adamkh/micropython,rubencabrera/micropython,dxxb/micropython,cwyark/micropython,dinau/micropython,adafruit/micropython,methoxid/micropystat,PappaPeppar/micropython,blazewicz/micropython,dxxb/micropython,emfcamp/micropython,heisewangluo/micropython,micropython/micropython-esp32,danicampora/micropython,mianos/micropython,mianos/micropython,blazewicz/micropython,lbattraw/micropython,Vogtinator/micropython,Timmenem/micropython,trezor/micropython,infinnovation/micropython,pramasoul/micropython,KISSMonX/micropython,ruffy91/micropython,feilongfl/micropython,redbear/micropython,mpalomer/micropython,warner83/micropython,mhoffma/micropython,EcmaXp/micropython,mianos/micropython,blazewicz/micropython,xuxiaoxin/micropython,kostyll/micropython,kerneltask/micropython,feilongfl/micropython,chrisdearman/micropython,firstval/micropython,aethaniel/micropython,praemdonck/micropython,paul-xxx/micropython,adafruit/micropython,neilh10/micropython,tobbad/micropython,mianos/micropython,kostyll/micropython,ruffy91/micropython,pramasoul/micropython,ericsnowcurrently/micropython,aethaniel/micropython,puuu/micropython,chrisdearman/micropython,MrSurly/micropython,SHA2017-badge/micropython-esp32,emfcamp/micropython,pfalcon/micropython,EcmaXp/micropython,galenhz/micropython,hosaka/micropython,galenhz/micropython,vriera/micropython,blmorris/micropython,kerneltask/micropython,mpalomer/micropython,utopiaprince/micropython,kostyll/micropython,cwyark/micropython,SungEun-Steve-Kim/test-mp,stonegithubs/micropython,danicampora/micropython,mgyenik/micropython,toolmacher/micropython,xyb/micropython,tobbad/micropython,tdautc19841202/micropython,drrk/micropython,dmazzella/micropython,ernesto-g/micropython,paul-xxx/micropython,ganshun666/micropython,methoxid/micropystat,TDAbboud/micropython,micropython/micropython-esp32,ernesto-g/micropython,adafruit/circuitpython,jimkmc/micropython,TDAbboud/micropython,ericsnowcurrently/micropython,jimkmc/micropython,suda/micropython,swegener/micropython,pfalcon/micropython,henriknelson/micropython,lowRISC/micropython,cnoviello/micropython,HenrikSolver/micropython,micropython/micropython-esp32,mhoffma/micropython,bvernoux/micropython,kerneltask/micropython,cnoviello/micropython,ganshun666/micropython,SungEun-Steve-Kim/test-mp,neilh10/micropython,deshipu/micropython,cnoviello/micropython,orionrobots/micropython,bvernoux/micropython,MrSurly/micropython-esp32,noahchense/micropython,pramasoul/micropython,praemdonck/micropython,vriera/micropython,ganshun666/micropython,pfalcon/micropython,supergis/micropython,HenrikSolver/micropython,hosaka/micropython,ernesto-g/micropython,xyb/micropython,AriZuu/micropython,omtinez/micropython,tdautc19841202/micropython,noahchense/micropython,rubencabrera/micropython,ceramos/micropython,MrSurly/micropython-esp32,Vogtinator/micropython,ryannathans/micropython,trezor/micropython,Timmenem/micropython,dmazzella/micropython,puuu/micropython,ernesto-g/micropython,dhylands/micropython,deshipu/micropython,MrSurly/micropython,supergis/micropython,deshipu/micropython,cloudformdesign/micropython,Vogtinator/micropython,rubencabrera/micropython,toolmacher/micropython,tralamazza/micropython,ChuckM/micropython,oopy/micropython,xyb/micropython,xhat/micropython,lbattraw/micropython,pozetroninc/micropython,torwag/micropython,hosaka/micropython,puuu/micropython,suda/micropython,trezor/micropython,dinau/micropython,ryannathans/micropython,lowRISC/micropython,adafruit/circuitpython,ernesto-g/micropython,jmarcelino/pycom-micropython,suda/micropython,slzatz/micropython,aethaniel/micropython,slzatz/micropython,kerneltask/micropython,slzatz/micropython,tuc-osg/micropython,ruffy91/micropython,vitiral/micropython,alex-robbins/micropython,Timmenem/micropython,utopiaprince/micropython,trezor/micropython,bvernoux/micropython,utopiaprince/micropython,vriera/micropython,swegener/micropython,vitiral/micropython,cnoviello/micropython,omtinez/micropython,alex-march/micropython,KISSMonX/micropython,hiway/micropython,mgyenik/micropython,martinribelotta/micropython,dinau/micropython,paul-xxx/micropython,firstval/micropython,firstval/micropython,omtinez/micropython,hiway/micropython,ahotam/micropython,dhylands/micropython,turbinenreiter/micropython,misterdanb/micropython,misterdanb/micropython,emfcamp/micropython,adamkh/micropython,skybird6672/micropython,utopiaprince/micropython,PappaPeppar/micropython,ganshun666/micropython,SHA2017-badge/micropython-esp32,Timmenem/micropython,MrSurly/micropython-esp32,Peetz0r/micropython-esp32,MrSurly/micropython,adafruit/circuitpython,drrk/micropython,PappaPeppar/micropython,ruffy91/micropython,dhylands/micropython,oopy/micropython,matthewelse/micropython,orionrobots/micropython,selste/micropython,Peetz0r/micropython-esp32,alex-march/micropython,adafruit/micropython,xuxiaoxin/micropython,tuc-osg/micropython,heisewangluo/micropython,Vogtinator/micropython,MrSurly/micropython-esp32,vitiral/micropython,warner83/micropython,oopy/micropython,lowRISC/micropython,skybird6672/micropython,mhoffma/micropython,lowRISC/micropython,mgyenik/micropython,dmazzella/micropython,oopy/micropython,lbattraw/micropython,torwag/micropython,praemdonck/micropython,dxxb/micropython,feilongfl/micropython,KISSMonX/micropython,TDAbboud/micropython,tdautc19841202/micropython,chrisdearman/micropython,xyb/micropython,rubencabrera/micropython,xhat/micropython,AriZuu/micropython,tralamazza/micropython,infinnovation/micropython,adamkh/micropython,matthewelse/micropython,MrSurly/micropython,stonegithubs/micropython,jlillest/micropython,methoxid/micropystat,turbinenreiter/micropython,noahchense/micropython,redbear/micropython,alex-march/micropython,firstval/micropython,cloudformdesign/micropython,cloudformdesign/micropython,blmorris/micropython,jlillest/micropython,slzatz/micropython,toolmacher/micropython,blazewicz/micropython,skybird6672/micropython,ericsnowcurrently/micropython,KISSMonX/micropython,ceramos/micropython,HenrikSolver/micropython,warner83/micropython,alex-march/micropython,jimkmc/micropython,jmarcelino/pycom-micropython,noahwilliamsson/micropython,noahwilliamsson/micropython,vriera/micropython,HenrikSolver/micropython,hiway/micropython,dhylands/micropython,toolmacher/micropython,kerneltask/micropython,heisewangluo/micropython,xuxiaoxin/micropython,drrk/micropython,jlillest/micropython,PappaPeppar/micropython,xhat/micropython,redbear/micropython,adamkh/micropython,KISSMonX/micropython,Peetz0r/micropython-esp32,TDAbboud/micropython,slzatz/micropython,turbinenreiter/micropython
---
+++
@@ -0,0 +1,13 @@
+# test nested whiles within a try-except
+
+while 1:
+    print(1)
+    try:
+        print(2)
+        while 1:
+            print(3)
+            break
+    except:
+        print(4)
+    print(5)
+    break
e61dcb055fb4767e6e662648c89cbdfda4422c97
docs/source/examples/test_no_depends_fails.py
docs/source/examples/test_no_depends_fails.py
from pych.extern import Chapel

@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
    return int

if __name__ == "__main__":
    print(useTwoModules(2, 4))


import testcase # contains the general testing method, which allows us to gather output
import os.path

def test_using_multiple_modules():
    out = testcase.runpy(os.path.realpath(__file__))
    # Ensure that when a used module is nowhere near the exported function, we
    # get an error message to that effect.
    assert "error: Cannot find module \'M1\'" in out
from pych.extern import Chapel

@Chapel(sfile="users.onlyonce.chpl")
def useTwoModules(x=int, y=int):
    return int

if __name__ == "__main__":
    print(useTwoModules(2, 4))


import testcase # contains the general testing method, which allows us to gather output
import os.path

def test_using_multiple_modules():
    out = testcase.runpy(os.path.realpath(__file__))
    # Ensure that when a used module is nowhere near the exported function, we
    # get an error message to that effect.
    assert "error: Cannot find module or enum \'M1\'" in out
Update expected error message in this test
Update expected error message in this test With the new ability to "use" enums, the error message for failing to find a module had been updated to indicate that we didn't find a module or an enum, making this test's expected output fail to match. Update the expected error message to reflect this new functionality.
Python
apache-2.0
chapel-lang/pychapel,russel/pychapel,chapel-lang/pychapel,russel/pychapel,russel/pychapel,chapel-lang/pychapel
---
+++
@@ -15,4 +15,4 @@
     out = testcase.runpy(os.path.realpath(__file__))
     # Ensure that when a used module is nowhere near the exported function, we
     # get an error message to that effect.
-    assert "error: Cannot find module \'M1\'" in out
+    assert "error: Cannot find module or enum \'M1\'" in out
77f812f76966b90c27131fd65968f548afcdcace
svir/dialogs/load_basic_csv_as_layer_dialog.py
svir/dialogs/load_basic_csv_as_layer_dialog.py
# -*- coding: utf-8 -*-
# /***************************************************************************
# Irmt
# A QGIS plugin
# OpenQuake Integrated Risk Modelling Toolkit
# -------------------
# begin : 2013-10-24
# copyright : (C) 2018 by GEM Foundation
# email : devops@openquake.org
# ***************************************************************************/
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.

import os
import tempfile
from svir.utilities.utils import import_layer_from_csv
from svir.utilities.shared import OQ_BASIC_CSV_TO_LAYER_TYPES
from svir.dialogs.load_output_as_layer_dialog import LoadOutputAsLayerDialog


class LoadBasicCsvAsLayerDialog(LoadOutputAsLayerDialog):
    """
    Modal dialog to load as layer a basic csv with no geometries, to be
    browsed through its attribute table
    """

    def __init__(self, iface, viewer_dock, session, hostname, calc_id,
                 output_type, path=None, mode=None):
        assert output_type in OQ_BASIC_CSV_TO_LAYER_TYPES, output_type
        LoadOutputAsLayerDialog.__init__(
            self, iface, viewer_dock, session, hostname, calc_id,
            output_type, path, mode)
        self.create_file_size_indicator()
        self.setWindowTitle('Load %s from CSV, as layer' % output_type)
        self.populate_out_dep_widgets()
        self.adjustSize()
        self.set_ok_button()

    def set_ok_button(self):
        self.ok_button.setEnabled(bool(self.path))

    def populate_out_dep_widgets(self):
        self.show_file_size()

    def load_from_csv(self):
        if self.mode == 'testing':
            dest_shp = tempfile.mkstemp(suffix='.shp')[1]
        else:
            dest_shp = None # the destination file will be selected via GUI
        csv_path = self.path_le.text()
        # extract the name of the csv file and remove the extension
        layer_name = os.path.splitext(os.path.basename(csv_path))[0]
        self.layer = import_layer_from_csv(
            self, csv_path, layer_name, self.iface,
            save_as_shp=False, dest_shp=dest_shp,
            zoom_to_layer=False, has_geom=False)
Add loader for basic csv layers without geoms
Add loader for basic csv layers without geoms
Python
agpl-3.0
gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis
---
+++
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+# /***************************************************************************
+# Irmt
+# A QGIS plugin
+# OpenQuake Integrated Risk Modelling Toolkit
+# -------------------
+# begin : 2013-10-24
+# copyright : (C) 2018 by GEM Foundation
+# email : devops@openquake.org
+# ***************************************************************************/
+#
+# OpenQuake is free software: you can redistribute it and/or modify it
+# under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# OpenQuake is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import tempfile
+from svir.utilities.utils import import_layer_from_csv
+from svir.utilities.shared import OQ_BASIC_CSV_TO_LAYER_TYPES
+from svir.dialogs.load_output_as_layer_dialog import LoadOutputAsLayerDialog
+
+
+class LoadBasicCsvAsLayerDialog(LoadOutputAsLayerDialog):
+    """
+    Modal dialog to load as layer a basic csv with no geometries, to be
+    browsed through its attribute table
+    """
+
+    def __init__(self, iface, viewer_dock, session, hostname, calc_id,
+                 output_type, path=None, mode=None):
+        assert output_type in OQ_BASIC_CSV_TO_LAYER_TYPES, output_type
+        LoadOutputAsLayerDialog.__init__(
+            self, iface, viewer_dock, session, hostname, calc_id,
+            output_type, path, mode)
+        self.create_file_size_indicator()
+        self.setWindowTitle('Load %s from CSV, as layer' % output_type)
+        self.populate_out_dep_widgets()
+        self.adjustSize()
+        self.set_ok_button()
+
+    def set_ok_button(self):
+        self.ok_button.setEnabled(bool(self.path))
+
+    def populate_out_dep_widgets(self):
+        self.show_file_size()
+
+    def load_from_csv(self):
+        if self.mode == 'testing':
+            dest_shp = tempfile.mkstemp(suffix='.shp')[1]
+        else:
+            dest_shp = None # the destination file will be selected via GUI
+        csv_path = self.path_le.text()
+        # extract the name of the csv file and remove the extension
+        layer_name = os.path.splitext(os.path.basename(csv_path))[0]
+        self.layer = import_layer_from_csv(
+            self, csv_path, layer_name, self.iface,
+            save_as_shp=False, dest_shp=dest_shp,
+            zoom_to_layer=False, has_geom=False)
8c7fa4e16805dc9e8adbd5615c610be8ba92c444
ceph_deploy/tests/parser/test_gatherkeys.py
ceph_deploy/tests/parser/test_gatherkeys.py
import pytest

from ceph_deploy.cli import get_parser


class TestParserGatherKeys(object):

    def setup(self):
        self.parser = get_parser()

    def test_gather_help(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('gatherkeys --help'.split())
        out, err = capsys.readouterr()
        assert 'usage: ceph-deploy gatherkeys' in out
        assert 'positional arguments:' in out
        assert 'optional arguments:' in out

    def test_gatherkeys_host_required(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('gatherkeys'.split())
        out, err = capsys.readouterr()
        assert "error: too few arguments" in err

    def test_gatherkeys_one_host(self):
        args = self.parser.parse_args('gatherkeys host1'.split())
        assert args.mon == ['host1']

    def test_gatherkeys_multiple_hosts(self):
        hostnames = ['host1', 'host2', 'host3']
        args = self.parser.parse_args(['gatherkeys'] + hostnames)
        assert args.mon == hostnames
Add argparse tests for gatherkeys
[RM-11742] Add argparse tests for gatherkeys Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>
Python
mit
SUSE/ceph-deploy,zhouyuan/ceph-deploy,shenhequnying/ceph-deploy,ghxandsky/ceph-deploy,zhouyuan/ceph-deploy,ceph/ceph-deploy,SUSE/ceph-deploy,branto1/ceph-deploy,isyippee/ceph-deploy,osynge/ceph-deploy,Vicente-Cheng/ceph-deploy,isyippee/ceph-deploy,ceph/ceph-deploy,trhoden/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,SUSE/ceph-deploy-to-be-deleted,Vicente-Cheng/ceph-deploy,ghxandsky/ceph-deploy,imzhulei/ceph-deploy,codenrhoden/ceph-deploy,shenhequnying/ceph-deploy,codenrhoden/ceph-deploy,trhoden/ceph-deploy,osynge/ceph-deploy,branto1/ceph-deploy,imzhulei/ceph-deploy
---
+++
@@ -0,0 +1,32 @@
+import pytest
+
+from ceph_deploy.cli import get_parser
+
+
+class TestParserGatherKeys(object):
+
+    def setup(self):
+        self.parser = get_parser()
+
+    def test_gather_help(self, capsys):
+        with pytest.raises(SystemExit):
+            self.parser.parse_args('gatherkeys --help'.split())
+        out, err = capsys.readouterr()
+        assert 'usage: ceph-deploy gatherkeys' in out
+        assert 'positional arguments:' in out
+        assert 'optional arguments:' in out
+
+    def test_gatherkeys_host_required(self, capsys):
+        with pytest.raises(SystemExit):
+            self.parser.parse_args('gatherkeys'.split())
+        out, err = capsys.readouterr()
+        assert "error: too few arguments" in err
+
+    def test_gatherkeys_one_host(self):
+        args = self.parser.parse_args('gatherkeys host1'.split())
+        assert args.mon == ['host1']
+
+    def test_gatherkeys_multiple_hosts(self):
+        hostnames = ['host1', 'host2', 'host3']
+        args = self.parser.parse_args(['gatherkeys'] + hostnames)
+        assert args.mon == hostnames
f24fe32329625ec037a9afc8d3bdeed5f41e69a0
scripts/diff_incar.py
scripts/diff_incar.py
#!/usr/bin/env python

'''
Created on Nov 12, 2011
'''

__author__="Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Nov 12, 2011"

import sys
import itertools
from pymatgen.io.vaspio import Incar
from pymatgen.util.string_utils import str_aligned

filepath1 = sys.argv[1]
filepath2 = sys.argv[2]
incar1 = Incar.from_file(filepath1)
incar2 = Incar.from_file(filepath2)

def format_lists(v):
    if isinstance(v, (tuple, list)):
        return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
    return v

d = incar1.diff(incar2)
output = [['SAME PARAMS','', '']]
output.append(['---------------','', ''])
output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
output.append(['','', ''])
output.append(['DIFFERENT PARAM','', ''])
output.append(['---------------','', ''])
output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
print str_aligned(output, ['', filepath1, filepath2])
Add a script for easy diffing of two Incars.
Add a script for easy diffing of two Incars. Former-commit-id: 998a47c0b96b3024abd82b196f431926cc50847d [formerly 927396038d147b633bee31988cf1e016258c5320] Former-commit-id: 4a8c6bb9cfef4a3a3f6cc211b7ef558a06f523c3
Python
mit
gpetretto/pymatgen,aykol/pymatgen,Bismarrck/pymatgen,czhengsci/pymatgen,johnson1228/pymatgen,Bismarrck/pymatgen,gVallverdu/pymatgen,richardtran415/pymatgen,blondegeek/pymatgen,czhengsci/pymatgen,dongsenfo/pymatgen,montoyjh/pymatgen,czhengsci/pymatgen,fraricci/pymatgen,nisse3000/pymatgen,nisse3000/pymatgen,blondegeek/pymatgen,nisse3000/pymatgen,richardtran415/pymatgen,tschaume/pymatgen,fraricci/pymatgen,matk86/pymatgen,mbkumar/pymatgen,czhengsci/pymatgen,dongsenfo/pymatgen,tallakahath/pymatgen,blondegeek/pymatgen,johnson1228/pymatgen,Bismarrck/pymatgen,dongsenfo/pymatgen,Bismarrck/pymatgen,vorwerkc/pymatgen,gpetretto/pymatgen,johnson1228/pymatgen,vorwerkc/pymatgen,montoyjh/pymatgen,xhqu1981/pymatgen,mbkumar/pymatgen,vorwerkc/pymatgen,davidwaroquiers/pymatgen,matk86/pymatgen,gVallverdu/pymatgen,fraricci/pymatgen,tschaume/pymatgen,matk86/pymatgen,gpetretto/pymatgen,setten/pymatgen,richardtran415/pymatgen,setten/pymatgen,tallakahath/pymatgen,ndardenne/pymatgen,gVallverdu/pymatgen,tschaume/pymatgen,ndardenne/pymatgen,montoyjh/pymatgen,mbkumar/pymatgen,gmatteo/pymatgen,vorwerkc/pymatgen,richardtran415/pymatgen,xhqu1981/pymatgen,montoyjh/pymatgen,davidwaroquiers/pymatgen,matk86/pymatgen,nisse3000/pymatgen,Bismarrck/pymatgen,fraricci/pymatgen,davidwaroquiers/pymatgen,tallakahath/pymatgen,tschaume/pymatgen,gpetretto/pymatgen,xhqu1981/pymatgen,ndardenne/pymatgen,setten/pymatgen,gmatteo/pymatgen,davidwaroquiers/pymatgen,gVallverdu/pymatgen,setten/pymatgen,aykol/pymatgen,dongsenfo/pymatgen,mbkumar/pymatgen,blondegeek/pymatgen,aykol/pymatgen,johnson1228/pymatgen,tschaume/pymatgen
---
+++
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+'''
+Created on Nov 12, 2011
+'''
+
+__author__="Shyue Ping Ong"
+__copyright__ = "Copyright 2011, The Materials Project"
+__version__ = "0.1"
+__maintainer__ = "Shyue Ping Ong"
+__email__ = "shyue@mit.edu"
+__date__ = "Nov 12, 2011"
+
+import sys
+import itertools
+from pymatgen.io.vaspio import Incar
+from pymatgen.util.string_utils import str_aligned
+
+filepath1 = sys.argv[1]
+filepath2 = sys.argv[2]
+incar1 = Incar.from_file(filepath1)
+incar2 = Incar.from_file(filepath2)
+
+def format_lists(v):
+    if isinstance(v, (tuple, list)):
+        return " ".join([str(i) + "*" + str(len(tuple(group))) for (i,group) in itertools.groupby(v)])
+    return v
+
+d = incar1.diff(incar2)
+output = [['SAME PARAMS','', '']]
+output.append(['---------------','', ''])
+output.extend([(k,format_lists(v),format_lists(v)) for k,v in d['Same'].items() if k != "SYSTEM"])
+output.append(['','', ''])
+output.append(['DIFFERENT PARAM','', ''])
+output.append(['---------------','', ''])
+output.extend([(k,format_lists(v['INCAR1']),format_lists(v['INCAR2'])) for k, v in d['Different'].items() if k != "SYSTEM"])
+print str_aligned(output, ['', filepath1, filepath2])
bb940826d78e44a4098023e83d788b3d915b9b1f
grip/constants.py
grip/constants.py
# The supported extensions, as defined by https://github.com/github/markup
supported_extensions = [
    '.markdown', '.mdown', '.mkdn', '.md',
    '.textile',
    '.rdoc',
    '.org',
    '.creole',
    '.mediawiki', '.wiki',
    '.rst',
    '.asciidoc', '.adoc', '.asc',
    '.pod',
]


# The default filenames when no file is provided
default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
# The supported extensions, as defined by https://github.com/github/markup
supported_extensions = ['.md', '.markdown']

# The default filenames when no file is provided
default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
Revert "Add the GitHub-supported format extensions."
Revert "Add the GitHub-supported format extensions." This reverts commit 4f67141cfabe99af99434364e13fec91bef291a7.
Python
mit
jbarreras/grip,joeyespo/grip,mgoddard-pivotal/grip,joeyespo/grip,mgoddard-pivotal/grip,ssundarraj/grip,jbarreras/grip,ssundarraj/grip
---
+++
@@ -1,16 +1,5 @@
 # The supported extensions, as defined by https://github.com/github/markup
-supported_extensions = [
-    '.markdown', '.mdown', '.mkdn', '.md',
-    '.textile',
-    '.rdoc',
-    '.org',
-    '.creole',
-    '.mediawiki', '.wiki',
-    '.rst',
-    '.asciidoc', '.adoc', '.asc',
-    '.pod',
-]
-
+supported_extensions = ['.md', '.markdown']
 
 # The default filenames when no file is provided
 default_filenames = map(lambda ext: 'README' + ext, supported_extensions)
b333d95f3f4187b9d9b480ba8ff4985a62d65f41
tests/pytests/unit/modules/test_nginx.py
tests/pytests/unit/modules/test_nginx.py
import pytest
import salt.modules.nginx as nginx
from tests.support.mock import patch


@pytest.fixture
def configure_loader_modules():
    return {nginx: {}}


@pytest.mark.parametrize(
    "expected_version,nginx_output",
    [
        ("1.2.3", "nginx version: nginx/1.2.3"),
        ("1", "nginx version: nginx/1"),
        ("9.1.100a1+abc123", "nginx version: nginx/9.1.100a1+abc123"),
        (
            "42.9.13.1111111111.whatever",
            "nginx version: nginx/42.9.13.1111111111.whatever",
        ),
    ],
)
def test_basic_nginx_version_output(expected_version, nginx_output):
    with patch.dict(nginx.__salt__, {"cmd.run": lambda *args, **kwargs: nginx_output}):
        assert nginx.version() == expected_version
Add tests for nginx version
Add tests for nginx version I had considered doing something with regex like `\d+(\.\d+)*`, then I realized that there are other valid version strings that perhaps do not contain numerics separated by `.`, so this option is a bit more flexible as far as what versions nginx can return. The major weakness here is that it does require nginx to return the version on the first line, and to use a `/` separator before the version itself. One potential alternative would be to use the regex `r'(\w+\.(\w+\.?)*)'` and get groups()[0] from that, however that *would* require at least a decimal point. There's a number of other options that one could use here, however, we're currently simply banking on nginx always returning the actual version prefixed with a forward slash.
Python
apache-2.0
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
---
+++
@@ -0,0 +1,25 @@
+import pytest
+import salt.modules.nginx as nginx
+from tests.support.mock import patch
+
+
+@pytest.fixture
+def configure_loader_modules():
+    return {nginx: {}}
+
+
+@pytest.mark.parametrize(
+    "expected_version,nginx_output",
+    [
+        ("1.2.3", "nginx version: nginx/1.2.3"),
+        ("1", "nginx version: nginx/1"),
+        ("9.1.100a1+abc123", "nginx version: nginx/9.1.100a1+abc123"),
+        (
+            "42.9.13.1111111111.whatever",
+            "nginx version: nginx/42.9.13.1111111111.whatever",
+        ),
+    ],
+)
+def test_basic_nginx_version_output(expected_version, nginx_output):
+    with patch.dict(nginx.__salt__, {"cmd.run": lambda *args, **kwargs: nginx_output}):
+        assert nginx.version() == expected_version
f9b38f675df9752a4b5309df059c6d15a1e1b3c2
ex_range.py
ex_range.py
from collections import namedtuple

from vintage_ex import EX_RANGE_REGEXP
import location


EX_RANGE = namedtuple('ex_range', 'left left_offset separator right right_offset')


def get_range_parts(range):
    parts = EX_RANGE_REGEXP.search(range).groups()
    return EX_RANGE(
        left=parts[1],
        left_offset=parts[3] or '0',
        separator=parts[5],
        right=parts[7],
        right_offset=parts[9] or '0'
    )


def calculate_range(view, range):
    parsed_range = get_range_parts(range)
    if parsed_range.left == '%':
        left, left_offset = '1', '0'
        right, right_offset = '$', '0'
    elif parsed_range.separator:
        left, left_offset = parsed_range.left, parsed_range.left_offset
        right, right_offset = parsed_range.right, parsed_range.right_offset

    return calculate_range_part(view, left) + int(left_offset), \
           calculate_range_part(view, right) + int(right_offset)


def calculate_range_part(view, p):
    if p.isdigit():
        return int(p)
    if p.startswith('/') or p.startswith('?'):
        if p.startswith('?'):
            return location.reverse_search(view, p[1:-1],
                                           end=view.sel()[0].begin())
        return location.search(view, p[1:-1])
    if p in ('$', '.'):
        return location.calculate_relative_ref(view, p)
Add module for range support.
Add module for range support.
Python
mit
SublimeText/VintageEx
---
+++
@@ -0,0 +1,43 @@
+from collections import namedtuple
+
+from vintage_ex import EX_RANGE_REGEXP
+import location
+
+
+EX_RANGE = namedtuple('ex_range', 'left left_offset separator right right_offset')
+
+
+def get_range_parts(range):
+    parts = EX_RANGE_REGEXP.search(range).groups()
+    return EX_RANGE(
+        left=parts[1],
+        left_offset=parts[3] or '0',
+        separator=parts[5],
+        right=parts[7],
+        right_offset=parts[9] or '0'
+    )
+
+
+def calculate_range(view, range):
+    parsed_range = get_range_parts(range)
+    if parsed_range.left == '%':
+        left, left_offset = '1', '0'
+        right, right_offset = '$', '0'
+    elif parsed_range.separator:
+        left, left_offset = parsed_range.left, parsed_range.left_offset
+        right, right_offset = parsed_range.right, parsed_range.right_offset
+
+    return calculate_range_part(view, left) + int(left_offset), \
+           calculate_range_part(view, right) + int(right_offset)
+
+
+def calculate_range_part(view, p):
+    if p.isdigit():
+        return int(p)
+    if p.startswith('/') or p.startswith('?'):
+        if p.startswith('?'):
+            return location.reverse_search(view, p[1:-1],
+                                           end=view.sel()[0].begin())
+        return location.search(view, p[1:-1])
+    if p in ('$', '.'):
+        return location.calculate_relative_ref(view, p)
58ac46511964ca1dd3de25d2b6053eb785e3e281
util/detect-outliers.py
util/detect-outliers.py
#!/usr/bin/env python2
#
# Detect outlier faces (not of the same person) in a directory
# of aligned images.
# Brandon Amos
# 2016/02/14
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time

start = time.time()

import argparse
import cv2
import itertools
import os
import glob

import numpy as np
np.set_printoptions(precision=2)

from sklearn.covariance import EllipticEnvelope
from sklearn.metrics.pairwise import euclidean_distances

import openface

fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
openfaceModelDir = os.path.join(modelDir, 'openface')

def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
                        default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
    parser.add_argument('--imgDim', type=int,
                        help="Default image dimension.", default=96)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--threshold', type=int, default=0.9)
    parser.add_argument('directory')

    args = parser.parse_args()

    net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)

    reps = []
    paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png'))))
    for imgPath in paths:
        reps.append(net.forwardPath(imgPath))

    mean = np.mean(reps, axis=0)
    dists = euclidean_distances(reps, mean)
    outliers = []
    for path, dist in zip(paths, dists):
        dist = dist.take(0)
        if dist > args.threshold:
            outliers.append((path, dist))

    print("Found {} outlier(s) from {} images.".format(len(outliers), len(paths)))
    for path, dist in outliers:
        print(" + {} ({:0.2f})".format(path, dist))

if __name__ == '__main__':
    main()
Add outlier detection util script.
Add outlier detection util script.
Python
apache-2.0
Alexx-G/openface,nmabhi/Webface,Alexx-G/openface,nmabhi/Webface,xinfang/face-recognize,francisleunggie/openface,cmusatyalab/openface,nmabhi/Webface,Alexx-G/openface,francisleunggie/openface,nhzandi/openface,xinfang/face-recognize,Alexx-G/openface,nmabhi/Webface,xinfang/face-recognize,cmusatyalab/openface,francisleunggie/openface,cmusatyalab/openface,nhzandi/openface,nhzandi/openface
---
+++
@@ -0,0 +1,77 @@
+#!/usr/bin/env python2
+#
+# Detect outlier faces (not of the same person) in a directory
+# of aligned images.
+# Brandon Amos
+# 2016/02/14
+#
+# Copyright 2015-2016 Carnegie Mellon University
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+
+start = time.time()
+
+import argparse
+import cv2
+import itertools
+import os
+import glob
+
+import numpy as np
+np.set_printoptions(precision=2)
+
+from sklearn.covariance import EllipticEnvelope
+from sklearn.metrics.pairwise import euclidean_distances
+
+import openface
+
+fileDir = os.path.dirname(os.path.realpath(__file__))
+modelDir = os.path.join(fileDir, '..', 'models')
+openfaceModelDir = os.path.join(modelDir, 'openface')
+
+def main():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--networkModel', type=str, help="Path to Torch network model.",
+                        default=os.path.join(openfaceModelDir, 'nn4.small2.v1.t7'))
+    parser.add_argument('--imgDim', type=int,
+                        help="Default image dimension.", default=96)
+    parser.add_argument('--cuda', action='store_true')
+    parser.add_argument('--threshold', type=int, default=0.9)
+    parser.add_argument('directory')
+
+    args = parser.parse_args()
+
+    net = openface.TorchNeuralNet(args.networkModel, args.imgDim, cuda=args.cuda)
+
+    reps = []
+    paths = sorted(list(glob.glob(os.path.join(args.directory, '*.png'))))
+    for imgPath in paths:
+        reps.append(net.forwardPath(imgPath))
+
+    mean = np.mean(reps, axis=0)
+    dists = euclidean_distances(reps, mean)
+    outliers = []
+    for path, dist in zip(paths, dists):
+        dist = dist.take(0)
+        if dist > args.threshold:
+            outliers.append((path, dist))
+
+    print("Found {} outlier(s) from {} images.".format(len(outliers), len(paths)))
+    for path, dist in outliers:
+        print(" + {} ({:0.2f})".format(path, dist))
+
+if __name__ == '__main__':
+    main()
964d1f97df600308b23b6a91b9de8811795509a4
sympy/core/tests/test_cache.py
sympy/core/tests/test_cache.py
from sympy.core.cache import cacheit

def test_cacheit_doc():
    @cacheit
    def testfn():
        "test docstring"
        pass

    assert testfn.__doc__ == "test docstring"
    assert testfn.__name__ == "testfn"
Add a test for the @cachit decorator.
Add a test for the @cachit decorator. Make sure that the caching decorator correctly copies over the function docstring and function name. This fixes issue #744 from the issue tracker. Signed-off-by: Jochen Voss <1dcd5c846f3eb4984f0655fb5407be7c9e0c9079@seehuhn.de> Signed-off-by: Ondrej Certik <b816faa87b7d35274d2e545c5be11ed4376f3ccf@certik.cz>
Python
bsd-3-clause
cccfran/sympy,shipci/sympy,wyom/sympy,Titan-C/sympy,ahhda/sympy,meghana1995/sympy,wanglongqi/sympy,pandeyadarsh/sympy,vipulroxx/sympy,jbbskinny/sympy,jaimahajan1997/sympy,Arafatk/sympy,grevutiu-gabriel/sympy,Gadal/sympy,jerli/sympy,mcdaniel67/sympy,Mitchkoens/sympy,wyom/sympy,oliverlee/sympy,ga7g08/sympy,tovrstra/sympy,hrashk/sympy,Curious72/sympy,kaushik94/sympy,Davidjohnwilson/sympy,kumarkrishna/sympy,oliverlee/sympy,jbbskinny/sympy,MridulS/sympy,Shaswat27/sympy,moble/sympy,kaichogami/sympy,lindsayad/sympy,wanglongqi/sympy,abhiii5459/sympy,madan96/sympy,toolforger/sympy,jamesblunt/sympy,bukzor/sympy,pbrady/sympy,drufat/sympy,AunShiLord/sympy,atreyv/sympy,Curious72/sympy,skidzo/sympy,iamutkarshtiwari/sympy,dqnykamp/sympy,Sumith1896/sympy,Vishluck/sympy,kumarkrishna/sympy,ChristinaZografou/sympy,vipulroxx/sympy,srjoglekar246/sympy,kaichogami/sympy,jaimahajan1997/sympy,ahhda/sympy,sahmed95/sympy,drufat/sympy,Curious72/sympy,kaichogami/sympy,sampadsaha5/sympy,yukoba/sympy,Designist/sympy,sahilshekhawat/sympy,abhiii5459/sympy,kmacinnis/sympy,liangjiaxing/sympy,pandeyadarsh/sympy,kevalds51/sympy,iamutkarshtiwari/sympy,chaffra/sympy,lindsayad/sympy,Mitchkoens/sympy,hiway/micropython,jerli/sympy,hargup/sympy,wyom/sympy,abloomston/sympy,Titan-C/sympy,maniteja123/sympy,Gadal/sympy,atsao72/sympy,abloomston/sympy,VaibhavAgarwalVA/sympy,kevalds51/sympy,maniteja123/sympy,vipulroxx/sympy,yashsharan/sympy,drufat/sympy,yashsharan/sympy,liangjiaxing/sympy,pernici/sympy,souravsingh/sympy,Arafatk/sympy,hazelnusse/sympy-old,emon10005/sympy,skidzo/sympy,debugger22/sympy,kaushik94/sympy,postvakje/sympy,pandeyadarsh/sympy,iamutkarshtiwari/sympy,rahuldan/sympy,kmacinnis/sympy,kevalds51/sympy,farhaanbukhsh/sympy,Sumith1896/sympy,debugger22/sympy,asm666/sympy,VaibhavAgarwalVA/sympy,kmacinnis/sympy,souravsingh/sympy,Gadal/sympy,meghana1995/sympy,toolforger/sympy,hrashk/sympy,atreyv/sympy,bukzor/sympy,MechCoder/sympy,lidavidm/sympy,jaimahajan1997/sympy,cccfran/sympy,postvakje/sympy,atsao72/sympy,ChristinaZografou/sympy,AkademieOlympia/sympy,Arafatk/sympy,aktech/sympy,yukoba/sympy,grevutiu-gabriel/sympy,bukzor/sympy,MechCoder/sympy,lidavidm/sympy,jerli/sympy,oliverlee/sympy,sahmed95/sympy,skirpichev/omg,sahilshekhawat/sympy,AkademieOlympia/sympy,Shaswat27/sympy,atreyv/sympy,Davidjohnwilson/sympy,dqnykamp/sympy,sunny94/temp,hrashk/sympy,moble/sympy,Designist/sympy,madan96/sympy,wanglongqi/sympy,Vishluck/sympy,Shaswat27/sympy,Mitchkoens/sympy,shikil/sympy,mafiya69/sympy,Sumith1896/sympy,beni55/sympy,rahuldan/sympy,MridulS/sympy,mafiya69/sympy,Titan-C/sympy,jamesblunt/sympy,Davidjohnwilson/sympy,jbbskinny/sympy,mcdaniel67/sympy,MechCoder/sympy,vorwerkc/sympy,sunny94/temp,kaushik94/sympy,moble/sympy,aktech/sympy,saurabhjn76/sympy,ga7g08/sympy,cswiercz/sympy,garvitr/sympy,Vishluck/sympy,mattpap/sympy-polys,postvakje/sympy,hargup/sympy,Designist/sympy,madan96/sympy,mbkumar/sympy,vorwerkc/sympy,ahhda/sympy,sampadsaha5/sympy,ga7g08/sympy,cswiercz/sympy,garvitr/sympy,grevutiu-gabriel/sympy,saurabhjn76/sympy,cccfran/sympy,emon10005/sympy,mcdaniel67/sympy,beni55/sympy,aktech/sympy,skidzo/sympy,hargup/sympy,maniteja123/sympy,mafiya69/sympy,amitjamadagni/sympy,minrk/sympy,fperez/sympy,sahmed95/sympy,AunShiLord/sympy,chaffra/sympy,sahilshekhawat/sympy,farhaanbukhsh/sympy,yashsharan/sympy,lidavidm/sympy,shipci/sympy,AkademieOlympia/sympy,dqnykamp/sympy,ryanGT/sympy,souravsingh/sympy,liangjiaxing/sympy,debugger22/sympy,kumarkrishna/sympy,toolforger/sympy,tovrstra/sympy,minrk/sympy,neilh10/micropython,sunny94/temp,abhiii5459/sympy,lindsayad/sympy,yukoba/sympy,chaffra/sympy,jamesblunt/sympy,asm666/sympy,MridulS/sympy,vorwerkc/sympy,ChristinaZografou/sympy,abloomston/sympy,VaibhavAgarwalVA/sympy,emon10005/sympy,shikil/sympy,AunShiLord/sympy,hazelnusse/sympy-old,asm666/sympy,saurabhjn76/sympy,atsao72/sympy,shipci/sympy,KevinGoodsell/sympy,flacjacket/sympy,pbrady/sympy,diofant/diofant,hazelnusse/sympy-old,pbrady/sympy,amitjamadagni/sympy,meghana1995/sympy,cswiercz/sympy,shikil/sympy,mbkumar/sympy,rahuldan/sympy,beni55/sympy,mbkumar/sympy,farhaanbukhsh/sympy,sampadsaha5/sympy,garvitr/sympy
---
+++
@@ -0,0 +1,10 @@
+from sympy.core.cache import cacheit
+
+def test_cacheit_doc():
+    @cacheit
+    def testfn():
+        "test docstring"
+        pass
+
+    assert testfn.__doc__ == "test docstring"
+    assert testfn.__name__ == "testfn"
0f1cf524c2b90d77e17d516a30d62632ebb5ed2f
datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py
datathon/datathon_etl_pipelines/generic_imagining/untar_gcs.py
r"""Untar .tar and .tar.gz GCS files.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import apache_beam as beam from apache_beam.options.pipeline_options import PipelineOptions from apache_beam.options.pipeline_options import SetupOptions from datathon_etl_pipelines.dofns.read_tar_file import ReadTarFile from datathon_etl_pipelines.utils import get_setup_file import tensorflow as tf def write_file(element): path, contents = element with tf.io.gfile.GFile(path, 'wb') as fp: fp.write(contents) def main(): """Build and execute the Apache Beam pipeline using the commandline arguments.""" parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( '--input_tars', required=True, nargs='+', help="""One or more wildcard patterns that give the full paths to the input tar files on GCS.""") parser.add_argument( '--output_dir', required=True, help="""The output directory to write the untar'd files to.""") args, pipeline_args = parser.parse_known_args() beam_options = PipelineOptions(pipeline_args) # serialize and provide global imports, functions, etc. to workers. beam_options.view_as(SetupOptions).save_main_session = True beam_options.view_as(SetupOptions).setup_file = get_setup_file() if args.output_dir.endswith('/'): out_dir = args.output_dir[:-1] else: out_dir = args.output_dir def get_full_output_path(relative_path): if relative_path.startswith('/'): return out_dir + relative_path else: return '{}/{}'.format(out_dir, relative_path) with beam.Pipeline(options=beam_options) as p: _ = \ (p | beam.Create(tf.io.gfile.glob(args.input_tars)) | 'Untar' >> beam.ParDo(ReadTarFile(), get_full_output_path) | 'Write' >> beam.Map(write_file)) if __name__ == '__main__': main()
Add pipeline for untar'ing GCS blobs.
Add pipeline for untar'ing GCS blobs. PiperOrigin-RevId: 247210932
Python
apache-2.0
GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare,GoogleCloudPlatform/healthcare
---
+++
@@ -0,0 +1,63 @@
+r"""Untar .tar and .tar.gz GCS files."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import apache_beam as beam
+from apache_beam.options.pipeline_options import PipelineOptions
+from apache_beam.options.pipeline_options import SetupOptions
+from datathon_etl_pipelines.dofns.read_tar_file import ReadTarFile
+from datathon_etl_pipelines.utils import get_setup_file
+import tensorflow as tf
+
+
+def write_file(element):
+    path, contents = element
+    with tf.io.gfile.GFile(path, 'wb') as fp:
+        fp.write(contents)
+
+
+def main():
+    """Build and execute the Apache Beam pipeline using the commandline arguments."""
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument(
+        '--input_tars',
+        required=True,
+        nargs='+',
+        help="""One or more wildcard patterns that give the full paths to the
+        input tar files on GCS.""")
+
+    parser.add_argument(
+        '--output_dir',
+        required=True,
+        help="""The output directory to write the untar'd files to.""")
+
+    args, pipeline_args = parser.parse_known_args()
+    beam_options = PipelineOptions(pipeline_args)
+    # serialize and provide global imports, functions, etc. to workers.
+    beam_options.view_as(SetupOptions).save_main_session = True
+    beam_options.view_as(SetupOptions).setup_file = get_setup_file()
+
+    if args.output_dir.endswith('/'):
+        out_dir = args.output_dir[:-1]
+    else:
+        out_dir = args.output_dir
+
+    def get_full_output_path(relative_path):
+        if relative_path.startswith('/'):
+            return out_dir + relative_path
+        else:
+            return '{}/{}'.format(out_dir, relative_path)
+
+    with beam.Pipeline(options=beam_options) as p:
+        _ = \
+            (p
+             | beam.Create(tf.io.gfile.glob(args.input_tars))
+             | 'Untar' >> beam.ParDo(ReadTarFile(), get_full_output_path)
+             | 'Write' >> beam.Map(write_file))
+
+
+if __name__ == '__main__':
+    main()
80ccffb269b04af02224c1121c41d4e7c503bc30
tests/util/test_intersperse.py
tests/util/test_intersperse.py
# This file is part of rinohtype, the Python document preparation system. # # Copyright (c) Brecht Machiels. # # Use of this source code is subject to the terms of the GNU Affero General # Public License v3. See the LICENSE file or http://www.gnu.org/licenses/. from rinoh.util import intersperse def test_intersperse(): separator = "." letters = [127, 0, 0, 1] localhost = list(intersperse(letters, separator)) assert [127, ".", 0, ".", 0, ".", 1] == localhost
Add unit test for intersperse
Add unit test for intersperse
Python
agpl-3.0
brechtm/rinohtype,brechtm/rinohtype,brechtm/rinohtype
--- +++ @@ -0,0 +1,16 @@ +# This file is part of rinohtype, the Python document preparation system. +# +# Copyright (c) Brecht Machiels. +# +# Use of this source code is subject to the terms of the GNU Affero General +# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/. + + +from rinoh.util import intersperse + + +def test_intersperse(): + separator = "." + letters = [127, 0, 0, 1] + localhost = list(intersperse(letters, separator)) + assert [127, ".", 0, ".", 0, ".", 1] == localhost
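For reference, a minimal generator consistent with the test above (the real implementation lives in rinoh.util; this is only a sketch):

def intersperse(iterable, separator):
    it = iter(iterable)
    for first in it:
        yield first
        break  # emit the first item without a leading separator
    for item in it:
        yield separator
        yield item

list(intersperse([127, 0, 0, 1], '.'))  # [127, '.', 0, '.', 0, '.', 1]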
1e9980aff2370b96171011f7fa50d4517957fa86
tilepack/check_toi.py
tilepack/check_toi.py
import mercantile import argparse def main(): parser = argparse.ArgumentParser() parser.add_argument('min_lon', type=float, help='Bounding box minimum longitude/left') parser.add_argument('min_lat', type=float, help='Bounding box minimum latitude/bottom') parser.add_argument('max_lon', type=float, help='Bounding box maximum longitude/right') parser.add_argument('max_lat', type=float, help='Bounding box maximum latitude/top') parser.add_argument('min_zoom', type=int, help='The minimum zoom level to include') parser.add_argument('max_zoom', type=int, help='The maximum zoom level to include') args = parser.parse_args() print("zoom\tmissing from toi\tin aoi") for zoom in range(args.min_zoom, args.max_zoom + 1): tiles_in_aoi = set([ '{}/{}/{}'.format(z, x, y) for x, y, z in mercantile.tiles( args.min_lon, args.min_lat, args.max_lon, args.max_lat, [zoom] ) ]) with open('toi.z{}.txt'.format(zoom), 'r') as f: tiles_in_toi = set([ l.strip() for l in f.readlines() ]) print("{zoom:2d}\t{tiles_not_in_toi}\t{tiles_in_aoi}".format( zoom=zoom, tiles_not_in_toi=len(tiles_in_aoi - tiles_in_toi), tiles_in_aoi=len(tiles_in_aoi), )) if __name__ == '__main__': main()
Add a script to check TOI coverage for a bbox and zoom range
Add a script to check TOI coverage for a bbox and zoom range
Python
mit
tilezen/tilepacks
--- +++ @@ -0,0 +1,51 @@ +import mercantile +import argparse + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('min_lon', + type=float, + help='Bounding box minimum longitude/left') + parser.add_argument('min_lat', + type=float, + help='Bounding box minimum latitude/bottom') + parser.add_argument('max_lon', + type=float, + help='Bounding box maximum longitude/right') + parser.add_argument('max_lat', + type=float, + help='Bounding box maximum latitude/top') + parser.add_argument('min_zoom', + type=int, + help='The minimum zoom level to include') + parser.add_argument('max_zoom', + type=int, + help='The maximum zoom level to include') + args = parser.parse_args() + + print("zoom\tmissing from toi\tin aoi") + + for zoom in range(args.min_zoom, args.max_zoom + 1): + tiles_in_aoi = set([ + '{}/{}/{}'.format(z, x, y) + for x, y, z in mercantile.tiles( + args.min_lon, args.min_lat, args.max_lon, args.max_lat, + [zoom] + ) + ]) + + with open('toi.z{}.txt'.format(zoom), 'r') as f: + tiles_in_toi = set([ + l.strip() + for l in f.readlines() + ]) + + print("{zoom:2d}\t{tiles_not_in_toi}\t{tiles_in_aoi}".format( + zoom=zoom, + tiles_not_in_toi=len(tiles_in_aoi - tiles_in_toi), + tiles_in_aoi=len(tiles_in_aoi), + )) + + +if __name__ == '__main__': + main()
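In isolation, the AOI enumeration the script builds its sets from looks like this (mercantile.tiles takes a west/south/east/north bounding box and yields Tile(x, y, z) tuples):

import mercantile

# Tiles covering a small bounding box at zoom 10, keyed z/x/y as above.
for tile in mercantile.tiles(-122.52, 37.70, -122.35, 37.83, [10]):
    print('{}/{}/{}'.format(tile.z, tile.x, tile.y))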
3ae0ea21cc6b1afadb0dd72e29016385d18167ab
DebianDevelChangesBot/utils/fiforeader.py
DebianDevelChangesBot/utils/fiforeader.py
# -*- coding: utf-8 -*-
#
# Debian Changes Bot
# Copyright (C) 2008 Chris Lamb <chris@chris-lamb.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import fcntl
import select
import threading
import traceback

class FifoReader(object):
    __shared_state = {}

    read_lock = threading.Lock()
    stop_lock = threading.Lock()
    running = False
    quitfds = None

    def __init__(self):
        self.__dict__ = self.__shared_state

    def start(self, callback, fifo_loc):
        self.callback = callback
        self.fifo_loc = fifo_loc
        threading.Thread(target=self.run).start()

    def run(self):
        self.read_lock.acquire()
        try:
            for fileobj in self.gen_messages():
                try:
                    self.callback(fileobj)
                except Exception, exc:
                    print "Uncaught exception caught inside fiforeader"
                    traceback.print_exc()
                finally:
                    fileobj.close()
        finally:
            self.read_lock.release()

    def gen_messages(self):
        self.running = True
        self.quitfds = os.pipe()

        while self.running:
            # Open without blocking on a writer, then clear O_NONBLOCK via
            # F_SETFL so the reads below block until data arrives.
            fifo = os.open(self.fifo_loc, os.O_RDONLY | os.O_NONBLOCK)
            flags = fcntl.fcntl(fifo, fcntl.F_GETFL)
            fcntl.fcntl(fifo, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)

            readfds, _, _ = select.select([fifo, self.quitfds[0]], [], [])

            # If our anonymous descriptor was written to, exit loop
            if not self.running or self.quitfds[0] in readfds:
                os.close(fifo)
                os.close(self.quitfds[0])
                os.close(self.quitfds[1])
                break

            if fifo not in readfds:
                continue

            yield os.fdopen(fifo)

    def stop(self):
        self.stop_lock.acquire()
        try:
            if self.running:
                self.running = False
                os.write(self.quitfds[1], '1')

                # Block until we have actually stopped
                self.read_lock.acquire()
                self.read_lock.release()
        finally:
            self.stop_lock.release()
Add FifoReader class to utils
Add FifoReader class to utils Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>
Python
agpl-3.0
xtaran/debian-devel-changes-bot,lamby/debian-devel-changes-bot,lamby/debian-devel-changes-bot,xtaran/debian-devel-changes-bot,sebastinas/debian-devel-changes-bot,lamby/debian-devel-changes-bot
--- +++ @@ -0,0 +1,91 @@
+# -*- coding: utf-8 -*-
+#
+# Debian Changes Bot
+# Copyright (C) 2008 Chris Lamb <chris@chris-lamb.co.uk>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import fcntl
+import select
+import threading
+import traceback
+
+class FifoReader(object):
+    __shared_state = {}
+
+    read_lock = threading.Lock()
+    stop_lock = threading.Lock()
+    running = False
+    quitfds = None
+
+    def __init__(self):
+        self.__dict__ = self.__shared_state
+
+    def start(self, callback, fifo_loc):
+        self.callback = callback
+        self.fifo_loc = fifo_loc
+        threading.Thread(target=self.run).start()
+
+    def run(self):
+        self.read_lock.acquire()
+        try:
+            for fileobj in self.gen_messages():
+                try:
+                    self.callback(fileobj)
+                except Exception, exc:
+                    print "Uncaught exception caught inside fiforeader"
+                    traceback.print_exc()
+                finally:
+                    fileobj.close()
+        finally:
+            self.read_lock.release()
+
+    def gen_messages(self):
+        self.running = True
+        self.quitfds = os.pipe()
+
+        while self.running:
+            # Open without blocking on a writer, then clear O_NONBLOCK via
+            # F_SETFL so the reads below block until data arrives.
+            fifo = os.open(self.fifo_loc, os.O_RDONLY | os.O_NONBLOCK)
+            flags = fcntl.fcntl(fifo, fcntl.F_GETFL)
+            fcntl.fcntl(fifo, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+
+            readfds, _, _ = select.select([fifo, self.quitfds[0]], [], [])
+
+            # If our anonymous descriptor was written to, exit loop
+            if not self.running or self.quitfds[0] in readfds:
+                os.close(fifo)
+                os.close(self.quitfds[0])
+                os.close(self.quitfds[1])
+                break
+
+            if fifo not in readfds:
+                continue
+
+            yield os.fdopen(fifo)
+
+    def stop(self):
+        self.stop_lock.acquire()
+        try:
+            if self.running:
+                self.running = False
+                os.write(self.quitfds[1], '1')
+
+                # Block until we have actually stopped
+                self.read_lock.acquire()
+                self.read_lock.release()
+        finally:
+            self.stop_lock.release()
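A minimal usage sketch of the Borg-style reader above (the FIFO path, callback and import path are assumptions, not part of the commit):

import os
from DebianDevelChangesBot.utils.fiforeader import FifoReader

def on_message(fileobj):
    print fileobj.read()

fifo_path = '/tmp/bot.fifo'  # hypothetical location
if not os.path.exists(fifo_path):
    os.mkfifo(fifo_path)

reader = FifoReader()
reader.start(on_message, fifo_path)  # spawns the reader thread
# ... later, from any thread:
reader.stop()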
7e600a791bec2f8639aae417a1ea052ca94cf7b9
testgen/mc-bundling-x86-gen.py
testgen/mc-bundling-x86-gen.py
#!/usr/bin/python # Auto-generates an exhaustive and repetitive test for correct bundle-locked # alignment on x86. # For every possible offset in an aligned bundle, a bundle-locked group of every # size in the inclusive range [1, bundle_size] is inserted. An appropriate CHECK # is added to verify that NOP padding occurred (or did not occur) as expected. # This script runs with Python 2.6+ (including 3.x) from __future__ import print_function BUNDLE_SIZE_POW2 = 4 BUNDLE_SIZE = 2 ** BUNDLE_SIZE_POW2 PREAMBLE = ''' # RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \\ # RUN: | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s # !!! This test is auto-generated from utils/testgen/mc-bundling-x86-gen.py !!! # It tests that bundle-aligned grouping works correctly in MC. Read the # source of the script for more details. .text .bundle_align_mode {0} '''.format(BUNDLE_SIZE_POW2).lstrip() ALIGNTO = ' .align {0}, 0x90' NOPFILL = ' .fill {0}, 1, 0x90' def print_bundle_locked_sequence(len): print(' .bundle_lock') print(' .rept {0}'.format(len)) print(' inc %eax') print(' .endr') print(' .bundle_unlock') def generate(): print(PREAMBLE) ntest = 0 for instlen in range(1, BUNDLE_SIZE + 1): for offset in range(0, BUNDLE_SIZE): # Spread out all the instructions to not worry about cross-bundle # interference. print(ALIGNTO.format(2 * BUNDLE_SIZE)) print('INSTRLEN_{0}_OFFSET_{1}:'.format(instlen, offset)) if offset > 0: print(NOPFILL.format(offset)) print_bundle_locked_sequence(instlen) # Now generate an appropriate CHECK line base_offset = ntest * 2 * BUNDLE_SIZE inst_orig_offset = base_offset + offset # had it not been padded... if offset + instlen > BUNDLE_SIZE: # Padding needed print('# CHECK: {0:x}: nop'.format(inst_orig_offset)) aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1) print('# CHECK: {0:x}: incl'.format(aligned_offset)) else: # No padding needed print('# CHECK: {0:x}: incl'.format(inst_orig_offset)) print() ntest += 1 if __name__ == '__main__': generate()
Add a largish auto-generated test for the aligned bundling feature, along with the script generating it. The test should never be modified manually. If anyone needs to change it, please change the script and re-run it.
Add a largish auto-generated test for the aligned bundling feature, along with the script generating it. The test should never be modified manually. If anyone needs to change it, please change the script and re-run it. The script is placed into utils/testgen - I couldn't think of a better place, and after some discussion on IRC this looked like a logical location. git-svn-id: a4a6f32337ebd29ad4763b423022f00f68d1c7b7@170720 91177308-0d34-0410-b5e6-96231b3b80d8
Python
bsd-3-clause
lodyagin/bare_cxx,lodyagin/bare_cxx,lodyagin/bare_cxx,lodyagin/bare_cxx,lodyagin/bare_cxx
--- +++ @@ -0,0 +1,70 @@ +#!/usr/bin/python + +# Auto-generates an exhaustive and repetitive test for correct bundle-locked +# alignment on x86. +# For every possible offset in an aligned bundle, a bundle-locked group of every +# size in the inclusive range [1, bundle_size] is inserted. An appropriate CHECK +# is added to verify that NOP padding occurred (or did not occur) as expected. + +# This script runs with Python 2.6+ (including 3.x) + +from __future__ import print_function + +BUNDLE_SIZE_POW2 = 4 +BUNDLE_SIZE = 2 ** BUNDLE_SIZE_POW2 + +PREAMBLE = ''' +# RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \\ +# RUN: | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s + +# !!! This test is auto-generated from utils/testgen/mc-bundling-x86-gen.py !!! +# It tests that bundle-aligned grouping works correctly in MC. Read the +# source of the script for more details. + + .text + .bundle_align_mode {0} +'''.format(BUNDLE_SIZE_POW2).lstrip() + +ALIGNTO = ' .align {0}, 0x90' +NOPFILL = ' .fill {0}, 1, 0x90' + +def print_bundle_locked_sequence(len): + print(' .bundle_lock') + print(' .rept {0}'.format(len)) + print(' inc %eax') + print(' .endr') + print(' .bundle_unlock') + +def generate(): + print(PREAMBLE) + + ntest = 0 + for instlen in range(1, BUNDLE_SIZE + 1): + for offset in range(0, BUNDLE_SIZE): + # Spread out all the instructions to not worry about cross-bundle + # interference. + print(ALIGNTO.format(2 * BUNDLE_SIZE)) + print('INSTRLEN_{0}_OFFSET_{1}:'.format(instlen, offset)) + if offset > 0: + print(NOPFILL.format(offset)) + print_bundle_locked_sequence(instlen) + + # Now generate an appropriate CHECK line + base_offset = ntest * 2 * BUNDLE_SIZE + inst_orig_offset = base_offset + offset # had it not been padded... + + if offset + instlen > BUNDLE_SIZE: + # Padding needed + print('# CHECK: {0:x}: nop'.format(inst_orig_offset)) + aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1) + print('# CHECK: {0:x}: incl'.format(aligned_offset)) + else: + # No padding needed + print('# CHECK: {0:x}: incl'.format(inst_orig_offset)) + + print() + ntest += 1 + +if __name__ == '__main__': + generate() +
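For illustration, the first case the script emits after the preamble (instlen=1, offset=0: the single-instruction group fits inside its bundle, so the CHECK pins the incl at offset 0 with no NOP padding; indentation approximate):

  .align 32, 0x90
INSTRLEN_1_OFFSET_0:
  .bundle_lock
  .rept 1
  inc %eax
  .endr
  .bundle_unlock
# CHECK: 0: incl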
3a19187e8116e8dc20166786fb1ca4d14b527950
ppapi/generators/idl_visitor.py
ppapi/generators/idl_visitor.py
#!/usr/bin/python
#
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

""" Visitor Object for traversing AST """

#
# IDLVisitor
#
# The IDLVisitor class will traverse an AST truncating portions of the tree
# that fail due to class or version filters. For each node, after the filter
# passes, the visitor will call the 'Arrive' member passing in the node and
# the data passed in from the parent call. It will then Visit the children.
# When done processing children, the visitor will call the 'Depart' member
# before returning
#

class IDLVisitor(object):
  def __init__(self):
    self.depth = 0

  # Return TRUE if the node should be visited
  def VisitFilter(self, node, data):
    return True

  # Return TRUE if data should be added to the childdata list
  def AgrigateFilter(self, data):
    return data is not None

  def Visit(self, node, data):
    self.depth += 1
    if not self.VisitFilter(node, data): return None

    childdata = []
    newdata = self.Arrive(node, data)
    for child in node.GetChildren():
      ret = self.Visit(child, newdata)
      if self.AgrigateFilter(ret):
        childdata.append(ret)
    out = self.Depart(node, newdata, childdata)

    self.depth -= 1
    return out

  def Arrive(self, node, data):
    return data

  def Depart(self, node, data, childdata):
    return data


#
# IDLVersionVisitor
#
# The IDLVersionVisitor will only visit nodes with intervals that include the
# version. It will also optionally filter based on a class list
#
class IDLVersionVisitor(object):
  def __init__(self, version, classList):
    self.version = version
    self.classList = classList

  def Filter(self, node, data):
    if self.classList and node.cls not in self.classList: return False
    if not node.IsVersion(self.version): return False
    return True

class IDLRangeVisitor(object):
  def __init__(self, vmin, vmax, classList):
    self.vmin = vmin
    self.vmax = vmax
    self.classList = classList

  def Filter(self, node, data):
    if self.classList and node.cls not in self.classList: return False
    if not node.IsVersion(self.version): return False
    return True
Add missing IDL Visitor class
Add missing IDL Visitor class

This class provides a simple mechanism for recursively traversing the AST
for both simple and version aware traversal.

TBR= sehr@google.com
BUG= http://code.google.com/p/chromium/issues/detail?id=87684
TEST= python idl_c_header.py

Review URL: http://codereview.chromium.org/7448001

git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@93036 0039d316-1c4b-4281-b951-d872f2087c98
Python
bsd-3-clause
yitian134/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,yitian134/chromium,gavinp/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,adobe/chromium,adobe/chromium,adobe/chromium,adobe/chromium,yitian134/chromium,ropik/chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,adobe/chromium,ropik/chromium,yitian134/chromium,gavinp/chromium,ropik/chromium,ropik/chromium,ropik/chromium,ropik/chromium,gavinp/chromium
--- +++ @@ -0,0 +1,81 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Visitor Object for traversing AST """
+
+#
+# IDLVisitor
+#
+# The IDLVisitor class will traverse an AST truncating portions of the tree
+# that fail due to class or version filters. For each node, after the filter
+# passes, the visitor will call the 'Arrive' member passing in the node and
+# the data passed in from the parent call. It will then Visit the children.
+# When done processing children, the visitor will call the 'Depart' member
+# before returning
+#
+
+class IDLVisitor(object):
+  def __init__(self):
+    self.depth = 0
+
+  # Return TRUE if the node should be visited
+  def VisitFilter(self, node, data):
+    return True
+
+  # Return TRUE if data should be added to the childdata list
+  def AgrigateFilter(self, data):
+    return data is not None
+
+  def Visit(self, node, data):
+    self.depth += 1
+    if not self.VisitFilter(node, data): return None
+
+    childdata = []
+    newdata = self.Arrive(node, data)
+    for child in node.GetChildren():
+      ret = self.Visit(child, newdata)
+      if self.AgrigateFilter(ret):
+        childdata.append(ret)
+    out = self.Depart(node, newdata, childdata)
+
+    self.depth -= 1
+    return out
+
+  def Arrive(self, node, data):
+    return data
+
+  def Depart(self, node, data, childdata):
+    return data
+
+
+#
+# IDLVersionVisitor
+#
+# The IDLVersionVisitor will only visit nodes with intervals that include the
+# version. It will also optionally filter based on a class list
+#
+class IDLVersionVisitor(object):
+  def __init__(self, version, classList):
+    self.version = version
+    self.classList = classList
+
+  def Filter(self, node, data):
+    if self.classList and node.cls not in self.classList: return False
+    if not node.IsVersion(self.version): return False
+    return True
+
+class IDLRangeVisitor(object):
+  def __init__(self, vmin, vmax, classList):
+    self.vmin = vmin
+    self.vmax = vmax
+    self.classList = classList
+
+  def Filter(self, node, data):
+    if self.classList and node.cls not in self.classList: return False
+    if not node.IsVersion(self.version): return False
+    return True
+
+
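A small sketch of the intended subclassing pattern: a visitor that counts every node it is allowed to visit, overriding only Arrive (the node API is assumed from the code above):

class NodeCounter(IDLVisitor):
  def __init__(self):
    IDLVisitor.__init__(self)
    self.count = 0

  def Arrive(self, node, data):
    self.count += 1
    return data

# counter = NodeCounter()
# counter.Visit(ast_root, None)  # ast_root: any node exposing GetChildren()
# print counter.count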
bbed7b813b6c809ee9615eabf2fcf4d3156b1c36
tools/convert_release_notes.py
tools/convert_release_notes.py
import sys import mistune print(sys.argv[1]) with open(sys.argv[1], "r") as source_file: source = source_file.read() html = mistune.Markdown() print() print("HTML") print("=====================================") print("From the <a href=\"\">GitHub release page</a>:\n<blockquote>") print(html(source)) print("</blockquote>") class AdafruitBBCodeRenderer: def __init__(self, **kwargs): self.options = kwargs def placeholder(self): return '' def paragraph(self, text): return text + "\n\n" def text(self, text): return text def link(self, link, title, text): return "[url={}]{}[/url]".format(link, text) def header(self, text, level, raw): return "[b][size=150]{}[/size][/b]\n".format(text) def codespan(self, text): return "[color=#E74C3C][size=95]{}[/size][/color]".format(text) def list_item(self, text): return "[*]{}[/*]\n".format(text.strip()) def list(self, body, ordered=True): ordered_indicator = "=" if ordered else "" return "[list{}]\n{}[/list]".format(ordered_indicator, body) def double_emphasis(self, text): return "[b]{}[/b]".format(text) bbcode = mistune.Markdown(renderer=AdafruitBBCodeRenderer()) print() print("BBCode") print("=====================================") print("From the [url=]GitHub release page[/url]:\n[quote]") print(bbcode(source)) print("[/quote]")
Add script to convert release notes from Markdown
Add script to convert release notes from Markdown
Python
mit
adafruit/micropython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython,adafruit/micropython,adafruit/circuitpython
--- +++ @@ -0,0 +1,57 @@ +import sys +import mistune + +print(sys.argv[1]) + +with open(sys.argv[1], "r") as source_file: + source = source_file.read() + +html = mistune.Markdown() + +print() +print("HTML") +print("=====================================") +print("From the <a href=\"\">GitHub release page</a>:\n<blockquote>") +print(html(source)) +print("</blockquote>") + +class AdafruitBBCodeRenderer: + def __init__(self, **kwargs): + self.options = kwargs + + def placeholder(self): + return '' + + def paragraph(self, text): + return text + "\n\n" + + def text(self, text): + return text + + def link(self, link, title, text): + return "[url={}]{}[/url]".format(link, text) + + def header(self, text, level, raw): + return "[b][size=150]{}[/size][/b]\n".format(text) + + def codespan(self, text): + return "[color=#E74C3C][size=95]{}[/size][/color]".format(text) + + def list_item(self, text): + return "[*]{}[/*]\n".format(text.strip()) + + def list(self, body, ordered=True): + ordered_indicator = "=" if ordered else "" + return "[list{}]\n{}[/list]".format(ordered_indicator, body) + + def double_emphasis(self, text): + return "[b]{}[/b]".format(text) + +bbcode = mistune.Markdown(renderer=AdafruitBBCodeRenderer()) + +print() +print("BBCode") +print("=====================================") +print("From the [url=]GitHub release page[/url]:\n[quote]") +print(bbcode(source)) +print("[/quote]")
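The markdown-to-BBCode mapping can be sanity-checked by calling the renderer methods directly, with no parsing involved:

r = AdafruitBBCodeRenderer()
print(r.link('https://example.com', None, 'release page'))
# [url=https://example.com]release page[/url]
print(r.codespan('boot.py'))
# [color=#E74C3C][size=95]boot.py[/size][/color]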
80580b8667558e3a4034b31ac08773de70ef3b39
display_control_consumer/run.py
display_control_consumer/run.py
from setproctitle import setproctitle import json import redis import subprocess import time class DisplayControlConsumer(object): STEP = 0.05 def __init__(self): self.redis_instance = redis.StrictRedis() self.env = {"DISPLAY": ":0"} def get_brightness(self): p = subprocess.Popen(["xrandr", "--verbose"], env=self.env, stdout=subprocess.PIPE) (stdout, _) = p.communicate() for line in stdout.split("\n"): if "Brightness" in line: return float(line.strip().split(": ")[1]) def set_brightness(self, brightness): p = subprocess.Popen(["xrandr", "--q1", "--output", "HDMI-0", "--brightness", unicode(brightness)], env=self.env) p.wait() self.redis_instance.setex("display-control-brightness", 60, brightness) def run(self): while True: time.sleep(1) destination_brightness = self.redis_instance.get("display-control-destination-brightness") if not destination_brightness: continue destination_brightness = float(destination_brightness) current_brightness = self.redis_instance.get("display-control-brightness") if current_brightness: current_brightness = float(current_brightness) else: current_brightness = self.get_brightness() self.redis_instance.setex("display-control-brightness", 60, current_brightness) if current_brightness > destination_brightness: # Decrease brightness. Current brightness is too large. new_brightness = current_brightness - self.STEP print "Decreasing brightness: %s (-> %s, currently at %s)" % (new_brightness, destination_brightness, current_brightness) if new_brightness < destination_brightness: # Wrapped around: new brightness is smaller than destination brightness.; no action print "Brightness wrapped around" self.redis_instance.delete("display-control-destination-brightness") continue elif current_brightness < destination_brightness: # Increase brightness new_brightness = current_brightness + self.STEP print "Increasing brightness: %s (-> %s, currently at %s)" % (new_brightness, destination_brightness, current_brightness) if new_brightness > destination_brightness: # Wrapped around; no action self.redis_instance.delete("display-control-destination-brightness") continue else: # Already matches. No action. self.redis_instance.delete("display-control-destination-brightness") continue print "Setting brightness to %s (destination: %s)" % (new_brightness, destination_brightness) self.set_brightness(new_brightness) self.redis_instance.publish("home:broadcast:generic", json.dumps({"key": "display_brightness", "content": new_brightness})) def main(): setproctitle("display_control_consumer: run") dcc = DisplayControlConsumer() dcc.run() if __name__ == '__main__': main()
Implement consumer for adjusting screen brightness.
Implement consumer for adjusting screen brightness. This script polls "display-control-destination-brightness" redis key and slowly adjusts screen brightness towards the value. Key is automatically deleted when destination is reached.
Python
bsd-3-clause
ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display,ojarva/home-info-display
--- +++ @@ -0,0 +1,73 @@ +from setproctitle import setproctitle +import json +import redis +import subprocess +import time + +class DisplayControlConsumer(object): + STEP = 0.05 + + def __init__(self): + self.redis_instance = redis.StrictRedis() + self.env = {"DISPLAY": ":0"} + + + def get_brightness(self): + p = subprocess.Popen(["xrandr", "--verbose"], env=self.env, stdout=subprocess.PIPE) + (stdout, _) = p.communicate() + for line in stdout.split("\n"): + if "Brightness" in line: + return float(line.strip().split(": ")[1]) + + def set_brightness(self, brightness): + p = subprocess.Popen(["xrandr", "--q1", "--output", "HDMI-0", "--brightness", unicode(brightness)], env=self.env) + p.wait() + self.redis_instance.setex("display-control-brightness", 60, brightness) + + def run(self): + while True: + time.sleep(1) + destination_brightness = self.redis_instance.get("display-control-destination-brightness") + if not destination_brightness: + continue + destination_brightness = float(destination_brightness) + + current_brightness = self.redis_instance.get("display-control-brightness") + if current_brightness: + current_brightness = float(current_brightness) + else: + current_brightness = self.get_brightness() + self.redis_instance.setex("display-control-brightness", 60, current_brightness) + + if current_brightness > destination_brightness: + # Decrease brightness. Current brightness is too large. + new_brightness = current_brightness - self.STEP + print "Decreasing brightness: %s (-> %s, currently at %s)" % (new_brightness, destination_brightness, current_brightness) + if new_brightness < destination_brightness: + # Wrapped around: new brightness is smaller than destination brightness.; no action + print "Brightness wrapped around" + self.redis_instance.delete("display-control-destination-brightness") + continue + elif current_brightness < destination_brightness: + # Increase brightness + new_brightness = current_brightness + self.STEP + print "Increasing brightness: %s (-> %s, currently at %s)" % (new_brightness, destination_brightness, current_brightness) + + if new_brightness > destination_brightness: + # Wrapped around; no action + self.redis_instance.delete("display-control-destination-brightness") + continue + else: + # Already matches. No action. + self.redis_instance.delete("display-control-destination-brightness") + continue + print "Setting brightness to %s (destination: %s)" % (new_brightness, destination_brightness) + self.set_brightness(new_brightness) + self.redis_instance.publish("home:broadcast:generic", json.dumps({"key": "display_brightness", "content": new_brightness})) +def main(): + setproctitle("display_control_consumer: run") + dcc = DisplayControlConsumer() + dcc.run() + +if __name__ == '__main__': + main()
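A sketch of the producer side: any process can request a fade by writing the key the consumer polls (the value is illustrative):

import redis

r = redis.StrictRedis()
# Ask for 40% brightness; the consumer walks there in 0.05 steps,
# one per second, and deletes the key once the destination is reached.
r.set("display-control-destination-brightness", 0.4)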
3fbf2c29a54225e7d4dd882637e68cfe3a4d0101
src/cobwebs/tests/test_mq.py
src/cobwebs/tests/test_mq.py
from cobwebs.mq.core import RPCLink, TopicsLink from cobwebs.mq.backends.rabbitmq import driver import pytest import spider import json from unittest import mock HOST = "127.0.0.1" def test_driver_instance(): assert isinstance(driver.rpc, RPCLink) assert isinstance(driver.topics, TopicsLink) @mock.patch("cobwebs.mq.backends.rabbitmq") def test_rpc(rabbitmq): request = {"action": "list", "data": None} result = rabbitmq.rpc.send("db_driver", json.dumps(request), HOST) rabbitmq.rpc.send.assert_called_with("db_driver", json.dumps(request), HOST) @mock.patch("cobwebs.mq.backends.rabbitmq") def test_topic(rabbitmq): result = rabbitmq.topic.emit(key="test", message="this is just a message") rabbitmq.topic.emit.assert_called_with(key="test", message="this is just a message")
Add some tests for Message Queue
Add some tests for Message Queue
Python
apache-2.0
asteroide/immo_spider,asteroide/immo_spider,asteroide/immo_spider,asteroide/immo_spider
--- +++ @@ -0,0 +1,29 @@ +from cobwebs.mq.core import RPCLink, TopicsLink +from cobwebs.mq.backends.rabbitmq import driver +import pytest +import spider +import json +from unittest import mock + +HOST = "127.0.0.1" + + +def test_driver_instance(): + assert isinstance(driver.rpc, RPCLink) + assert isinstance(driver.topics, TopicsLink) + + +@mock.patch("cobwebs.mq.backends.rabbitmq") +def test_rpc(rabbitmq): + request = {"action": "list", "data": None} + result = rabbitmq.rpc.send("db_driver", json.dumps(request), HOST) + rabbitmq.rpc.send.assert_called_with("db_driver", json.dumps(request), HOST) + + +@mock.patch("cobwebs.mq.backends.rabbitmq") +def test_topic(rabbitmq): + result = rabbitmq.topic.emit(key="test", message="this is just a message") + rabbitmq.topic.emit.assert_called_with(key="test", + message="this is just a message") + +
081b5aabae205ad7c23c512be15ee26276dc8a29
perfkitbenchmarker/providers/azure/util.py
perfkitbenchmarker/providers/azure/util.py
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Verify that Azure CLI is in arm mode.""" from perfkitbenchmarker import events from perfkitbenchmarker import providers from perfkitbenchmarker import vm_util from perfkitbenchmarker.providers import azure class BadAzureCLIModeError(Exception): pass def _CheckAzureCLIMode(sender): assert sender == providers.AZURE, sender stdout, _ = vm_util.IssueRetryableCommand( [azure.AZURE_PATH, 'config']) if 'Current Mode: arm' not in stdout: raise BadAzureCLIModeError('Azure CLI may not be in ARM mode.') events.provider_imported.connect(_CheckAzureCLIMode, providers.AZURE, weak=False)
Check whether Azure CLI is in ARM mode
Check whether Azure CLI is in ARM mode This can prevent some hard-to-debug error messages later on.
Python
apache-2.0
GoogleCloudPlatform/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,meteorfox/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,GoogleCloudPlatform/PerfKitBenchmarker,meteorfox/PerfKitBenchmarker
--- +++ @@ -0,0 +1,38 @@ +# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Verify that Azure CLI is in arm mode.""" + +from perfkitbenchmarker import events +from perfkitbenchmarker import providers +from perfkitbenchmarker import vm_util +from perfkitbenchmarker.providers import azure + + +class BadAzureCLIModeError(Exception): + pass + + +def _CheckAzureCLIMode(sender): + assert sender == providers.AZURE, sender + + stdout, _ = vm_util.IssueRetryableCommand( + [azure.AZURE_PATH, 'config']) + + if 'Current Mode: arm' not in stdout: + raise BadAzureCLIModeError('Azure CLI may not be in ARM mode.') + + +events.provider_imported.connect(_CheckAzureCLIMode, providers.AZURE, + weak=False)
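A minimal sketch of the signal round-trip the guard relies on (the send call illustrates blinker-style signal semantics and is not PerfKitBenchmarker's actual loader code):

from perfkitbenchmarker import events, providers

# Firing the signal with the Azure sender runs _CheckAzureCLIMode, which
# raises BadAzureCLIModeError unless `azure config` reports ARM mode.
events.provider_imported.send(providers.AZURE)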
6705e0e23d13a94726556714e11dfbb7a916877d
zinnia_wymeditor/admin.py
zinnia_wymeditor/admin.py
"""EntryAdmin for zinnia-wymeditor""" from django.contrib import admin from zinnia.models import Entry from zinnia.admin.entry import EntryAdmin class EntryAdminWYMEditorMixin(object): """ Mixin adding WYMeditor for editing Entry.content field. """ pass class EntryAdminWYMEditor(EntryAdminWYMEditorMixin, EntryAdmin): """ Enrich the default EntryAdmin with WYMEditor. """ pass admin.site.unregister(Entry) admin.site.register(Entry, EntryAdminWYMEditor)
Add basic mechanism to override the default EntryAdmin
Add basic mechanism to override the default EntryAdmin
Python
bsd-3-clause
django-blog-zinnia/zinnia-wysiwyg-wymeditor,layar/zinnia-wysiwyg-wymeditor,django-blog-zinnia/zinnia-wysiwyg-wymeditor,layar/zinnia-wysiwyg-wymeditor,layar/zinnia-wysiwyg-wymeditor,django-blog-zinnia/zinnia-wysiwyg-wymeditor,layar/zinnia-wysiwyg-wymeditor,django-blog-zinnia/zinnia-wysiwyg-wymeditor
--- +++ @@ -0,0 +1,23 @@ +"""EntryAdmin for zinnia-wymeditor""" +from django.contrib import admin + +from zinnia.models import Entry +from zinnia.admin.entry import EntryAdmin + + +class EntryAdminWYMEditorMixin(object): + """ + Mixin adding WYMeditor for editing Entry.content field. + """ + pass + + +class EntryAdminWYMEditor(EntryAdminWYMEditorMixin, + EntryAdmin): + """ + Enrich the default EntryAdmin with WYMEditor. + """ + pass + +admin.site.unregister(Entry) +admin.site.register(Entry, EntryAdminWYMEditor)
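Because the WYMEditor behaviour is isolated in the mixin, downstream projects can graft it onto their own admin class; a hypothetical sketch:

class MyEntryAdmin(EntryAdminWYMEditorMixin, EntryAdmin):
    """Site-specific entry admin that keeps the WYMEditor widget."""
    list_display = ('title', 'creation_date')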
389adca1fd52747814f370de2d066a1743544469
solutions/beecrowd/1046/1046.py
solutions/beecrowd/1046/1046.py
start, end = map(int, input().split()) if start == end: result = 24 elif end - start >= 0: result = end - start else: result = 24 + end - start print(f'O JOGO DUROU {result} HORA(S)')
Solve Game Time in python
Solve Game Time in python
Python
mit
deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground,deniscostadsc/playground
--- +++ @@ -0,0 +1,10 @@ +start, end = map(int, input().split()) + +if start == end: + result = 24 +elif end - start >= 0: + result = end - start +else: + result = 24 + end - start + +print(f'O JOGO DUROU {result} HORA(S)')
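Two worked cases for the wrap-around arithmetic: a game from 16 to 2 crosses midnight, giving 24 + 2 - 16 = 10 hours; equal start and end values such as 7 and 7 take the first branch and are reported as 24 hours.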
2199f4c5ed563200d555315b9a8575e00486e667
script/confirmed-fixed-monthly-breakdown.py
script/confirmed-fixed-monthly-breakdown.py
#!/usr/bin/python # A script to draw graphs showing the number of confirmed reports # created each month, and those of which that have been fixed. This # script expects to find a file called 'problems.csv' in the current # directory which should be generated by: # # DIR=`pwd` rake data:create_problem_spreadsheet import csv import datetime from collections import defaultdict import itertools status_types = ('confirmed', 'fixed') counts = {} for status_type in status_types: counts[status_type] = defaultdict(int) today = datetime.date.today() latest_month = earliest_month = (today.year, today.month) maximum_count = -1 with open('problems.csv') as fp: reader = csv.DictReader(fp, delimiter=',', quotechar='"') for row in reader: d = datetime.datetime.strptime(row['Created'], '%H:%M %d %b %Y') ym = (d.year, d.month) earliest_month = min(earliest_month, ym) if row['Status'] == 'confirmed': counts['confirmed'][ym] += 1 elif row['Status'] == 'fixed': counts['fixed'][ym] += 1 maximum_count = max(maximum_count, counts['fixed'][ym], counts['confirmed'][ym]) def months_between(earlier, later): """A generator for iterating over months represented as (year, month) tuples""" year = earlier[0] month = earlier[1] while True: yield (year, month) if month == 12: year = year + 1 month = 1 else: month += 1 if (year, month) > later: return all_months = list(months_between(earliest_month, latest_month)) months = len(all_months) # Make sure that there's at least a zero count for each month we're # considering: for d in counts.values(): for ym in all_months: d[ym] += 0 with open('monthly-breakdown.csv', 'w') as fp: writer = csv.writer(fp) writer.writerow(['Month', 'Confirmed', 'Fixed']) for ym in all_months: writer.writerow(["%d-%02d" % (ym[0], ym[1]), counts['confirmed'][ym], counts['fixed'][ym]])
Add a simple script to generate monthly confirmed / fixed counts
Add a simple script to generate monthly confirmed / fixed counts This is a simple dumbing-down of graph-reports-by-transport-mode.py so there's some repeated code which should be factored out.
Python
agpl-3.0
mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport,mysociety/fixmytransport
--- +++ @@ -0,0 +1,69 @@ +#!/usr/bin/python + +# A script to draw graphs showing the number of confirmed reports +# created each month, and those of which that have been fixed. This +# script expects to find a file called 'problems.csv' in the current +# directory which should be generated by: +# +# DIR=`pwd` rake data:create_problem_spreadsheet + +import csv +import datetime +from collections import defaultdict + +import itertools + +status_types = ('confirmed', 'fixed') + +counts = {} +for status_type in status_types: + counts[status_type] = defaultdict(int) + +today = datetime.date.today() +latest_month = earliest_month = (today.year, today.month) + +maximum_count = -1 + +with open('problems.csv') as fp: + reader = csv.DictReader(fp, delimiter=',', quotechar='"') + for row in reader: + d = datetime.datetime.strptime(row['Created'], + '%H:%M %d %b %Y') + ym = (d.year, d.month) + earliest_month = min(earliest_month, ym) + if row['Status'] == 'confirmed': + counts['confirmed'][ym] += 1 + elif row['Status'] == 'fixed': + counts['fixed'][ym] += 1 + maximum_count = max(maximum_count, counts['fixed'][ym], counts['confirmed'][ym]) + +def months_between(earlier, later): + """A generator for iterating over months represented as (year, month) tuples""" + year = earlier[0] + month = earlier[1] + while True: + yield (year, month) + if month == 12: + year = year + 1 + month = 1 + else: + month += 1 + if (year, month) > later: + return + +all_months = list(months_between(earliest_month, latest_month)) +months = len(all_months) + +# Make sure that there's at least a zero count for each month we're +# considering: +for d in counts.values(): + for ym in all_months: + d[ym] += 0 + +with open('monthly-breakdown.csv', 'w') as fp: + writer = csv.writer(fp) + writer.writerow(['Month', 'Confirmed', 'Fixed']) + for ym in all_months: + writer.writerow(["%d-%02d" % (ym[0], ym[1]), + counts['confirmed'][ym], + counts['fixed'][ym]])
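For instance, list(months_between((2011, 11), (2012, 2))) yields [(2011, 11), (2011, 12), (2012, 1), (2012, 2)]: the generator yields before incrementing, so both endpoints are included and the December-to-January rollover is handled.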
6454548da01dbc2b9f772a5c0ffb11a03dc933e7
draw_shape.py
draw_shape.py
import pygame pygame.init() #-- SCREEN CHARACTERISTICS ------------------------->>> background_color = (255,255,255) (width, height) = (300, 200) #-- RENDER SCREEN ---------------------------------->>> screen = pygame.display.set_mode((width, height)) screen.fill(background_color) #pygame.draw.circle(canvas, color, position(x,y), radius, thickness) pygame.draw.circle(screen, (255,0,0), (150, 100), 10, 1) #-- RUN LOOP --------------------------------------->>> pygame.display.flip() running = True while running: for event in pygame.event.get(): if event.type == pygame.QUIT: running = False
Add module capable of rendering a circle when run
Add module capable of rendering a circle when run
Python
mit
withtwoemms/pygame-explorations
--- +++ @@ -0,0 +1,24 @@ +import pygame + + +pygame.init() + +#-- SCREEN CHARACTERISTICS ------------------------->>> +background_color = (255,255,255) +(width, height) = (300, 200) + +#-- RENDER SCREEN ---------------------------------->>> +screen = pygame.display.set_mode((width, height)) +screen.fill(background_color) + +#pygame.draw.circle(canvas, color, position(x,y), radius, thickness) +pygame.draw.circle(screen, (255,0,0), (150, 100), 10, 1) + + +#-- RUN LOOP --------------------------------------->>> +pygame.display.flip() +running = True +while running: + for event in pygame.event.get(): + if event.type == pygame.QUIT: + running = False
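A one-line variant of the call above: a thickness of 0 asks pygame for a filled disc rather than a one-pixel outline:

pygame.draw.circle(screen, (255, 0, 0), (150, 100), 10, 0)  # filled circle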
5a857703de5fc1e67e958afb41a10db07b98bfa1
scripts/migrate_unconfirmed_valid_users.py
scripts/migrate_unconfirmed_valid_users.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate users with a valid date_last_login but no date_confirmed."""

import sys
import logging

from website.app import init_app
from website.models import User
from scripts import utils as script_utils
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
import datetime as dt

logger = logging.getLogger(__name__)

def do_migration(records):
    for user in records:
        user.date_confirmed = user.date_last_login
        if not user.is_registered:
            user.is_registered = True
        user.save()
        logger.info('Finished migrating user {0}'.format(user._id))

def get_targets():
    return User.find(Q('date_confirmed', 'eq', None) & Q('date_last_login', 'ne', None))

def main():
    init_app(routes=False) # Sets the storage backends on all models
    if 'dry' in sys.argv:
        for user in get_targets():
            print(user)
    else:
        do_migration(get_targets())

class TestMigrateNodeCategories(OsfTestCase):

    def test_get_targets(self):
        test = User.find(Q('date_confirmed', 'ne', None) & Q('date_last_login', 'ne', None))
        assert test is not None

    def test_do_migration(self):
        today = dt.datetime.utcnow()
        user1 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=False)
        user2 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=True)
        user1.save()
        user2.save()

        user_list = User.find(Q('_id', 'eq', user1._id) | Q('_id', 'eq', user2._id))
        do_migration(user_list)

        assert user1.date_confirmed is today
        assert user1.is_registered
        assert user2.date_confirmed is today
        assert user2.is_registered


if __name__ == '__main__':
    script_utils.add_file_logger(logger, __file__)
    main()
Add migration script to fix valid users with date_confirmed==None
Add migration script to fix valid users with date_confirmed==None
Python
apache-2.0
laurenrevere/osf.io,doublebits/osf.io,kch8qx/osf.io,mluo613/osf.io,petermalcolm/osf.io,GageGaskins/osf.io,ZobairAlijan/osf.io,caneruguz/osf.io,njantrania/osf.io,asanfilippo7/osf.io,zkraime/osf.io,petermalcolm/osf.io,himanshuo/osf.io,hmoco/osf.io,GaryKriebel/osf.io,jmcarp/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,haoyuchen1992/osf.io,binoculars/osf.io,hmoco/osf.io,lyndsysimon/osf.io,baylee-d/osf.io,jinluyuan/osf.io,KAsante95/osf.io,laurenrevere/osf.io,samanehsan/osf.io,cslzchen/osf.io,jmcarp/osf.io,arpitar/osf.io,MerlinZhang/osf.io,Johnetordoff/osf.io,kch8qx/osf.io,wearpants/osf.io,RomanZWang/osf.io,zkraime/osf.io,zachjanicki/osf.io,saradbowman/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,njantrania/osf.io,TomHeatwole/osf.io,dplorimer/osf,HarryRybacki/osf.io,fabianvf/osf.io,petermalcolm/osf.io,acshi/osf.io,RomanZWang/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,doublebits/osf.io,ckc6cz/osf.io,jolene-esposito/osf.io,caneruguz/osf.io,felliott/osf.io,HarryRybacki/osf.io,cslzchen/osf.io,dplorimer/osf,abought/osf.io,adlius/osf.io,Ghalko/osf.io,jolene-esposito/osf.io,reinaH/osf.io,doublebits/osf.io,cslzchen/osf.io,kushG/osf.io,amyshi188/osf.io,caseyrollins/osf.io,jmcarp/osf.io,chrisseto/osf.io,samanehsan/osf.io,cosenal/osf.io,revanthkolli/osf.io,amyshi188/osf.io,caseyrygt/osf.io,kushG/osf.io,icereval/osf.io,KAsante95/osf.io,cslzchen/osf.io,brandonPurvis/osf.io,erinspace/osf.io,brandonPurvis/osf.io,TomHeatwole/osf.io,GageGaskins/osf.io,DanielSBrown/osf.io,aaxelb/osf.io,RomanZWang/osf.io,HalcyonChimera/osf.io,ticklemepierce/osf.io,lyndsysimon/osf.io,GaryKriebel/osf.io,sbt9uc/osf.io,ticklemepierce/osf.io,pattisdr/osf.io,binoculars/osf.io,mluo613/osf.io,asanfilippo7/osf.io,caseyrollins/osf.io,TomHeatwole/osf.io,monikagrabowska/osf.io,kch8qx/osf.io,pattisdr/osf.io,Ghalko/osf.io,bdyetton/prettychart,cldershem/osf.io,cldershem/osf.io,alexschiller/osf.io,jeffreyliu3230/osf.io,SSJohns/osf.io,danielneis/osf.io,zamattiac/osf.io,caseyrygt/osf.io,HarryRybacki/osf.io,aaxelb/osf.io,adlius/osf.io,baylee-d/osf.io,GageGaskins/osf.io,samchrisinger/osf.io,kushG/osf.io,alexschiller/osf.io,lyndsysimon/osf.io,wearpants/osf.io,jinluyuan/osf.io,CenterForOpenScience/osf.io,jeffreyliu3230/osf.io,ticklemepierce/osf.io,jolene-esposito/osf.io,RomanZWang/osf.io,samanehsan/osf.io,revanthkolli/osf.io,jinluyuan/osf.io,barbour-em/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,danielneis/osf.io,mluke93/osf.io,baylee-d/osf.io,Nesiehr/osf.io,HalcyonChimera/osf.io,haoyuchen1992/osf.io,erinspace/osf.io,ticklemepierce/osf.io,zachjanicki/osf.io,mattclark/osf.io,crcresearch/osf.io,billyhunt/osf.io,jeffreyliu3230/osf.io,monikagrabowska/osf.io,billyhunt/osf.io,amyshi188/osf.io,mfraezz/osf.io,Ghalko/osf.io,felliott/osf.io,samchrisinger/osf.io,GaryKriebel/osf.io,jolene-esposito/osf.io,samchrisinger/osf.io,HalcyonChimera/osf.io,samanehsan/osf.io,reinaH/osf.io,mluke93/osf.io,barbour-em/osf.io,kushG/osf.io,caseyrollins/osf.io,cwisecarver/osf.io,SSJohns/osf.io,brianjgeiger/osf.io,himanshuo/osf.io,asanfilippo7/osf.io,adlius/osf.io,SSJohns/osf.io,Ghalko/osf.io,jnayak1/osf.io,binoculars/osf.io,chennan47/osf.io,lyndsysimon/osf.io,TomBaxter/osf.io,abought/osf.io,fabianvf/osf.io,MerlinZhang/osf.io,rdhyee/osf.io,jnayak1/osf.io,sloria/osf.io,leb2dg/osf.io,leb2dg/osf.io,fabianvf/osf.io,zkraime/osf.io,kch8qx/osf.io,samchrisinger/osf.io,Johnetordoff/osf.io,emetsger/osf.io,icereval/osf.io,Nesiehr/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,cwisecarver/osf.io,RomanZWang/osf.io,felliott/osf.io,bdyetton/prettychart,rdhyee/osf.io,revanthkoll
i/osf.io,zkraime/osf.io,acshi/osf.io,alexschiller/osf.io,chennan47/osf.io,doublebits/osf.io,kwierman/osf.io,mluo613/osf.io,laurenrevere/osf.io,acshi/osf.io,chennan47/osf.io,mluo613/osf.io,wearpants/osf.io,himanshuo/osf.io,abought/osf.io,monikagrabowska/osf.io,mfraezz/osf.io,Nesiehr/osf.io,bdyetton/prettychart,DanielSBrown/osf.io,billyhunt/osf.io,jmcarp/osf.io,kwierman/osf.io,brianjgeiger/osf.io,mattclark/osf.io,hmoco/osf.io,asanfilippo7/osf.io,crcresearch/osf.io,brandonPurvis/osf.io,doublebits/osf.io,TomBaxter/osf.io,zamattiac/osf.io,fabianvf/osf.io,ZobairAlijan/osf.io,adlius/osf.io,ckc6cz/osf.io,kwierman/osf.io,icereval/osf.io,GageGaskins/osf.io,Nesiehr/osf.io,SSJohns/osf.io,mluke93/osf.io,jnayak1/osf.io,jinluyuan/osf.io,lamdnhan/osf.io,ZobairAlijan/osf.io,rdhyee/osf.io,TomBaxter/osf.io,haoyuchen1992/osf.io,caseyrygt/osf.io,barbour-em/osf.io,barbour-em/osf.io,cwisecarver/osf.io,dplorimer/osf,jnayak1/osf.io,zamattiac/osf.io,lamdnhan/osf.io,ZobairAlijan/osf.io,arpitar/osf.io,chrisseto/osf.io,haoyuchen1992/osf.io,njantrania/osf.io,ckc6cz/osf.io,felliott/osf.io,cldershem/osf.io,arpitar/osf.io,dplorimer/osf,CenterForOpenScience/osf.io,zachjanicki/osf.io,cosenal/osf.io,MerlinZhang/osf.io,kch8qx/osf.io,billyhunt/osf.io,erinspace/osf.io,brandonPurvis/osf.io,jeffreyliu3230/osf.io,KAsante95/osf.io,billyhunt/osf.io,brandonPurvis/osf.io,caneruguz/osf.io,mluo613/osf.io,aaxelb/osf.io,emetsger/osf.io,petermalcolm/osf.io,pattisdr/osf.io,njantrania/osf.io,cwisecarver/osf.io,GaryKriebel/osf.io,revanthkolli/osf.io,mluke93/osf.io,chrisseto/osf.io,KAsante95/osf.io,mattclark/osf.io,mfraezz/osf.io,alexschiller/osf.io,saradbowman/osf.io,sloria/osf.io,GageGaskins/osf.io,HarryRybacki/osf.io,hmoco/osf.io,cldershem/osf.io,zachjanicki/osf.io,chrisseto/osf.io,cosenal/osf.io,arpitar/osf.io,wearpants/osf.io,DanielSBrown/osf.io,HalcyonChimera/osf.io,emetsger/osf.io,acshi/osf.io,sloria/osf.io,MerlinZhang/osf.io,CenterForOpenScience/osf.io,himanshuo/osf.io,lamdnhan/osf.io,caseyrygt/osf.io,brianjgeiger/osf.io,TomHeatwole/osf.io,sbt9uc/osf.io,kwierman/osf.io,ckc6cz/osf.io,zamattiac/osf.io,danielneis/osf.io,alexschiller/osf.io,KAsante95/osf.io,lamdnhan/osf.io,bdyetton/prettychart,abought/osf.io,acshi/osf.io,DanielSBrown/osf.io,sbt9uc/osf.io,cosenal/osf.io,sbt9uc/osf.io,caneruguz/osf.io,reinaH/osf.io,leb2dg/osf.io,danielneis/osf.io,amyshi188/osf.io,reinaH/osf.io,emetsger/osf.io
--- +++ @@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""Script to migrate users with a valid date_last_login but no date_confirmed."""
+
+import sys
+import logging
+
+from website.app import init_app
+from website.models import User
+from scripts import utils as script_utils
+from tests.base import OsfTestCase
+from tests.factories import UserFactory
+from modularodm import Q
+import datetime as dt
+
+logger = logging.getLogger(__name__)
+
+def do_migration(records):
+    for user in records:
+        user.date_confirmed = user.date_last_login
+        if not user.is_registered:
+            user.is_registered = True
+        user.save()
+        logger.info('Finished migrating user {0}'.format(user._id))
+
+def get_targets():
+    return User.find(Q('date_confirmed', 'eq', None) & Q('date_last_login', 'ne', None))
+
+def main():
+    init_app(routes=False) # Sets the storage backends on all models
+    if 'dry' in sys.argv:
+        for user in get_targets():
+            print(user)
+    else:
+        do_migration(get_targets())
+
+class TestMigrateNodeCategories(OsfTestCase):
+
+    def test_get_targets(self):
+        test = User.find(Q('date_confirmed', 'ne', None) & Q('date_last_login', 'ne', None))
+        assert test is not None
+
+    def test_do_migration(self):
+        today = dt.datetime.utcnow()
+        user1 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=False)
+        user2 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=True)
+        user1.save()
+        user2.save()
+
+        user_list = User.find(Q('_id', 'eq', user1._id) | Q('_id', 'eq', user2._id))
+        do_migration(user_list)
+
+        assert user1.date_confirmed is today
+        assert user1.is_registered
+        assert user2.date_confirmed is today
+        assert user2.is_registered
+
+
+if __name__ == '__main__':
+    script_utils.add_file_logger(logger, __file__)
+    main()
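The dry-run branch above is driven purely by argv, e.g. (module path assumed from the repository layout): running python -m scripts.migrate_unconfirmed_valid_users dry prints the affected users without touching them, while omitting the extra argument applies do_migration to every user that has a last login but no confirmation date.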
edeffbcbe8fb239553c73fa37e73c0188ffc2479
tests/test_cli.py
tests/test_cli.py
import sys import fixtures import imgurpython import testtools import imgur_cli.cli as cli FAKE_ENV = {'IMGUR_CLIENT_ID': 'client_id', 'IMGUR_CLIENT_SECRET': 'client_secret', 'IMGUR_ACCESS_TOKEN': 'access_token', 'IMGUR_REFRESH_TOKEN': 'refresh_token', 'IMGUR_MASHAPE_KEY': 'mashape_key'} class TestImgurCli(testtools.TestCase): def make_env(self, exclude=None): if not exclude: exclude = [] env = {key: value for key, value in FAKE_ENV.items() if key not in exclude} self.useFixture(fixtures.MonkeyPatch('os.environ', env)) def test_imgur_credentials_env(self): self.make_env() expected = ('client_id', 'client_secret', 'access_token', 'refresh_token', 'mashape_key') imgur_credentials = cli.imgur_credentials() self.assertEqual(expected, imgur_credentials) self.make_env(exclude=['IMGUR_MASHAPE_KEY']) expected = ('client_id', 'client_secret', 'access_token', 'refresh_token', None) imgur_credentials = cli.imgur_credentials() self.assertEqual(expected, imgur_credentials) self.make_env(exclude=['IMGUR_CLIENT_ID']) self.assertRaises(imgurpython.client.ImgurClientError, cli.imgur_credentials) self.make_env(exclude=['IMGUR_CLIENT_SECRET']) self.assertRaises(imgurpython.client.ImgurClientError, cli.imgur_credentials)
Add unit test for retrieving credentials from environment variables
Add unit test for retrieving credentials from environment variables
Python
mit
ueg1990/imgur-cli
--- +++ @@ -0,0 +1,40 @@ +import sys + +import fixtures +import imgurpython +import testtools + +import imgur_cli.cli as cli + +FAKE_ENV = {'IMGUR_CLIENT_ID': 'client_id', + 'IMGUR_CLIENT_SECRET': 'client_secret', + 'IMGUR_ACCESS_TOKEN': 'access_token', + 'IMGUR_REFRESH_TOKEN': 'refresh_token', + 'IMGUR_MASHAPE_KEY': 'mashape_key'} + + +class TestImgurCli(testtools.TestCase): + + def make_env(self, exclude=None): + if not exclude: + exclude = [] + env = {key: value for key, value in FAKE_ENV.items() if key not in exclude} + self.useFixture(fixtures.MonkeyPatch('os.environ', env)) + + def test_imgur_credentials_env(self): + self.make_env() + expected = ('client_id', 'client_secret', 'access_token', 'refresh_token', + 'mashape_key') + imgur_credentials = cli.imgur_credentials() + self.assertEqual(expected, imgur_credentials) + self.make_env(exclude=['IMGUR_MASHAPE_KEY']) + expected = ('client_id', 'client_secret', 'access_token', 'refresh_token', + None) + imgur_credentials = cli.imgur_credentials() + self.assertEqual(expected, imgur_credentials) + self.make_env(exclude=['IMGUR_CLIENT_ID']) + self.assertRaises(imgurpython.client.ImgurClientError, + cli.imgur_credentials) + self.make_env(exclude=['IMGUR_CLIENT_SECRET']) + self.assertRaises(imgurpython.client.ImgurClientError, + cli.imgur_credentials)
aa4f1df448c6d01875ed667e37afe68c114892ed
api/mastercoin_verify.py
api/mastercoin_verify.py
import os
import glob
from flask import Flask, request, jsonify, abort, json

data_dir_root = os.environ.get('DATADIR')

app = Flask(__name__)
app.debug = True


@app.route('/addresses')
def addresses():
    currency_id = request.args.get('currency_id')
    print currency_id
    response = []
    addr_glob = glob.glob(data_dir_root + '/addr/*.json')

    for address_file in addr_glob:
        with open(address_file, 'r') as f:
            addr = json.load(f)
            res = {
                'address': addr['address']
            }

            if currency_id == '0':
                btc_balance = [x['value'] for x in addr['balance'] if x['symbol'] == 'BTC'][0]
                res['balance'] = float(btc_balance)
                response.append(res)
            else:
                if currency_id == '1' or currency_id == '2':
                    msc_currency_id = str(int(currency_id) - 1) # Mastercoin-tools is off by one on currency id from the spec

                    if msc_currency_id in addr:
                        print addr[msc_currency_id]['balance']
                        res['balance'] = float(addr[msc_currency_id]['balance'])
                        response.append(res)

    json_response = json.dumps(response)
    return json_response


@app.route('/transactions/<address>')
def transactions(address=None):

    return ""
Add initial verification endpoint. Add all balance endpoint
Add initial verification endpoint. Add all balance endpoint
Python
agpl-3.0
Nevtep/omniwallet,VukDukic/omniwallet,Nevtep/omniwallet,habibmasuro/omniwallet,habibmasuro/omniwallet,OmniLayer/omniwallet,OmniLayer/omniwallet,OmniLayer/omniwallet,Nevtep/omniwallet,VukDukic/omniwallet,habibmasuro/omniwallet,Nevtep/omniwallet,achamely/omniwallet,achamely/omniwallet,achamely/omniwallet,OmniLayer/omniwallet,achamely/omniwallet,VukDukic/omniwallet,habibmasuro/omniwallet
--- +++ @@ -0,0 +1,45 @@
+import os
+import glob
+from flask import Flask, request, jsonify, abort, json
+
+data_dir_root = os.environ.get('DATADIR')
+
+app = Flask(__name__)
+app.debug = True
+
+
+@app.route('/addresses')
+def addresses():
+    currency_id = request.args.get('currency_id')
+    print currency_id
+    response = []
+    addr_glob = glob.glob(data_dir_root + '/addr/*.json')
+
+    for address_file in addr_glob:
+        with open(address_file, 'r') as f:
+            addr = json.load(f)
+            res = {
+                'address': addr['address']
+            }
+
+            if currency_id == '0':
+                btc_balance = [x['value'] for x in addr['balance'] if x['symbol'] == 'BTC'][0]
+                res['balance'] = float(btc_balance)
+                response.append(res)
+            else:
+                if currency_id == '1' or currency_id == '2':
+                    msc_currency_id = str(int(currency_id) - 1) # Mastercoin-tools is off by one on currency id from the spec
+
+                    if msc_currency_id in addr:
+                        print addr[msc_currency_id]['balance']
+                        res['balance'] = float(addr[msc_currency_id]['balance'])
+                        response.append(res)
+
+    json_response = json.dumps(response)
+    return json_response
+
+
+@app.route('/transactions/<address>')
+def transactions(address=None):
+
+    return ""
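A local smoke test using Flask's built-in test client (DATADIR must point at a mastercoin-tools data directory; the printed payload is illustrative):

client = app.test_client()
resp = client.get('/addresses?currency_id=1')  # MSC balances
print resp.data  # e.g. [{"address": "1Abc...", "balance": 1.5}]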
26bc11340590b0b863527fa12da03cea528feb46
pygerrit/client.py
pygerrit/client.py
""" Gerrit client interface. """ from Queue import Queue, Empty, Full from pygerrit.error import GerritError from pygerrit.events import GerritEventFactory class GerritClient(object): """ Gerrit client interface. """ def __init__(self, host): self._factory = GerritEventFactory() self._host = host self._events = Queue() def get_event(self, block=True, timeout=None): """ Get the next event from the queue. Return a `GerritEvent` instance, or None if: - `block` was False and there is no event available in the queue, or - `block` was True and no event was available within the time specified by `timeout`. """ try: return self._events.get(block, timeout) except Empty: return None def put_event(self, json_data): """ Create event from `json_data` and add it to the queue. Raise GerritError if the queue is full, or the factory could not create the event. """ try: event = self._factory.create(json_data) self._events.put(event) except Full: raise GerritError("Unable to add event: queue is full")
Add initial stub of GerritClient class
Add initial stub of GerritClient class The Gerrit client class will be used as an interface to the Gerrit events stream and query functionality. This is the intial stub. More functionality will be added, and existing stream functionality refactored, in later commits. Change-Id: If4ef838c2d3f3e5afaad2a553af49b1c66ad4278
Python
mit
morucci/pygerrit,sonyxperiadev/pygerrit,dpursehouse/pygerrit,benjiii/pygerrit,gferon/pygerrit2,markon/pygerrit2,dpursehouse/pygerrit2
---
+++
@@ -0,0 +1,43 @@
+""" Gerrit client interface. """
+
+from Queue import Queue, Empty, Full
+
+from pygerrit.error import GerritError
+from pygerrit.events import GerritEventFactory
+
+
+class GerritClient(object):
+
+    """ Gerrit client interface. """
+
+    def __init__(self, host):
+        self._factory = GerritEventFactory()
+        self._host = host
+        self._events = Queue()
+
+    def get_event(self, block=True, timeout=None):
+        """ Get the next event from the queue.
+
+        Return a `GerritEvent` instance, or None if:
+        - `block` was False and there is no event available in the queue, or
+        - `block` was True and no event was available within the time
+          specified by `timeout`.
+
+        """
+        try:
+            return self._events.get(block, timeout)
+        except Empty:
+            return None
+
+    def put_event(self, json_data):
+        """ Create event from `json_data` and add it to the queue.
+
+        Raise GerritError if the queue is full, or the factory could not
+        create the event.
+
+        """
+        try:
+            event = self._factory.create(json_data)
+            self._events.put(event)
+        except Full:
+            raise GerritError("Unable to add event: queue is full")
10f99acc11051b37595751b9b9b84e11dd133a64
kolibri/core/content/utils/file_availability.py
kolibri/core/content/utils/file_availability.py
import json
import os
import re

import requests
from django.core.cache import cache

from kolibri.core.content.models import LocalFile
from kolibri.core.content.utils.paths import get_content_storage_dir_path
from kolibri.core.content.utils.paths import get_file_checksums_url


checksum_regex = re.compile("^([a-f0-9]{32})$")


def get_available_checksums_from_remote(channel_id, baseurl):
    CACHE_KEY = "PEER_AVAILABLE_CHECKSUMS_{baseurl}_{channel_id}".format(
        baseurl=baseurl, channel_id=channel_id
    )
    if CACHE_KEY not in cache:
        response = requests.get(get_file_checksums_url(channel_id, baseurl))

        checksums = None

        # Do something if we got a successful return
        if response.status_code == 200:
            try:
                checksums = json.loads(response.content)
                # Filter to avoid passing in bad checksums
                checksums = [
                    checksum for checksum in checksums if checksum_regex.match(checksum)
                ]
                cache.set(CACHE_KEY, checksums, 3600)
            except (ValueError, TypeError):
                # Bad JSON parsing will throw ValueError
                # If the result of the json.loads is not iterable, a TypeError will be thrown
                # If we end up here, just set checksums to None to allow us to cleanly continue
                pass
    return cache.get(CACHE_KEY)


def get_available_checksums_from_disk(channel_id, basepath):
    PER_DISK_CACHE_KEY = "DISK_AVAILABLE_CHECKSUMS_{basepath}".format(basepath=basepath)
    PER_DISK_PER_CHANNEL_CACHE_KEY = "DISK_AVAILABLE_CHECKSUMS_{basepath}_{channel_id}".format(
        basepath=basepath, channel_id=channel_id
    )
    if PER_DISK_PER_CHANNEL_CACHE_KEY not in cache:
        if PER_DISK_CACHE_KEY not in cache:
            content_dir = get_content_storage_dir_path(datafolder=basepath)

            disk_checksums = []

            for _, _, files in os.walk(content_dir):
                for name in files:
                    checksum = os.path.splitext(name)[0]
                    # Only add valid checksums formatted according to our standard filename
                    if checksum_regex.match(checksum):
                        disk_checksums.append(checksum)
            # Cache is per device, so a relatively long lived one should
            # be fine.
            cache.set(PER_DISK_CACHE_KEY, disk_checksums, 3600)
        disk_checksums = set(cache.get(PER_DISK_CACHE_KEY))
        channel_checksums = set(
            LocalFile.objects.filter(
                files__contentnode__channel_id=channel_id
            ).values_list("id", flat=True)
        )
        cache.set(
            PER_DISK_PER_CHANNEL_CACHE_KEY,
            channel_checksums.intersection(disk_checksums),
            3600,
        )
    return cache.get(PER_DISK_PER_CHANNEL_CACHE_KEY)
Add functions for getting available checksums for a channel from remote and disk.
Add functions for getting available checksums for a channel from remote and disk.
Python
mit
mrpau/kolibri,mrpau/kolibri,learningequality/kolibri,indirectlylit/kolibri,learningequality/kolibri,indirectlylit/kolibri,mrpau/kolibri,indirectlylit/kolibri,mrpau/kolibri,indirectlylit/kolibri,learningequality/kolibri,learningequality/kolibri
---
+++
@@ -0,0 +1,73 @@
+import json
+import os
+import re
+
+import requests
+from django.core.cache import cache
+
+from kolibri.core.content.models import LocalFile
+from kolibri.core.content.utils.paths import get_content_storage_dir_path
+from kolibri.core.content.utils.paths import get_file_checksums_url
+
+
+checksum_regex = re.compile("^([a-f0-9]{32})$")
+
+
+def get_available_checksums_from_remote(channel_id, baseurl):
+    CACHE_KEY = "PEER_AVAILABLE_CHECKSUMS_{baseurl}_{channel_id}".format(
+        baseurl=baseurl, channel_id=channel_id
+    )
+    if CACHE_KEY not in cache:
+        response = requests.get(get_file_checksums_url(channel_id, baseurl))
+
+        checksums = None
+
+        # Do something if we got a successful return
+        if response.status_code == 200:
+            try:
+                checksums = json.loads(response.content)
+                # Filter to avoid passing in bad checksums
+                checksums = [
+                    checksum for checksum in checksums if checksum_regex.match(checksum)
+                ]
+                cache.set(CACHE_KEY, checksums, 3600)
+            except (ValueError, TypeError):
+                # Bad JSON parsing will throw ValueError
+                # If the result of the json.loads is not iterable, a TypeError will be thrown
+                # If we end up here, just set checksums to None to allow us to cleanly continue
+                pass
+    return cache.get(CACHE_KEY)
+
+
+def get_available_checksums_from_disk(channel_id, basepath):
+    PER_DISK_CACHE_KEY = "DISK_AVAILABLE_CHECKSUMS_{basepath}".format(basepath=basepath)
+    PER_DISK_PER_CHANNEL_CACHE_KEY = "DISK_AVAILABLE_CHECKSUMS_{basepath}_{channel_id}".format(
+        basepath=basepath, channel_id=channel_id
+    )
+    if PER_DISK_PER_CHANNEL_CACHE_KEY not in cache:
+        if PER_DISK_CACHE_KEY not in cache:
+            content_dir = get_content_storage_dir_path(datafolder=basepath)
+
+            disk_checksums = []
+
+            for _, _, files in os.walk(content_dir):
+                for name in files:
+                    checksum = os.path.splitext(name)[0]
+                    # Only add valid checksums formatted according to our standard filename
+                    if checksum_regex.match(checksum):
+                        disk_checksums.append(checksum)
+            # Cache is per device, so a relatively long lived one should
+            # be fine.
+            cache.set(PER_DISK_CACHE_KEY, disk_checksums, 3600)
+        disk_checksums = set(cache.get(PER_DISK_CACHE_KEY))
+        channel_checksums = set(
+            LocalFile.objects.filter(
+                files__contentnode__channel_id=channel_id
+            ).values_list("id", flat=True)
+        )
+        cache.set(
+            PER_DISK_PER_CHANNEL_CACHE_KEY,
+            channel_checksums.intersection(disk_checksums),
+            3600,
+        )
+    return cache.get(PER_DISK_PER_CHANNEL_CACHE_KEY)
27d37833663842405f159127f30c6351958fcb10
bench_examples/bench_dec_insert.py
bench_examples/bench_dec_insert.py
from csv import DictWriter

from ktbs_bench.utils.decorators import bench


@bench
def batch_insert(graph, file):
    """Insert triples in batch."""
    print(graph, file)


if __name__ == '__main__':
    # Define some graph/store to use
    graph_list = ['g1', 'g2']

    # Define some files to get the triples from
    n3file_list = ['f1', 'f2']

    # Testing batch insert
    res = {'func_name': 'batch_insert'}
    for graph in graph_list:
        for n3file in n3file_list:
            time_res = batch_insert(graph, n3file)
            res[time_res[0]] = time_res[1]

    # Setup the result CSV
    with open('/tmp/res.csv', 'wb') as outfile:
        res_csv = DictWriter(outfile, fieldnames=res.keys())
        res_csv.writeheader()

    # Write the results
    res_csv.writerow(res)
Add draft of example using the new @bench
Add draft of example using the new @bench
Python
mit
ktbs/ktbs-bench,ktbs/ktbs-bench
---
+++
@@ -0,0 +1,32 @@
+from csv import DictWriter
+
+from ktbs_bench.utils.decorators import bench
+
+
+@bench
+def batch_insert(graph, file):
+    """Insert triples in batch."""
+    print(graph, file)
+
+
+if __name__ == '__main__':
+    # Define some graph/store to use
+    graph_list = ['g1', 'g2']
+
+    # Define some files to get the triples from
+    n3file_list = ['f1', 'f2']
+
+    # Testing batch insert
+    res = {'func_name': 'batch_insert'}
+    for graph in graph_list:
+        for n3file in n3file_list:
+            time_res = batch_insert(graph, n3file)
+            res[time_res[0]] = time_res[1]
+
+    # Setup the result CSV
+    with open('/tmp/res.csv', 'wb') as outfile:
+        res_csv = DictWriter(outfile, fieldnames=res.keys())
+        res_csv.writeheader()
+
+    # Write the results
+    res_csv.writerow(res)
d4a7bbe27b285e455a3beafefd22fc493edeb161
test/test_config_eventlogger.py
test/test_config_eventlogger.py
#!/usr/bin/env python2
import unittest
import subprocess
import threading
import tempfile
import os

from testdc import *

DAEMON_PATH = './astrond'
TERMINATED = -15
EXITED = 1

class ConfigTest(object):
    def __init__(self, config):
        self.config = config
        self.process = None

    def run(self, timeout):
        def target():
            self.process = subprocess.Popen([DAEMON_PATH, self.config])
            self.process.communicate()

        thread = threading.Thread(target=target)
        thread.start()

        thread.join(timeout)
        if thread.is_alive():
            self.process.terminate()
            thread.join()
        return self.process.returncode

class TestConfigEventLogger(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cfg, cls.config_file = tempfile.mkstemp()
        os.close(cfg)

        cls.test_command = ConfigTest(cls.config_file)

    @classmethod
    def tearDownClass(cls):
        if cls.config_file is not None:
            os.remove(cls.config_file)

    @classmethod
    def write_config(cls, config):
        f = open(cls.config_file, "w")
        f.write(config)
        f.close()

    @classmethod
    def run_test(cls, config, timeout = 2):
        cls.write_config(config)
        return cls.test_command.run(timeout)

    def test_eventlogger_good(self):
        config = """\
        messagedirector:
            bind: 127.0.0.1:57123

        roles:
            - type: eventlogger
              bind: 0.0.0.0:9090
              output: /var/log/astron/eventlogger/el-%Y-%m-%d-%H-%M-%S.log
              rotate_interval: 1d
        """
        self.assertEquals(self.run_test(config), TERMINATED)

if __name__ == '__main__':
    unittest.main()
Add unittest for eventlogger config validation.
Tests: Add unittest for eventlogger config validation.
Python
bsd-3-clause
ketoo/Astron,pizcogirl/Astron,ketoo/Astron,blindsighttf2/Astron,blindsighttf2/Astron,ketoo/Astron,pizcogirl/Astron,pizcogirl/Astron,ketoo/Astron,blindsighttf2/Astron,pizcogirl/Astron,blindsighttf2/Astron
---
+++
@@ -0,0 +1,71 @@
+#!/usr/bin/env python2
+import unittest
+import subprocess
+import threading
+import tempfile
+import os
+
+from testdc import *
+
+DAEMON_PATH = './astrond'
+TERMINATED = -15
+EXITED = 1
+
+class ConfigTest(object):
+    def __init__(self, config):
+        self.config = config
+        self.process = None
+
+    def run(self, timeout):
+        def target():
+            self.process = subprocess.Popen([DAEMON_PATH, self.config])
+            self.process.communicate()
+
+        thread = threading.Thread(target=target)
+        thread.start()
+
+        thread.join(timeout)
+        if thread.is_alive():
+            self.process.terminate()
+            thread.join()
+        return self.process.returncode
+
+class TestConfigEventLogger(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        cfg, cls.config_file = tempfile.mkstemp()
+        os.close(cfg)
+
+        cls.test_command = ConfigTest(cls.config_file)
+
+    @classmethod
+    def tearDownClass(cls):
+        if cls.config_file is not None:
+            os.remove(cls.config_file)
+
+    @classmethod
+    def write_config(cls, config):
+        f = open(cls.config_file, "w")
+        f.write(config)
+        f.close()
+
+    @classmethod
+    def run_test(cls, config, timeout = 2):
+        cls.write_config(config)
+        return cls.test_command.run(timeout)
+
+    def test_eventlogger_good(self):
+        config = """\
+        messagedirector:
+            bind: 127.0.0.1:57123
+
+        roles:
+            - type: eventlogger
+              bind: 0.0.0.0:9090
+              output: /var/log/astron/eventlogger/el-%Y-%m-%d-%H-%M-%S.log
+              rotate_interval: 1d
+        """
+        self.assertEquals(self.run_test(config), TERMINATED)
+
+if __name__ == '__main__':
+    unittest.main()
2c0ce3c64720122bf2fdd80aeb2ff8359873ac83
municipal_finance/tests/test_analytics.py
municipal_finance/tests/test_analytics.py
from django.test import TestCase
from django.conf import settings


class TestAnalytics(TestCase):

    def test_noindex_flag(self):
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue('<meta name="robots" content="noindex">' not in str(response.content))

        settings.NO_INDEX = "True"
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        self.assertTrue('<meta name="robots" content="noindex">' in str(response.content))
Test that noindex flag will only show robots metatag when set
Test that noindex flag will only show robots metatag when set
Python
mit
Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data,Code4SA/municipal-data
---
+++
@@ -0,0 +1,15 @@
+from django.test import TestCase
+from django.conf import settings
+
+
+class TestAnalytics(TestCase):
+
+    def test_noindex_flag(self):
+        response = self.client.get('/')
+        self.assertEqual(response.status_code, 200)
+        self.assertTrue('<meta name="robots" content="noindex">' not in str(response.content))
+
+        settings.NO_INDEX = "True"
+        response = self.client.get('/')
+        self.assertEqual(response.status_code, 200)
+        self.assertTrue('<meta name="robots" content="noindex">' in str(response.content))
bac06acb1e6255040f371232776f3da75fb9247a
osf/migrations/0069_auto_20171127_1119.py
osf/migrations/0069_auto_20171127_1119.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-27 17:19
from __future__ import unicode_literals
import logging

from django.db import migrations
from osf.models import PreprintService
logger = logging.getLogger(__name__)

def add_preprint_doi_created(apps, schema_editor):
    """
    Data migration that makes preprint_doi_created equal to date_published for existing published preprints.
    """
    null_preprint_doi_created = PreprintService.objects.filter(preprint_doi_created__isnull=True, date_published__isnull=False)
    preprints_count = null_preprint_doi_created.count()
    current_preprint = 0
    logger.info('{} published preprints found with preprint_doi_created is null.'.format(preprints_count))

    for preprint in null_preprint_doi_created:
        current_preprint += 1
        if preprint.get_identifier('doi'):
            preprint.preprint_doi_created = preprint.date_published
            preprint.save()
            logger.info('Preprint ID {}, {}/{} preprint_doi_created field populated.'.format(preprint._id, current_preprint, preprints_count))
        else:
            logger.info('Preprint ID {}, {}/{} skipped because a DOI has not been created.'.format(preprint._id, current_preprint, preprints_count))

def reverse_func(apps, schema_editor):
    """
    Reverses data migration. Sets preprint_doi_created field back to null.
    """
    preprint_doi_created_not_null = PreprintService.objects.filter(preprint_doi_created__isnull=False)
    preprints_count = preprint_doi_created_not_null.count()
    current_preprint = 0
    logger.info('Reversing preprint_doi_created migration.')

    for preprint in preprint_doi_created_not_null:
        current_preprint += 1
        preprint.preprint_doi_created = None
        preprint.save()
        logger.info('Preprint ID {}, {}/{} preprint_doi_created field set to None.'.format(preprint._id, current_preprint, preprints_count))

class Migration(migrations.Migration):

    dependencies = [
        ('osf', '0068_preprintservice_preprint_doi_created'),
    ]

    operations = [
        migrations.RunPython(add_preprint_doi_created, reverse_func)
    ]
Add data migration to populate preprint_doi_created field on existing published preprints where DOI identifier exists. Set to preprint date_published field.
Add data migration to populate preprint_doi_created field on existing published preprints where DOI identifier exists. Set to preprint date_published field.
Python
apache-2.0
baylee-d/osf.io,baylee-d/osf.io,erinspace/osf.io,cslzchen/osf.io,mattclark/osf.io,mfraezz/osf.io,cslzchen/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,icereval/osf.io,brianjgeiger/osf.io,felliott/osf.io,cslzchen/osf.io,TomBaxter/osf.io,felliott/osf.io,aaxelb/osf.io,adlius/osf.io,aaxelb/osf.io,felliott/osf.io,laurenrevere/osf.io,Johnetordoff/osf.io,sloria/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,leb2dg/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,mattclark/osf.io,chennan47/osf.io,laurenrevere/osf.io,pattisdr/osf.io,crcresearch/osf.io,TomBaxter/osf.io,icereval/osf.io,erinspace/osf.io,chennan47/osf.io,icereval/osf.io,binoculars/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,binoculars/osf.io,felliott/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,sloria/osf.io,mattclark/osf.io,pattisdr/osf.io,crcresearch/osf.io,binoculars/osf.io,cslzchen/osf.io,mfraezz/osf.io,adlius/osf.io,aaxelb/osf.io,leb2dg/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,TomBaxter/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,leb2dg/osf.io,leb2dg/osf.io,chennan47/osf.io,adlius/osf.io,CenterForOpenScience/osf.io
---
+++
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.7 on 2017-11-27 17:19
+from __future__ import unicode_literals
+import logging
+
+from django.db import migrations
+from osf.models import PreprintService
+logger = logging.getLogger(__name__)
+
+def add_preprint_doi_created(apps, schema_editor):
+    """
+    Data migration that makes preprint_doi_created equal to date_published for existing published preprints.
+    """
+    null_preprint_doi_created = PreprintService.objects.filter(preprint_doi_created__isnull=True, date_published__isnull=False)
+    preprints_count = null_preprint_doi_created.count()
+    current_preprint = 0
+    logger.info('{} published preprints found with preprint_doi_created is null.'.format(preprints_count))
+
+    for preprint in null_preprint_doi_created:
+        current_preprint += 1
+        if preprint.get_identifier('doi'):
+            preprint.preprint_doi_created = preprint.date_published
+            preprint.save()
+            logger.info('Preprint ID {}, {}/{} preprint_doi_created field populated.'.format(preprint._id, current_preprint, preprints_count))
+        else:
+            logger.info('Preprint ID {}, {}/{} skipped because a DOI has not been created.'.format(preprint._id, current_preprint, preprints_count))
+
+def reverse_func(apps, schema_editor):
+    """
+    Reverses data migration. Sets preprint_doi_created field back to null.
+    """
+    preprint_doi_created_not_null = PreprintService.objects.filter(preprint_doi_created__isnull=False)
+    preprints_count = preprint_doi_created_not_null.count()
+    current_preprint = 0
+    logger.info('Reversing preprint_doi_created migration.')
+
+    for preprint in preprint_doi_created_not_null:
+        current_preprint += 1
+        preprint.preprint_doi_created = None
+        preprint.save()
+        logger.info('Preprint ID {}, {}/{} preprint_doi_created field set to None.'.format(preprint._id, current_preprint, preprints_count))
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('osf', '0068_preprintservice_preprint_doi_created'),
+    ]
+
+    operations = [
+        migrations.RunPython(add_preprint_doi_created, reverse_func)
+    ]
167a6497d79a4a18badd5ea85a87e7eefcd02696
test/acceptance/__init__.py
test/acceptance/__init__.py
# -*- coding: utf-8 -*-
"""
Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U

This file is part of fiware-orion-pep

fiware-orion-pep is free software: you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.

fiware-orion-pep is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public
License along with fiware-orion-pep.
If not, see http://www.gnu.org/licenses/.

For those usages not covered by the GNU Affero General Public License
please contact with::[iot_support@tid.es]
"""
__author__ = 'Jon Calderin Goñi <jon.caldering@gmail.com>'

import os

"""
Make sure the logs path exists and create it otherwise.
"""
if not os.path.exists('logs'):
    os.makedirs('logs')
Add init file to the root acceptance tests folder
Add init file to the root acceptance tests folder
Python
agpl-3.0
telefonicaid/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,telefonicaid/fiware-pep-steelskin,agroknow/fiware-pep-steelskin,telefonicaid/fiware-pep-steelskin,telefonicaid/fiware-pep-steelskin
---
+++
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+"""
+Copyright 2014 Telefonica Investigación y Desarrollo, S.A.U
+
+This file is part of fiware-orion-pep
+
+fiware-orion-pep is free software: you can redistribute it and/or
+modify it under the terms of the GNU Affero General Public License as
+published by the Free Software Foundation, either version 3 of the License,
+or (at your option) any later version.
+
+fiware-orion-pep is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+See the GNU Affero General Public License for more details.
+
+You should have received a copy of the GNU Affero General Public
+License along with fiware-orion-pep.
+If not, see http://www.gnu.org/licenses/.
+
+For those usages not covered by the GNU Affero General Public License
+please contact with::[iot_support@tid.es]
+"""
+__author__ = 'Jon Calderin Goñi <jon.caldering@gmail.com>'
+
+import os
+
+"""
+Make sure the logs path exists and create it otherwise.
+"""
+if not os.path.exists('logs'):
+    os.makedirs('logs')
b171eb0c77f2d68051b48145f4e49275ed6860b9
account/tests/test_models.py
account/tests/test_models.py
from django.conf import settings
from django.core import mail
from django.core.urlresolvers import reverse
from django.test import TestCase, override_settings

from django.contrib.auth.models import User

from account.models import SignupCode


class SignupCodeModelTestCase(TestCase):
    def test_exists_no_match(self):
        code = SignupCode(email='foobar@example.com', code='FOOFOO')
        code.save()

        self.assertFalse(SignupCode.exists(code='BARBAR'))
        self.assertFalse(SignupCode.exists(email='bar@example.com'))
        self.assertFalse(SignupCode.exists(email='bar@example.com', code='BARBAR'))
        self.assertFalse(SignupCode.exists())

    def test_exists_email_only_match(self):
        code = SignupCode(email='foobar@example.com', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='foobar@example.com'))

    def test_exists_code_only_match(self):
        code = SignupCode(email='foobar@example.com', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(code='FOOFOO'))
        self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))

    def test_exists_email_match_code_mismatch(self):
        code = SignupCode(email='foobar@example.com', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='foobar@example.com', code='BARBAR'))

    def test_exists_code_match_email_mismatch(self):
        code = SignupCode(email='foobar@example.com', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))

    def test_exists_both_match(self):
        code = SignupCode(email='foobar@example.com', code='FOOFOO')
        code.save()

        self.assertTrue(SignupCode.exists(email='foobar@example.com', code='FOOFOO'))
Add tests for signup code exists method
Add tests for signup code exists method
Python
mit
pinax/django-user-accounts,pinax/django-user-accounts
---
+++
@@ -0,0 +1,50 @@
+from django.conf import settings
+from django.core import mail
+from django.core.urlresolvers import reverse
+from django.test import TestCase, override_settings
+
+from django.contrib.auth.models import User
+
+from account.models import SignupCode
+
+
+class SignupCodeModelTestCase(TestCase):
+    def test_exists_no_match(self):
+        code = SignupCode(email='foobar@example.com', code='FOOFOO')
+        code.save()
+
+        self.assertFalse(SignupCode.exists(code='BARBAR'))
+        self.assertFalse(SignupCode.exists(email='bar@example.com'))
+        self.assertFalse(SignupCode.exists(email='bar@example.com', code='BARBAR'))
+        self.assertFalse(SignupCode.exists())
+
+    def test_exists_email_only_match(self):
+        code = SignupCode(email='foobar@example.com', code='FOOFOO')
+        code.save()
+
+        self.assertTrue(SignupCode.exists(email='foobar@example.com'))
+
+    def test_exists_code_only_match(self):
+        code = SignupCode(email='foobar@example.com', code='FOOFOO')
+        code.save()
+
+        self.assertTrue(SignupCode.exists(code='FOOFOO'))
+        self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))
+
+    def test_exists_email_match_code_mismatch(self):
+        code = SignupCode(email='foobar@example.com', code='FOOFOO')
+        code.save()
+
+        self.assertTrue(SignupCode.exists(email='foobar@example.com', code='BARBAR'))
+
+    def test_exists_code_match_email_mismatch(self):
+        code = SignupCode(email='foobar@example.com', code='FOOFOO')
+        code.save()
+
+        self.assertTrue(SignupCode.exists(email='bar@example.com', code='FOOFOO'))
+
+    def test_exists_both_match(self):
+        code = SignupCode(email='foobar@example.com', code='FOOFOO')
+        code.save()
+
+        self.assertTrue(SignupCode.exists(email='foobar@example.com', code='FOOFOO'))
95874a5e06ff70d1cbea49321549beee5cc5abba
examples/store_and_retrieve_units_example.py
examples/store_and_retrieve_units_example.py
""" Author: Daniel Berke, berke.daniel@gmail.com Date: October 27, 2019 Requirements: h5py>=2.10.0, unyt>=v2.4.0 Notes: This short example script shows how to save unit information attached to a `unyt_array` using `attrs` in HDF5, and recover it upon reading the file. It uses the Unyt package (https://github.com/yt-project/unyt) because that's what I'm familiar with, but presumably similar options exist for Pint and astropy.units. """ import h5py import tempfile import unyt as u # Set up a temporary file for this example. tf = tempfile.TemporaryFile() f = h5py.File(tf, 'a') # Create some mock data with moderately complicated units (this is the # dimensional representation of Joules of energy). test_data = [1, 2, 3, 4, 5] * u.kg * ( u.m / u.s ) ** 2 print(test_data.units) # kg*m**2/s**2 # Create a data set to hold the numerical information: f.create_dataset('stored data', data=test_data) # Save the units information as a string in `attrs`. f['stored data'].attrs['units'] = str(test_data.units) # Now recover the data, using the saved units information to reconstruct the # original quantities. reconstituted_data = u.unyt_array(f['stored data'], units=f['stored data'].attrs['units']) print(reconstituted_data.units) # kg*m**2/s**2 assert reconstituted_data.units == test_data.units
Create an example of storing units in HDF5
Create an example of storing units in HDF5 This example script demonstrates how to store unit information attached to an array of numbers using HDF5 in such a way that the original data can be recovered upon reading the file. It uses the Unyt package (https://github.com/yt-project/unyt) for handling units, but presumably similar methods would work for people using astropy.units or Pint. It relies on Unyt being able to be able to parse its own string-ified versions of the units attached correctly, but as it should hopefully create such strings consistently it should work for the majority of use cases.
Python
bsd-3-clause
h5py/h5py,h5py/h5py,h5py/h5py
---
+++
@@ -0,0 +1,40 @@
+"""
+Author: Daniel Berke, berke.daniel@gmail.com
+Date: October 27, 2019
+Requirements: h5py>=2.10.0, unyt>=v2.4.0
+Notes: This short example script shows how to save unit information attached
+to a `unyt_array` using `attrs` in HDF5, and recover it upon reading the file.
+It uses the Unyt package (https://github.com/yt-project/unyt) because that's
+what I'm familiar with, but presumably similar options exist for Pint and
+astropy.units.
+"""
+
+import h5py
+import tempfile
+import unyt as u
+
+# Set up a temporary file for this example.
+tf = tempfile.TemporaryFile()
+f = h5py.File(tf, 'a')
+
+# Create some mock data with moderately complicated units (this is the
+# dimensional representation of Joules of energy).
+test_data = [1, 2, 3, 4, 5] * u.kg * ( u.m / u.s ) ** 2
+print(test_data.units)
+# kg*m**2/s**2
+
+# Create a data set to hold the numerical information:
+f.create_dataset('stored data', data=test_data)
+
+# Save the units information as a string in `attrs`.
+f['stored data'].attrs['units'] = str(test_data.units)
+
+# Now recover the data, using the saved units information to reconstruct the
+# original quantities.
+reconstituted_data = u.unyt_array(f['stored data'],
+                                  units=f['stored data'].attrs['units'])
+
+print(reconstituted_data.units)
+# kg*m**2/s**2
+
+assert reconstituted_data.units == test_data.units